repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
chrishas35/django-travis-ci | django/contrib/databrowse/plugins/calendars.py | 86 | 5742 | from django import http
from django.db import models
from django.contrib.databrowse.datastructures import EasyModel
from django.contrib.databrowse.sites import DatabrowsePlugin
from django.shortcuts import render_to_response
from django.utils.text import capfirst
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django.views.generic import dates
from django.utils import datetime_safe
class DateViewMixin(object):
    """Mixin that feeds databrowse navigation data into date-based views.

    Adds ``root_url``, ``model`` and ``field`` to the template context on
    top of whatever the generic date view already provides.
    """
    # Defaults shared by every databrowse calendar view.
    allow_empty = False
    allow_future = True
    root_url = None
    model = None
    field = None

    def get_context_data(self, **kwargs):
        """Return the parent context extended with databrowse values."""
        ctx = super(DateViewMixin, self).get_context_data(**kwargs)
        ctx['root_url'] = self.root_url
        ctx['model'] = self.model
        ctx['field'] = self.field
        return ctx
# Concrete calendar views: each pairs the databrowse context mixin with the
# matching django generic date-archive view and a databrowse template.
class DayView(DateViewMixin, dates.DayArchiveView):
    template_name = 'databrowse/calendar_day.html'


class MonthView(DateViewMixin, dates.MonthArchiveView):
    template_name = 'databrowse/calendar_month.html'


class YearView(DateViewMixin, dates.YearArchiveView):
    template_name = 'databrowse/calendar_year.html'


class IndexView(DateViewMixin, dates.ArchiveIndexView):
    template_name = 'databrowse/calendar_main.html'
class CalendarPlugin(DatabrowsePlugin):
    """Databrowse plugin exposing calendar pages for date-based model fields."""

    def __init__(self, field_names=None):
        # Optional whitelist of field names; None means "all date fields".
        self.field_names = field_names

    def field_dict(self, model):
        """
        Helper function that returns a dictionary of all DateFields or
        DateTimeFields in the given model. If self.field_names is set, it
        takes that into account when building the dictionary.
        """
        # DateTimeField subclasses DateField, so one isinstance check covers both.
        if self.field_names is None:
            return dict([(f.name, f) for f in model._meta.fields if isinstance(f, models.DateField)])
        else:
            return dict([(f.name, f) for f in model._meta.fields if isinstance(f, models.DateField) and f.name in self.field_names])

    def model_index_html(self, request, model, site):
        """Return an HTML snippet linking to each calendar-capable field."""
        fields = self.field_dict(model)
        if not fields:
            return u''
        return mark_safe(u'<p class="filter"><strong>View calendar by:</strong> %s</p>' % \
            u', '.join(['<a href="calendars/%s/">%s</a>' % (f.name, force_unicode(capfirst(f.verbose_name))) for f in fields.values()]))

    def urls(self, plugin_name, easy_instance_field):
        """Return the day-view URL for a date field value (implicit None otherwise)."""
        if isinstance(easy_instance_field.field, models.DateField):
            d = easy_instance_field.raw_value
            return [mark_safe(u'%s%s/%s/%s/%s/%s/' % (
                easy_instance_field.model.url(),
                plugin_name, easy_instance_field.field.name,
                str(d.year),
                # datetime_safe avoids strftime() failing on pre-1900 dates.
                datetime_safe.new_date(d).strftime('%b').lower(),
                d.day))]

    def model_view(self, request, model_databrowse, url):
        """Dispatch plugin URLs: homepage when url is None, else a field calendar."""
        self.model, self.site = model_databrowse.model, model_databrowse.site
        self.fields = self.field_dict(self.model)

        # If the model has no DateFields, there's no point in going further.
        if not self.fields:
            raise http.Http404('The requested model has no calendars.')

        if url is None:
            return self.homepage_view(request)
        url_bits = url.split('/')
        if url_bits[0] in self.fields:
            # Remaining path pieces (year/month/day) become positional args.
            return self.calendar_view(request, self.fields[url_bits[0]], *url_bits[1:])

        raise http.Http404('The requested page does not exist.')

    def homepage_view(self, request):
        """Render the list of calendar fields for the current model."""
        easy_model = EasyModel(self.site, self.model)
        field_list = self.fields.values()
        # Python 2: dict.values() returns a list, so in-place sort is fine.
        field_list.sort(key=lambda k: k.verbose_name)
        return render_to_response('databrowse/calendar_homepage.html', {
            'root_url': self.site.root_url,
            'model': easy_model,
            'field_list': field_list
        })

    def calendar_view(self, request, field, year=None, month=None, day=None):
        """Delegate to the most specific archive view for the given date parts."""
        easy_model = EasyModel(self.site, self.model)
        root_url = self.site.root_url
        if day is not None:
            return DayView.as_view(
                year=year, month=month, day=day,
                date_field=field.name,
                queryset=easy_model.get_query_set(),
                root_url=root_url,
                model=easy_model,
                field=field
            )(request)
        elif month is not None:
            return MonthView.as_view(
                year=year, month=month,
                date_field=field.name,
                queryset=easy_model.get_query_set(),
                root_url=root_url,
                model=easy_model,
                field=field
            )(request)
        elif year is not None:
            return YearView.as_view(
                year=year,
                date_field=field.name,
                queryset=easy_model.get_query_set(),
                root_url=root_url,
                model=easy_model,
                field=field
            )(request)
        else:
            return IndexView.as_view(
                date_field=field.name,
                queryset=easy_model.get_query_set(),
                root_url=root_url,
                model=easy_model,
                field=field
            )(request)
        # NOTE(review): unreachable -- every branch above returns.
        assert False, ('%s, %s, %s, %s' % (field, year, month, day))
| bsd-3-clause |
Elnya/aosproject | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
    # Called by perf once, before any events are processed (Python 2 script).
    print "trace_begin"
    pass
def trace_end():
    # Called by perf after the last event; dump counts of unhandled events.
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
                       common_secs, common_nsecs, common_pid, common_comm,
                       vec):
    # Handler for the irq:softirq_entry tracepoint.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm)
    print_uncommon(context)
    # Trailing comma suppresses the extra newline (Python 2 print statement).
    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
                  common_secs, common_nsecs, common_pid, common_comm,
                  call_site, ptr, bytes_req, bytes_alloc,
                  gfp_flags):
    # Handler for the kmem:kmalloc tracepoint; prints allocation details.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm)
    print_uncommon(context)
    # flag_str() renders the numeric gfp_flags as symbolic GFP_* names.
    print "call_site=%u, ptr=%u, bytes_req=%u, " \
        "bytes_alloc=%u, gfp_flags=%s\n" % \
        (call_site, ptr, bytes_req, bytes_alloc,
         flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # Count events that have no dedicated handler.  `unhandled` is an
    # autodict(); the first += on a fresh key raises TypeError (not
    # KeyError), hence the except clause below.
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # One-line common prefix for every event; trailing comma keeps the
    # cursor on the same line for the event-specific fields that follow.
    print "%-20s %5u %05u.%09u %8u %-20s " % \
        (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # These values are fetched back from perf via the context object rather
    # than being passed as handler arguments.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
           common_lock_depth(context))
def print_unhandled():
    # Print a two-column table of event name -> occurrence count for all
    # events that had no dedicated handler; silent if there were none.
    keys = unhandled.keys()
    if not keys:
        return

    print "\nunhandled events:\n\n",

    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
                            "-----------"),

    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
ruba9/HotelChatbot | my_env/lib/python3.6/site-packages/wheel/signatures/__init__.py | 565 | 3779 | """
Create and verify jws-js format Ed25519 signatures.
"""
__all__ = [ 'sign', 'verify' ]
import json
from ..util import urlsafe_b64decode, urlsafe_b64encode, native, binary
ed25519ll = None
ALG = "Ed25519"
def get_ed25519ll():
    """Lazy import-and-test of ed25519 module.

    Rebinds the module-level ``ed25519ll`` name on first use, preferring the
    fast C implementation and falling back to the pure-Python one.
    """
    global ed25519ll
    if not ed25519ll:
        try:
            import ed25519ll  # fast (thousands / s)
        except (ImportError, OSError):  # pragma nocover
            from . import ed25519py as ed25519ll  # pure Python (hundreds / s)
        # Sanity-check a sign/verify round trip once per process.
        test()
    return ed25519ll
def sign(payload, keypair):
    """Return a JWS-JS format signature given a JSON-serializable payload and
    an Ed25519 keypair."""
    get_ed25519ll()

    jws_header = {
        "alg": ALG,
        "jwk": {
            "kty": ALG,  # alg -> kty in jwk-08.
            "vk": native(urlsafe_b64encode(keypair.vk))
        }
    }

    def _b64_json(obj):
        # Canonical (sorted-key) JSON, UTF-8 encoded, then base64url.
        return urlsafe_b64encode(binary(json.dumps(obj, sort_keys=True)))

    protected = _b64_json(jws_header)
    body = _b64_json(payload)
    signing_input = protected + b"." + body

    signed = ed25519ll.crypto_sign(signing_input, keypair.sk)
    encoded_sig = urlsafe_b64encode(signed[:ed25519ll.SIGNATUREBYTES])

    return {
        "recipients": [
            {"header": native(protected),
             "signature": native(encoded_sig)}
        ],
        "payload": native(body)
    }
def assertTrue(condition, message=""):
    """Raise ValueError(message) unless *condition* is truthy."""
    if condition:
        return
    raise ValueError(message)
def verify(jwsjs):
    """Return (decoded headers, payload) if all signatures in jwsjs are
    consistent, else raise ValueError.

    Caller must decide whether the keys are actually trusted."""
    get_ed25519ll()
    # XXX forbid duplicate keys in JSON input using object_pairs_hook (2.7+)
    recipients = jwsjs["recipients"]
    encoded_payload = binary(jwsjs["payload"])
    headers = []
    for recipient in recipients:
        assertTrue(len(recipient) == 2, "Unknown recipient key {0}".format(recipient))
        h = binary(recipient["header"])
        s = binary(recipient["signature"])
        header = json.loads(native(urlsafe_b64decode(h)))
        assertTrue(header["alg"] == ALG,
                   "Unexpected algorithm {0}".format(header["alg"]))
        if "alg" in header["jwk"] and not "kty" in header["jwk"]:
            header["jwk"]["kty"] = header["jwk"]["alg"]  # b/w for JWK < -08
        assertTrue(header["jwk"]["kty"] == ALG,  # true for Ed25519
                   "Unexpected key type {0}".format(header["jwk"]["kty"]))
        vk = urlsafe_b64decode(binary(header["jwk"]["vk"]))
        secured_input = b".".join((h, encoded_payload))
        sig = urlsafe_b64decode(s)
        # crypto_sign_open raises if the signature does not verify, and
        # returns the original signed message on success.
        sig_msg = sig + secured_input
        verified_input = native(ed25519ll.crypto_sign_open(sig_msg, vk))
        verified_header, verified_payload = verified_input.split('.')
        verified_header = binary(verified_header)
        decoded_header = native(urlsafe_b64decode(verified_header))
        headers.append(json.loads(decoded_header))

    verified_payload = binary(verified_payload)

    # only return header, payload that have passed through the crypto library.
    payload = json.loads(native(urlsafe_b64decode(verified_payload)))

    return headers, payload
def test():
    # Round-trip self-check run by get_ed25519ll(): sign a payload, verify
    # it, then confirm that a tampered payload fails verification.
    kp = ed25519ll.crypto_sign_keypair()
    payload = {'test': 'onstartup'}
    jwsjs = json.loads(json.dumps(sign(payload, kp)))
    verify(jwsjs)
    jwsjs['payload'] += 'x'
    try:
        verify(jwsjs)
    except ValueError:
        pass
    else:  # pragma no cover
        raise RuntimeError("No error from bad wheel.signatures payload.")
| mit |
joariasl/odoo | addons/crm/crm_segmentation.py | 333 | 9067 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv,orm
class crm_segmentation(osv.osv):
    '''
    A segmentation is a tool to automatically assign categories on partners.
    These assignations are based on criterions.
    '''
    _name = "crm.segmentation"
    _description = "Partner Segmentation"

    _columns = {
        'name': fields.char('Name', required=True, help='The name of the segmentation.'),
        'description': fields.text('Description'),
        # Category applied to every partner matching the criteria.
        'categ_id': fields.many2one('res.partner.category', 'Partner Category',\
            required=True, help='The partner category that will be \
added to partners that match the segmentation criterions after computation.'),
        'exclusif': fields.boolean('Exclusive', help='Check if the category is limited to partners that match the segmentation criterions.\
\nIf checked, remove the category from partners that doesn\'t match segmentation criterions'),
        'state': fields.selection([('not running','Not Running'),\
            ('running','Running')], 'Execution Status', readonly=True),
        'partner_id': fields.integer('Max Partner ID processed'),
        'segmentation_line': fields.one2many('crm.segmentation.line', \
            'segmentation_id', 'Criteria', required=True, copy=True),
        'sales_purchase_active': fields.boolean('Use The Sales Purchase Rules', help='Check if you want to use this tab as part of the segmentation rule. If not checked, the criteria beneath will be ignored')
    }
    _defaults = {
        'partner_id': lambda *a: 0,
        'state': lambda *a: 'not running',
    }

    def process_continue(self, cr, uid, ids, start=False):
        """ Run (or resume) the segmentation: assign the category to every
            partner that matches the criteria lines.
            @param self: The object pointer
            @param cr: the current row, from the database cursor,
            @param uid: the current user's ID for security checks,
            @param ids: List of Process continue's IDs"""
        partner_obj = self.pool.get('res.partner')
        categs = self.read(cr, uid, ids, ['categ_id', 'exclusif', 'sales_purchase_active'])
        for categ in categs:
            if start:
                if categ['exclusif']:
                    # Exclusive mode: strip the category from everyone first,
                    # then re-add it only to matching partners below.
                    cr.execute('delete from res_partner_res_partner_category_rel \
                        where category_id=%s', (categ['categ_id'][0],))
                    partner_obj.invalidate_cache(cr, uid, ['category_id'])

            id = categ['id']

            cr.execute('select id from res_partner order by id ')
            partners = [x[0] for x in cr.fetchall()]

            if categ['sales_purchase_active']:
                # Filter out partners failing the sales/purchase criteria.
                to_remove_list=[]
                cr.execute('select id from crm_segmentation_line where segmentation_id=%s', (id,))
                line_ids = [x[0] for x in cr.fetchall()]

                for pid in partners:
                    if (not self.pool.get('crm.segmentation.line').test(cr, uid, line_ids, pid)):
                        to_remove_list.append(pid)
                for pid in to_remove_list:
                    partners.remove(pid)

            for partner in partner_obj.browse(cr, uid, partners):
                category_ids = [categ_id.id for categ_id in partner.category_id]
                if categ['categ_id'][0] not in category_ids:
                    # Raw insert into the m2m table; invalidate the ORM cache
                    # so subsequent reads see the new link.
                    cr.execute('insert into res_partner_res_partner_category_rel (category_id,partner_id) \
                        values (%s,%s)', (categ['categ_id'][0], partner.id))
                    partner_obj.invalidate_cache(cr, uid, ['category_id'], [partner.id])

            self.write(cr, uid, [id], {'state':'not running', 'partner_id':0})
        return True

    def process_stop(self, cr, uid, ids, *args):
        """ Reset the segmentation run state.
            @param self: The object pointer
            @param cr: the current row, from the database cursor,
            @param uid: the current user's ID for security checks,
            @param ids: List of Process stop's IDs"""
        return self.write(cr, uid, ids, {'state':'not running', 'partner_id':0})

    def process_start(self, cr, uid, ids, *args):
        """ Mark the segmentation as running and process from the beginning.
            @param self: The object pointer
            @param cr: the current row, from the database cursor,
            @param uid: the current user's ID for security checks,
            @param ids: List of Process start's IDs """
        self.write(cr, uid, ids, {'state':'running', 'partner_id':0})
        return self.process_continue(cr, uid, ids, start=True)
class crm_segmentation_line(osv.osv):
    """ Segmentation line: one sale/purchase-amount criterion of a segmentation. """
    _name = "crm.segmentation.line"
    _description = "Segmentation line"

    _columns = {
        'name': fields.char('Rule Name', required=True),
        'segmentation_id': fields.many2one('crm.segmentation', 'Segmentation'),
        'expr_name': fields.selection([('sale','Sale Amount'),
            ('purchase','Purchase Amount')], 'Control Variable', required=True),
        'expr_operator': fields.selection([('<','<'),('=','='),('>','>')], 'Operator', required=True),
        'expr_value': fields.float('Value', required=True),
        'operator': fields.selection([('and','Mandatory Expression'),\
            ('or','Optional Expression')],'Mandatory / Optional', required=True),
    }
    _defaults = {
        'expr_name': lambda *a: 'sale',
        'expr_operator': lambda *a: '>',
        'operator': lambda *a: 'and'
    }

    def test(self, cr, uid, ids, partner_id):
        """ Evaluate all criterion lines in ``ids`` against one partner.
            Returns True when the partner satisfies the rules ('and' lines
            must all pass; any passing line short-circuits to True).
            @param self: The object pointer
            @param cr: the current row, from the database cursor,
            @param uid: the current user's ID for security checks,
            @param ids: List of Test's IDs """
        expression = {'<': lambda x,y: x<y, '=':lambda x,y:x==y, '>':lambda x,y:x>y}
        ok = False  # NOTE(review): unused local.
        lst = self.read(cr, uid, ids)
        for l in lst:
            # Criteria only apply when the accounting module is installed.
            cr.execute('select * from ir_module_module where name=%s and state=%s', ('account','installed'))
            if cr.fetchone():
                if l['expr_name']=='sale':
                    # Net sales: customer invoices minus customer refunds.
                    cr.execute('SELECT SUM(l.price_unit * l.quantity) ' \
                            'FROM account_invoice_line l, account_invoice i ' \
                            'WHERE (l.invoice_id = i.id) ' \
                            'AND i.partner_id = %s '\
                            'AND i.type = \'out_invoice\'',
                            (partner_id,))
                    value = cr.fetchone()[0] or 0.0
                    cr.execute('SELECT SUM(l.price_unit * l.quantity) ' \
                            'FROM account_invoice_line l, account_invoice i ' \
                            'WHERE (l.invoice_id = i.id) ' \
                            'AND i.partner_id = %s '\
                            'AND i.type = \'out_refund\'',
                            (partner_id,))
                    value -= cr.fetchone()[0] or 0.0
                elif l['expr_name']=='purchase':
                    # Net purchases: supplier invoices minus supplier refunds.
                    cr.execute('SELECT SUM(l.price_unit * l.quantity) ' \
                            'FROM account_invoice_line l, account_invoice i ' \
                            'WHERE (l.invoice_id = i.id) ' \
                            'AND i.partner_id = %s '\
                            'AND i.type = \'in_invoice\'',
                            (partner_id,))
                    value = cr.fetchone()[0] or 0.0
                    cr.execute('SELECT SUM(l.price_unit * l.quantity) ' \
                            'FROM account_invoice_line l, account_invoice i ' \
                            'WHERE (l.invoice_id = i.id) ' \
                            'AND i.partner_id = %s '\
                            'AND i.type = \'in_refund\'',
                            (partner_id,))
                    value -= cr.fetchone()[0] or 0.0
                res = expression[l['expr_operator']](value, l['expr_value'])
                if (not res) and (l['operator']=='and'):
                    # A failing mandatory line rejects the partner outright.
                    return False
                if res:
                    return True
        return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
richardxia/asp-multilevel-debug | tests/codegen_test.py | 3 | 2288 | import unittest2 as unittest
from asp.codegen.ast_tools import *
class ReplacerTests(unittest.TestCase):
    """Tests for ASTNodeReplacer: node-for-node substitution in an AST."""

    def test_num(self):
        # Replace the literal 4 with 5 inside a BinOp; mutation is in place.
        a = ast.BinOp(ast.Num(4), ast.Add(), ast.Num(9))
        result = ASTNodeReplacer(ast.Num(4), ast.Num(5)).visit(a)
        self.assertEqual(a.left.n, 5)

    def test_Name(self):
        # Replace a Name node by matching its identifier.
        a = ast.BinOp(ast.Num(4), ast.Add(), ast.Name("variable", None))
        result = ASTNodeReplacer(ast.Name("variable", None), ast.Name("my_variable", None)).visit(a)
        self.assertEqual(a.right.id, "my_variable")
class ConversionTests(unittest.TestCase):
    """Tests for ConvertAST: Python AST nodes rendered as C source strings."""

    def test_num(self):
        a = ast.Num(4)
        b = ConvertAST().visit(a)
        self.assertEqual(str(b), "4")

    def test_Name(self):
        a = ast.Name("hello", None)
        b = ConvertAST().visit(a)
        self.assertEqual(str(b), "hello")

    def test_BinOp(self):
        a = ast.BinOp(ast.Num(4), ast.Add(), ast.Num(9))
        b = ConvertAST().visit(a)
        self.assertEqual(str(b), "(4 + 9)")

    def test_UnaryOp(self):
        a = ast.UnaryOp(ast.USub(), ast.Name("goober", None))
        b = ConvertAST().visit(a)
        self.assertEqual(str(b), "(-(goober))")

    def test_Subscript(self):
        a = ast.Subscript(ast.Name("hello", None),
                          ast.Index(ast.Num(4)),
                          None)
        b = ConvertAST().visit(a)
        self.assertEqual(str(b), "hello[4]")

    def test_Assign(self):
        a = ast.Assign([ast.Name("hello", None)], ast.Num(4))
        b = ConvertAST().visit(a)
        self.assertEqual(str(b), "hello = 4")

    def test_simple_FunctionDef(self):
        # No-arg function becomes a void C function.
        a = ast.FunctionDef("hello",
                            ast.arguments([], None, None, []),
                            [ast.BinOp(ast.Num(10), ast.Add(), ast.Num(20))], [])
        b = ConvertAST().visit(a)
        self.assertEqual(str(b), "void hello()\n{\n (10 + 20);\n}")

    def test_FunctionDef_with_arguments(self):
        # Untyped Python args are rendered as void* parameters.
        a = ast.FunctionDef("hello",
                            ast.arguments([ast.Name("world", None)], None, None, []),
                            [ast.BinOp(ast.Num(10), ast.Add(), ast.Num(20))], [])
        b = ConvertAST().visit(a)
        self.assertEqual(str(b), "void hello(void *world)\n{\n (10 + 20);\n}")
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
jantman/awslimitchecker | awslimitchecker/tests/test_utils.py | 1 | 19953 | """
awslimitchecker/tests/test_utils.py
The latest version of this package is available at:
<https://github.com/jantman/awslimitchecker>
##############################################################################
Copyright 2015-2018 Jason Antman <jason@jasonantman.com>
This file is part of awslimitchecker, also known as awslimitchecker.
awslimitchecker is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
awslimitchecker is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with awslimitchecker. If not, see <http://www.gnu.org/licenses/>.
The Copyright and Authors attributions contained herein may not be removed or
otherwise altered, except to add the Author attribution of a contributor to
this work. (Additional Terms pursuant to Section 7b of the AGPL v3)
##############################################################################
While not legally required, I sincerely request that anyone who finds
bugs please submit them at <https://github.com/jantman/awslimitchecker> or
to me via email, and that you send any contributions or improvements
either as a pull request on GitHub, or to me via email.
##############################################################################
AUTHORS:
Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
##############################################################################
"""
import argparse
import pytest
import sys
import termcolor
from awslimitchecker.limit import AwsLimit, AwsLimitUsage
from awslimitchecker.utils import (
StoreKeyValuePair, dict2cols, paginate_dict, _get_dict_value_by_path,
_set_dict_value_by_path, _get_latest_version, color_output,
issue_string_tuple
)
# https://code.google.com/p/mock/issues/detail?id=249
# py>=3.4 should use unittest.mock not the mock package on pypi
if (
sys.version_info[0] < 3 or
sys.version_info[0] == 3 and sys.version_info[1] < 4
):
from mock import call, Mock, patch
else:
from unittest.mock import call, Mock, patch
pbm = 'awslimitchecker.utils'
class TestStoreKeyValuePair(object):
    """Tests for the StoreKeyValuePair argparse action (``key=value`` options)."""

    def test_argparse_works(self):
        # Baseline: a plain argparse store action behaves as expected.
        parser = argparse.ArgumentParser()
        parser.add_argument('--foo', action='store', type=str)
        res = parser.parse_args(['--foo=bar'])
        assert res.foo == 'bar'

    def test_long(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('--one', action=StoreKeyValuePair)
        res = parser.parse_args(['--one=foo=bar'])
        assert res.one == {'foo': 'bar'}

    def test_short(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-o', '--one', action=StoreKeyValuePair)
        res = parser.parse_args(['-o', 'foo=bar'])
        assert res.one == {'foo': 'bar'}

    def test_multi_long(self):
        # Repeated options accumulate into one dict.
        parser = argparse.ArgumentParser()
        parser.add_argument('-o', '--one', action=StoreKeyValuePair)
        res = parser.parse_args(['--one=foo=bar', '--one=baz=blam'])
        assert res.one == {'foo': 'bar', 'baz': 'blam'}

    def test_multi_short(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-o', '--one', action=StoreKeyValuePair)
        res = parser.parse_args(['-o', 'foo=bar', '-o', 'baz=blam'])
        assert res.one == {'foo': 'bar', 'baz': 'blam'}

    def test_no_equals(self):
        # A value without '=' makes argparse exit with status 2.
        parser = argparse.ArgumentParser()
        parser.add_argument('-o', '--one', action=StoreKeyValuePair)
        with pytest.raises(SystemExit) as excinfo:
            parser.parse_args(['-o', 'foobar'])
        if sys.version_info[0] > 2:
            msg = excinfo.value.args[0]
        else:
            msg = excinfo.value.message
        assert msg == 2

    def test_quoted(self):
        # Surrounding double quotes are stripped from the key.
        parser = argparse.ArgumentParser()
        parser.add_argument('-o', '--one', action=StoreKeyValuePair)
        res = parser.parse_args([
            '-o',
            '"foo some"=bar',
            '--one="baz other"=blam'
        ])
        assert res.one == {'foo some': 'bar', 'baz other': 'blam'}
class Test_dict2cols(object):
    """Tests for dict2cols(): render a dict as two aligned, sorted columns."""

    def test_simple(self):
        d = {'foo': 'bar', 'baz': 'blam'}
        res = dict2cols(d)
        assert res == 'baz blam\nfoo bar\n'

    def test_spaces(self):
        # `spaces` controls padding between the key column and the value.
        d = {'foo': 'bar', 'baz': 'blam'}
        res = dict2cols(d, spaces=4)
        assert res == 'baz    blam\nfoo    bar\n'

    def test_less_simple(self):
        # Column width follows the longest key.
        d = {
            'zzz': 'bar',
            'aaa': 'blam',
            'abcdefghijklmnopqrstuv': 'someothervalue',
        }
        res = dict2cols(d)
        assert res == '' + \
            'aaa                    blam\n' + \
            'abcdefghijklmnopqrstuv someothervalue\n' + \
            'zzz                    bar\n'

    def test_separator(self):
        d = {'foo': 'bar', 'baz': 'blam'}
        res = dict2cols(d, spaces=4, separator='.')
        assert res == 'baz....blam\nfoo....bar\n'

    def test_empty(self):
        d = {}
        res = dict2cols(d)
        assert res == ''
class TestPaginateDict(object):
    """Tests for paginate_dict(), which follows marker-based API pagination."""

    def test_no_marker_path(self):
        func = Mock()
        with pytest.raises(Exception) as excinfo:
            paginate_dict(func)
        ex_str = "alc_marker_path must be specified for queries " \
                 "that return a dict."
        assert ex_str in str(excinfo.value)

    def test_no_data_path(self):
        func = Mock()
        with pytest.raises(Exception) as excinfo:
            paginate_dict(func, alc_marker_path=[])
        ex_str = "alc_data_path must be specified for queries " \
                 "that return a dict."
        assert ex_str in str(excinfo.value)

    def test_no_marker_param(self):
        func = Mock()
        with pytest.raises(Exception) as excinfo:
            paginate_dict(
                func,
                alc_marker_path=[],
                alc_data_path=[]
            )
        ex_str = "alc_marker_param must be specified for queries " \
                 "that return a dict."
        assert ex_str in str(excinfo.value)

    def test_bad_path(self):
        # If the marker path doesn't resolve, return the result unchanged
        # after a single call.
        result = {
            'k1': {
                'badpath': {}
            }
        }
        func = Mock()
        func.return_value = result
        res = paginate_dict(
            func,
            alc_marker_path=['k1', 'k2', 'Marker'],
            alc_data_path=['k1', 'k2', 'Data'],
            alc_marker_param='Marker'
        )
        assert res == result
        assert func.mock_calls == [call()]

    def test_no_marker(self):
        # A response without a marker is a single page; exactly one call.
        result = {
            'k1': {
                'k2': {
                    'Data': []
                }
            }
        }
        func = Mock()
        func.return_value = result
        res = paginate_dict(
            func,
            alc_marker_path=['k1', 'k2', 'Marker'],
            alc_data_path=['k1', 'k2', 'Data'],
            alc_marker_param='Marker'
        )
        assert res == result
        assert func.mock_calls == [call()]

    def test_two_iterations(self):
        # Three pages: data lists are concatenated, the marker from each
        # response is fed back as the marker param of the next call, and
        # non-data keys come from the final response.
        e1 = Mock()
        e2 = Mock()
        e3 = Mock()
        e4 = Mock()
        e5 = Mock()
        e6 = Mock()
        func = Mock()
        res1 = {
            'k1': {
                'k2': {
                    'Data': [e1, e2],
                    'Foo1': 'bar1',
                    'Marker': 'marker1'
                }
            }
        }
        res2 = {
            'k1': {
                'k2': {
                    'Data': [e3, e4],
                    'Foo2': 'bar2',
                    'Marker': 'marker2'
                }
            }
        }
        res3 = {
            'k1': {
                'k2': {
                    'Data': [e5, e6],
                    'Foo3': 'bar3'
                }
            }
        }
        expected = {
            'k1': {
                'k2': {
                    'Data': [e1, e2, e3, e4, e5, e6],
                    'Foo3': 'bar3'
                }
            }
        }
        func.side_effect = [res1, res2, res3]
        res = paginate_dict(
            func,
            'foo',
            bar='baz',
            alc_marker_path=['k1', 'k2', 'Marker'],
            alc_data_path=['k1', 'k2', 'Data'],
            alc_marker_param='MarkerParam'
        )
        assert res == expected
        assert func.mock_calls == [
            call('foo', bar='baz'),
            call(
                'foo',
                bar='baz',
                MarkerParam='marker1'
            ),
            call(
                'foo',
                bar='baz',
                MarkerParam='marker2'
            )
        ]
class TestDictFuncs(object):
    """Tests for the _get_dict_value_by_path / _set_dict_value_by_path helpers."""

    def test_get_dict_value_by_path(self):
        d = {
            'foo': {
                'bar': {
                    'baz': 'bazval'
                }
            }
        }
        path = ['foo', 'bar', 'baz']
        res = _get_dict_value_by_path(d, path)
        assert res == 'bazval'
        # make sure we don't modify inputs
        assert path == ['foo', 'bar', 'baz']
        assert d == {
            'foo': {
                'bar': {
                    'baz': 'bazval'
                }
            }
        }

    def test_get_dict_value_by_path_obj(self):
        e1 = Mock()
        e2 = Mock()
        d = {
            'k1': {
                'k2': {
                    'Marker': 'marker2',
                    'Data': [e1, e2],
                    'Foo2': 'bar2'
                }
            }
        }
        res = _get_dict_value_by_path(d, ['k1', 'k2', 'Data'])
        assert res == [e1, e2]

    def test_get_dict_value_by_path_none(self):
        # Missing leaf key resolves to None.
        d = {
            'foo': {
                'bar': {
                    'blam': 'blarg'
                }
            }
        }
        res = _get_dict_value_by_path(d, ['foo', 'bar', 'baz'])
        assert res is None

    def test_get_dict_value_by_path_deep_none(self):
        # Missing intermediate key also resolves to None.
        d = {'baz': 'blam'}
        res = _get_dict_value_by_path(d, ['foo', 'bar', 'baz'])
        assert res is None

    def test_set_dict_value_by_path(self):
        d = {
            'foo': {
                'bar': {
                    'baz': 'bazval'
                }
            }
        }
        path = ['foo', 'bar', 'baz']
        res = _set_dict_value_by_path(d, 'blam', path)
        assert res == {
            'foo': {
                'bar': {
                    'baz': 'blam'
                }
            }
        }
        # make sure we don't modify inputs
        assert path == ['foo', 'bar', 'baz']
        assert d == {
            'foo': {
                'bar': {
                    'baz': 'bazval'
                }
            }
        }

    def test_set_dict_value_by_path_none(self):
        # Setting a new leaf key preserves existing siblings.
        d = {
            'foo': {
                'bar': {
                    'blam': 'blarg'
                }
            }
        }
        res = _set_dict_value_by_path(d, 'blam', ['foo', 'bar', 'baz'])
        assert res == {
            'foo': {
                'bar': {
                    'baz': 'blam',
                    'blam': 'blarg'
                }
            }
        }

    def test_set_dict_value_by_path_deep_none(self):
        # Traversing through a non-dict value raises TypeError.
        d = {'foo': 'bar'}
        with pytest.raises(TypeError):
            _set_dict_value_by_path(d, 'blam', ['foo', 'bar', 'baz'])

    def test_set_dict_value_by_path_empty(self):
        # Empty path is a no-op returning the original dict.
        d = {'foo': 'bar'}
        res = _set_dict_value_by_path(d, 'baz', [])
        assert res == d
class TestGetCurrentVersion(object):
    """Tests for _get_latest_version(): PyPI check against the running version.

    Returns the newer version string when PyPI is ahead, else None.
    """

    def test_exception(self):
        # Network/HTTP failure is swallowed; logged at debug and None returned.
        mock_http = Mock()
        with patch('%s._VERSION_TUP' % pbm, (0, 2, 3)):
            with patch('%s.urllib3.PoolManager' % pbm, autospec=True) as m_pm:
                with patch('%s.logger' % pbm, autospec=True) as mock_logger:
                    m_pm.return_value = mock_http
                    mock_http.request.side_effect = RuntimeError()
                    res = _get_latest_version()
        assert res is None
        assert mock_logger.mock_calls == [
            call.debug('Error getting latest version from PyPI', exc_info=True)
        ]

    def test_older(self):
        # Running version older than PyPI: return the PyPI version string.
        mock_http = Mock()
        mock_resp = Mock(
            status=200, data='{"info": {"version": "1.0.1"}}'
        )
        with patch('%s._VERSION_TUP' % pbm, (0, 2, 3)):
            with patch('%s.urllib3.PoolManager' % pbm, autospec=True) as m_pm:
                with patch('%s.logger' % pbm, autospec=True) as mock_logger:
                    m_pm.return_value = mock_http
                    mock_http.request.return_value = mock_resp
                    res = _get_latest_version()
        assert res == '1.0.1'
        assert mock_logger.mock_calls == []

    def test_equal(self):
        mock_http = Mock()
        mock_resp = Mock(
            status=200, data='{"info": {"version": "0.2.3"}}'
        )
        with patch('%s._VERSION_TUP' % pbm, (0, 2, 3)):
            with patch('%s.urllib3.PoolManager' % pbm, autospec=True) as m_pm:
                with patch('%s.logger' % pbm, autospec=True) as mock_logger:
                    m_pm.return_value = mock_http
                    mock_http.request.return_value = mock_resp
                    res = _get_latest_version()
        assert res is None
        assert mock_logger.mock_calls == []

    def test_newer(self):
        # Running version newer than PyPI: nothing to report.
        mock_http = Mock()
        mock_resp = Mock(
            status=200, data='{"info": {"version": "0.1.2"}}'
        )
        with patch('%s._VERSION_TUP' % pbm, (0, 2, 3)):
            with patch('%s.urllib3.PoolManager' % pbm, autospec=True) as m_pm:
                with patch('%s.logger' % pbm, autospec=True) as mock_logger:
                    m_pm.return_value = mock_http
                    mock_http.request.return_value = mock_resp
                    res = _get_latest_version()
        assert res is None
        assert mock_logger.mock_calls == []
class TestColorOutput(object):
    """Tests for color_output(): termcolor wrapper with an on/off switch."""

    def test_colored(self):
        assert color_output('foo', 'yellow') == termcolor.colored(
            'foo', 'yellow')

    def test_not_colored(self):
        # colorize=False passes the string through unchanged.
        assert color_output(
            'foo', 'yellow', colorize=False
        ) == 'foo'
class TestIssueStringTuple(object):
    """Tests for issue_string_tuple(): (label, message) for limit violations.

    color_output is patched with a stub that wraps its input in xX...Xx so
    the assertions can verify both the text and the color applied to it.
    """

    def test_crit_one(self):
        mock_limit = Mock(spec_set=AwsLimit)
        type(mock_limit).name = 'limitname'
        mock_limit.get_limit.return_value = 12
        c1 = AwsLimitUsage(mock_limit, 56)

        def se_color(s, c, colorize=True):
            return 'xX%sXx' % s

        with patch('%s.color_output' % pbm) as m_co:
            m_co.side_effect = se_color
            res = issue_string_tuple(
                'svcname',
                mock_limit,
                [c1],
                []
            )
        assert res == ('svcname/limitname',
                       '(limit 12) xXCRITICAL: 56Xx')
        assert m_co.mock_calls == [
            call('CRITICAL: 56', 'red', colorize=True)
        ]

    def test_crit_multi(self):
        # Multiple criticals: sorted, with resource IDs shown where present.
        mock_limit = Mock(spec_set=AwsLimit)
        type(mock_limit).name = 'limitname'
        mock_limit.get_limit.return_value = 5
        c1 = AwsLimitUsage(mock_limit, 10)
        c2 = AwsLimitUsage(mock_limit, 12, resource_id='c2id')
        c3 = AwsLimitUsage(mock_limit, 8)

        def se_color(s, c, colorize=True):
            return 'xX%sXx' % s

        with patch('%s.color_output' % pbm) as m_co:
            m_co.side_effect = se_color
            res = issue_string_tuple(
                'svcname',
                mock_limit,
                [c1, c2, c3],
                []
            )
        assert res == ('svcname/limitname',
                       '(limit 5) xXCRITICAL: 8, 10, c2id=12Xx')
        assert m_co.mock_calls == [
            call('CRITICAL: 8, 10, c2id=12', 'red', colorize=True)
        ]

    def test_warn_one(self):
        mock_limit = Mock(spec_set=AwsLimit)
        type(mock_limit).name = 'limitname'
        mock_limit.get_limit.return_value = 12
        w1 = AwsLimitUsage(mock_limit, 11)

        def se_color(s, c, colorize=True):
            return 'xX%sXx' % s

        with patch('%s.color_output' % pbm) as m_co:
            m_co.side_effect = se_color
            res = issue_string_tuple(
                'svcname',
                mock_limit,
                [],
                [w1]
            )
        assert res == ('svcname/limitname', '(limit 12) xXWARNING: 11Xx')
        assert m_co.mock_calls == [
            call('WARNING: 11', 'yellow', colorize=True)
        ]

    def test_warn_multi(self):
        mock_limit = Mock(spec_set=AwsLimit)
        type(mock_limit).name = 'limitname'
        mock_limit.get_limit.return_value = 12
        w1 = AwsLimitUsage(mock_limit, 11)
        w2 = AwsLimitUsage(mock_limit, 10, resource_id='w2id')
        w3 = AwsLimitUsage(mock_limit, 10, resource_id='w3id')

        def se_color(s, c, colorize=True):
            return 'xX%sXx' % s

        with patch('%s.color_output' % pbm) as m_co:
            m_co.side_effect = se_color
            res = issue_string_tuple(
                'svcname',
                mock_limit,
                [],
                [w1, w2, w3]
            )
        assert res == ('svcname/limitname',
                       '(limit 12) xXWARNING: w2id=10, w3id=10, 11Xx')
        assert m_co.mock_calls == [
            call('WARNING: w2id=10, w3id=10, 11', 'yellow', colorize=True)
        ]

    def test_both_one(self):
        # colorize=False must be forwarded to color_output.
        mock_limit = Mock(spec_set=AwsLimit)
        type(mock_limit).name = 'limitname'
        mock_limit.get_limit.return_value = 12
        c1 = AwsLimitUsage(mock_limit, 10)
        w1 = AwsLimitUsage(mock_limit, 10, resource_id='w3id')

        def se_color(s, c, colorize=True):
            return 'xX%sXx' % s

        with patch('%s.color_output' % pbm) as m_co:
            m_co.side_effect = se_color
            res = issue_string_tuple(
                'svcname',
                mock_limit,
                [c1],
                [w1],
                colorize=False
            )
        assert res == ('svcname/limitname',
                       '(limit 12) xXCRITICAL: 10Xx xXWARNING: w3id=10Xx')
        assert m_co.mock_calls == [
            call('CRITICAL: 10', 'red', colorize=False),
            call('WARNING: w3id=10', 'yellow', colorize=False)
        ]

    def test_both_multi(self):
        mock_limit = Mock(spec_set=AwsLimit)
        type(mock_limit).name = 'limitname'
        mock_limit.get_limit.return_value = 12
        c1 = AwsLimitUsage(mock_limit, 10)
        c2 = AwsLimitUsage(mock_limit, 12, resource_id='c2id')
        c3 = AwsLimitUsage(mock_limit, 8)
        w1 = AwsLimitUsage(mock_limit, 11)
        w2 = AwsLimitUsage(mock_limit, 10, resource_id='w2id')
        w3 = AwsLimitUsage(mock_limit, 10, resource_id='w3id')

        def se_color(s, c, colorize=True):
            return 'xX%sXx' % s

        with patch('%s.color_output' % pbm) as m_co:
            m_co.side_effect = se_color
            res = issue_string_tuple(
                'svcname',
                mock_limit,
                [c1, c2, c3],
                [w1, w2, w3]
            )
        assert res == ('svcname/limitname',
                       '(limit 12) xXCRITICAL: 8, 10, c2id=12Xx '
                       'xXWARNING: w2id=10, w3id=10, 11Xx')
        assert m_co.mock_calls == [
            call('CRITICAL: 8, 10, c2id=12', 'red', colorize=True),
            call('WARNING: w2id=10, w3id=10, 11', 'yellow', colorize=True)
        ]
| agpl-3.0 |
glaubitz/fs-uae-debian | arcade/OpenGL/raw/GLX/EXT/import_context.py | 8 | 1202 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLX import _types as _cs
# End users want this...
from OpenGL.raw.GLX._types import *
from OpenGL.raw.GLX import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLX_EXT_import_context'

def _f( function ):
    # Bind one of the stub signatures below to the real GLX entry point for
    # this extension, with the standard GLX error checker attached.
    return _p.createFunction( function,_p.PLATFORM.GLX,'GLX_EXT_import_context',error_checker=_errors._error_checker)

# Extension enumerant constants defined by GLX_EXT_import_context.
GLX_SCREEN_EXT=_C('GLX_SCREEN_EXT',0x800C)
GLX_SHARE_CONTEXT_EXT=_C('GLX_SHARE_CONTEXT_EXT',0x800A)
GLX_VISUAL_ID_EXT=_C('GLX_VISUAL_ID_EXT',0x800B)

# Each stub below only declares the ctypes signature via @_p.types; the @_f
# decorator then replaces the stub with the generated platform function.
@_f
@_p.types(None,ctypes.POINTER(_cs.Display),_cs.GLXContext)
def glXFreeContextEXT(dpy,context):pass
@_f
@_p.types(_cs.GLXContextID,_cs.GLXContext)
def glXGetContextIDEXT(context):pass
@_f
@_p.types(ctypes.POINTER(_cs.Display),)
def glXGetCurrentDisplayEXT():pass
@_f
@_p.types(_cs.GLXContext,ctypes.POINTER(_cs.Display),_cs.GLXContextID)
def glXImportContextEXT(dpy,contextID):pass
@_f
@_p.types(_cs.c_int,ctypes.POINTER(_cs.Display),_cs.GLXContext,_cs.c_int,ctypes.POINTER(_cs.c_int))
def glXQueryContextInfoEXT(dpy,context,attribute,value):pass
| gpl-2.0 |
onebit0fme/conveyance-tornado | examples.py | 1 | 4899 |
# Example GET payload: "resources" declares upstream HTTP requests,
# "definitions" declares reusable values (optionally validated by a JSON
# "schema"), and "compose" builds the response body from them.  As used below,
# "@name..." apparently references a resource ("$resp" its response,
# "$$attr" a response attribute) and "$name" references a definition.
# NOTE(review): "@question" and "@answers" in "compose" have no matching
# entries under "resources" -- presumably illustrative only; confirm.
PAYLOAD_GET_EXAMPLE = {
    "compose": {
        "body": {
            "type": "object",
            "value": {
                "response_code": {
                    "type": "integer",
                    "value": "@question.$resp.$$status_code",
                },
                "answer_ids": {
                    "type": "array",
                    "value": "@answers",
                },
                "public": {
                    "type": "boolean",
                    "value": "@question.$resp.is_public",
                },
                "id": {
                    "type": "integer",
                    "value": "@question.$resp.id"
                }
            }
        }
    },
    "resources": {
        "post": {
            "url": {
                "protocol": "http",
                "hostname": "jsonplaceholder.typicode.com",
                # "path": "/posts/{$post_id}" # TODO: handle references inside strings
                "path": "/posts/1"
            },
            "method": "GET",
            "headers": {
                "Content-Type": "application/json"
            },
        },
        "comments": {
            "url": {
                # Reuse the host/protocol declared on the "post" resource.
                "hostname": "@post.url.hostname",
                "protocol": "@post.url.protocol",
                "path": "/comments"
            },
            "method": "GET",
            "headers": {
                "Content-Type": "application/json"
            },
            "parameters": {
                "post_id": "$post_id"
            }
        }
    },
    "definitions": {
        "post_id": {
            # "type": 'integer',
            "value": 1,
            "schema": {
                "type": "integer"
            }
        },
        "text_obj": {
            # "type": 'object',
            "value": {
                "id": 1,
                "name": "Jeronimo"
            }
        },
        "how_to": {
            # "type": 'string',
            # Unresolvable reference with a fallback "default" value;
            # "verbatim": False means the value is treated as a reference.
            "value": '$this.$do.$$that',
            "verbatim": False,
            "default": "This is default value"
        },
        "test": {
            # "type": 'object',
            "value": '$text_obj.name',
            "schema": {
                "type": "string"
            }
        },
        "object": {
            # Definition composed from other definitions, schema-validated.
            "value": {
                "id": "$post_id",
                "name": "$how_to"
            },
            "schema": {
                "type": "object",
                "properties": {
                    "id": {
                        "type": "integer"
                    },
                    "name": {
                        "type": "string"
                    }
                }
            }
        }
    }
}
# Variant of PAYLOAD_GET_EXAMPLE: the composed body simply embeds the raw
# responses of both resources via intermediate definitions.
PAYLOAD_GET_EXAMPLE_v2 = {
    "compose": {
        "body": {
            "type": "object",
            "value": {
                "POST": "$post",
                "COMMENTS": "$comments"
            }
        }
    },
    "resources": {
        "post": {
            "url": {
                "protocol": "http",
                "hostname": "jsonplaceholder.typicode.com",
                "path": "/posts/1"
            },
            "method": "GET",
            "headers": {
                "Content-Type": "application/json"
            },
        },
        "comments": {
            "url": {
                "hostname": "@post.url.hostname",
                "protocol": "@post.url.protocol",
                "path": "/posts/1/comments"
            },
            "method": "GET",
            "headers": {
                "Content-Type": "application/json"
            },
            "parameters": {
                # NOTE(review): "$post_id" has no matching entry under
                # "definitions" in this payload -- confirm intended.
                "post_id": "$post_id"
            }
        }
    },
    "definitions": {
        # Expose each resource's full response body as a definition.
        "post": {
            "value": "@post.$resp"
        },
        "comments": {
            "value": "@comments.$resp"
        }
    }
}
# Chained-resource example: the "user" request's path is built from a value
# in the "post" response ("{@post.$resp.userId}"), demonstrating references
# inside path strings.
PAYLOAD_GET_EXAMPLE_3 = {
    "compose": {
        "body": {
            "type": "object",
            "value": {
                "POST": "@post.$resp",
                "USER": "@user.$resp"
            }
        }
    },
    "resources": {
        "post": {
            "url": {
                "protocol": "http",
                "hostname": "jsonplaceholder.typicode.com",
                "path": "/posts/{$post_id}"
            },
            "method": "GET",
            "headers": {
                "Content-Type": "application/json"
            }
        },
        "user": {
            "url": {
                "hostname": "@post.url.hostname",
                "protocol": "@post.url.protocol",
                "path": "/users/{@post.$resp.userId}"
            },
            "method": "GET",
            "headers": {
                "Content-Type": "application/json"
            },
            "parameters": {
                "post_id": "$post_id"
            }
        }
    },
    "definitions": {
        "post_id": {
            "value": 1
        }
    }
}
| gpl-2.0 |
nipunbatra/bayespy | bayespy/inference/vmp/nodes/dot.py | 2 | 18959 | ######################################################################
# Copyright (C) 2011-2014 Jaakko Luttinen
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
######################################################################
######################################################################
# This file is part of BayesPy.
#
# BayesPy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# BayesPy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BayesPy. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import numpy as np
from bayespy.utils import utils
from .node import Node
from .deterministic import Deterministic
from .gaussian import Gaussian, GaussianMoments
class SumMultiply(Deterministic):
    """
    Compute the sum-product of Gaussian nodes similarly to numpy.einsum.

    For instance, the equivalent of

        np.einsum('abc,bd,ca->da', X, Y, Z)

    would be given as

        SumMultiply('abc,bd,ca->da', X, Y, Z)

    or

        SumMultiply(X, [0,1,2], Y, [1,3], Z, [2,0], [3,0])

    which is similar to the other syntax of numpy.einsum.

    This node operates similarly as numpy.einsum. However, you must use all the
    elements of each node, that is, an operation like np.einsum('ii->i',X) is
    not allowed. Thus, for each node, each axis must be given unique id. The id
    identifies which axes correspond to which axes between the different
    nodes. Also, Ellipsis ('...') is not yet supported for simplicity. It would
    also have some problems with constant inputs (because how to determine
    ndim), so let us just forget it for now.

    Each output axis must appear in the input mappings.

    The keys must refer to variable dimension axes only, not plate axes.

    The convenient string notation of numpy.einsum is not yet implemented.

    Examples
    --------

    Sum over the rows:
    'ij->j'

    Inner product of three vectors:
    'i,i,i'

    Matrix-vector product:
    'ij,j->i'

    Matrix-matrix product:
    'ik,kj->ij'

    Outer product:
    'i,j->ij'

    Vector-matrix-vector product:
    'i,ij,j'

    Note
    ----

    This operation can be extremely slow if not used wisely. For large and
    complex operations, it is sometimes more efficient to split the operation
    into multiple nodes. For instance, the example above could probably be
    computed faster by

        XZ = SumMultiply(X, [0,1,2], Z, [2,0], [0,1])
        SumMultiply(XZ, [0,1], Y, [1,2], [2,0])

    because the third axis ('c') could be summed out already in the first
    operation. This same effect applies also to numpy.einsum in general.
    """

    def __init__(self, *args, iterator_axis=None, **kwargs):
        """
        SumMultiply(Node1, map1, Node2, map2, ..., NodeN, mapN [, map_out])

        Alternatively the first positional argument may be an einsum-style
        string ('ik,k,kj->ij') followed by the nodes.  Keys are internally
        renamed to 0..N-1 and stored in self.in_keys / self.out_keys.
        """
        args = list(args)

        if len(args) < 2:
            raise ValueError("Not enough inputs")

        if iterator_axis is not None:
            raise NotImplementedError("Iterator axis not implemented yet")
        # NOTE(review): unreachable -- any non-None iterator_axis already
        # raised NotImplementedError above.
        if iterator_axis is not None and not isinstance(iterator_axis, int):
            raise ValueError("Iterator axis must be integer")

        # Two different parsing methods, depends on how the arguments are given
        if utils.is_string(args[0]):
            # This is the format:
            # SumMultiply('ik,k,kj->ij', X, Y, Z)
            strings = args[0]
            nodes = args[1:]
            # Remove whitespace
            strings = utils.remove_whitespace(strings)
            # Split on '->' (should contain only one '->' or none)
            strings = strings.split('->')
            if len(strings) > 2:
                raise ValueError('The string contains too many ->')
            strings_in = strings[0]
            if len(strings) == 2:
                string_out = strings[1]
            else:
                # No explicit output: a full contraction to a scalar
                string_out = ''
            # Split former part on ',' (the number of parts should be equal to
            # nodes)
            strings_in = strings_in.split(',')
            if len(strings_in) != len(nodes):
                raise ValueError('Number of given input nodes is different '
                                 'from the input keys in the string')
            # Split strings into key lists using single character keys
            keysets = [list(string_in) for string_in in strings_in]
            keys_out = list(string_out)
        else:
            # This is the format:
            # SumMultiply(X, [0,2], Y, [2], Z, [2,1], [0,1])
            # If given, the output mapping is the last argument
            if len(args) % 2 == 0:
                keys_out = []
            else:
                keys_out = args.pop(-1)
            # Node and axis mapping are given in turns
            nodes = args[::2]
            keysets = args[1::2]

        # Find all the keys (store only once each)
        full_keyset = []
        for keyset in keysets:
            full_keyset += keyset
            #full_keyset += list(keyset.keys())
        full_keyset = list(set(full_keyset))

        # Input and output messages are Gaussian
        self._moments = GaussianMoments(len(keys_out))
        self._parent_moments = [GaussianMoments(len(keyset))
                                for keyset in keysets]

        #
        # Check the validity of each node
        #
        for n in range(len(nodes)):
            # Convert constant arrays to constant nodes
            nodes[n] = self._ensure_moments(nodes[n],
                                            self._parent_moments[n])
            # Check that the maps and the size of the variable are consistent
            if len(nodes[n].dims[0]) != len(keysets[n]):
                raise ValueError("Wrong number of keys (%d) for the node "
                                 "number %d with %d dimensions"
                                 % (len(keysets[n]),
                                    n,
                                    len(nodes[n].dims[0])))
            # Check that the keys are unique
            if len(set(keysets[n])) != len(keysets[n]):
                raise ValueError("Axis keys for node number %d are not unique"
                                 % n)
            # Check that the dims are proper Gaussians: the second moment must
            # have the shape of the first moment's shape doubled (mean, and
            # mean x mean covariance block).
            if len(nodes[n].dims) != 2:
                raise ValueError("Node %d is not Gaussian" % n)
            if nodes[n].dims[0] + nodes[n].dims[0] != nodes[n].dims[1]:
                raise ValueError("Node %d is not Gaussian" % n)

        # Check the validity of output keys: each output key must be included in
        # the input keys
        if len(keys_out) != len(set(keys_out)):
            raise ValueError("Output keys are not unique")
        for key in keys_out:
            if key not in full_keyset:
                raise ValueError("Output key %s does not appear in any input"
                                 % key)

        # Check the validity of the nodes with respect to the key mapping.
        # Check that the node dimensions map and broadcast properly, that is,
        # all the nodes using the same key for axes must have equal size for
        # those axes (or size 1).
        broadcasted_size = {}
        for key in full_keyset:
            broadcasted_size[key] = 1
            for (node, keyset) in zip(nodes, keysets):
                try:
                    # Find the axis for the key
                    index = keyset.index(key)
                except ValueError:
                    # OK, this node doesn't use this key for any axis
                    pass
                else:
                    # Length of the axis for that key
                    node_size = node.dims[0][index]
                    if node_size != broadcasted_size[key]:
                        if broadcasted_size[key] == 1:
                            # Apply broadcasting
                            broadcasted_size[key] = node_size
                        elif node_size != 1:
                            # Different sizes and neither has size 1
                            raise ValueError("Axes using key %s do not "
                                             "broadcast properly"
                                             % key)

        # Compute the shape of the output
        dim0 = [broadcasted_size[key] for key in keys_out]
        dim1 = dim0 + dim0

        # Rename the keys to [0,1,...,N-1] where N is the total number of keys
        self.N_keys = len(full_keyset)
        self.out_keys = [full_keyset.index(key) for key in keys_out]
        self.in_keys = [ [full_keyset.index(key) for key in keyset]
                         for keyset in keysets ]

        super().__init__(*nodes,
                         dims=(tuple(dim0),tuple(dim1)),
                         **kwargs)

    def _compute_moments(self, *u_parents):
        """Compute [mean, covariance-moment] of the sum-product via einsum."""
        # Compute the number of plate axes for each node (the moments' leading
        # axes beyond the keyed dimension axes are plates).
        plate_counts0 = [(np.ndim(u_parent[0]) - len(keys))
                         for (keys,u_parent) in zip(self.in_keys, u_parents)]
        plate_counts1 = [(np.ndim(u_parent[1]) - 2*len(keys))
                         for (keys,u_parent) in zip(self.in_keys, u_parents)]
        # The number of plate axes for the output
        N0 = max(plate_counts0)
        N1 = max(plate_counts1)
        # The total number of unique keys used (keys are 0,1,...,N_keys-1)
        D = self.N_keys

        #
        # Compute the mean
        #
        # Plate axes get fresh keys >= D so they are kept (not summed).
        out_all_keys = list(range(D+N0-1, D-1, -1)) + self.out_keys
        #nodes_dim_keys = self.nodes_dim_keys
        in_all_keys = [list(range(D+plate_count-1, D-1, -1)) + keys
                       for (plate_count, keys) in zip(plate_counts0,
                                                      self.in_keys)]
        u0 = [u[0] for u in u_parents]
        # Interleave operands and their key lists for np.einsum's list form.
        args = utils.zipper_merge(u0, in_all_keys) + [out_all_keys]
        x0 = np.einsum(*args)

        #
        # Compute the covariance
        #
        # Second moments have doubled dimension axes, so use key and key+D for
        # the two copies of each keyed axis.
        out_all_keys = (list(range(2*D+N1-1, 2*D-1, -1))
                        + [D+key for key in self.out_keys]
                        + self.out_keys)
        in_all_keys = [list(range(2*D+plate_count-1, 2*D-1, -1))
                       + [D+key for key in node_keys]
                       + node_keys
                       for (plate_count, node_keys) in zip(plate_counts1,
                                                           self.in_keys)]
        u1 = [u[1] for u in u_parents]
        args = utils.zipper_merge(u1, in_all_keys) + [out_all_keys]
        x1 = np.einsum(*args)

        return [x0, x1]

    def get_parameters(self):
        """Return [mean, variance] (second moment minus squared mean)."""
        # Compute mean and variance
        u = self.get_moments()
        # NOTE(review): this subtracts in place -- if get_moments() ever
        # returns a shared/cached list, the stored moments would be
        # corrupted; confirm get_moments() returns a fresh copy.
        u[1] -= u[0]**2
        return u

    def _message_to_parent(self, index):
        """
        Compute the message and mask to a parent node.
        """

        # Check index
        if index >= len(self.parents):
            raise ValueError("Parent index larger than the number of parents")

        # Get messages from other parents and children
        u_parents = self._message_from_parents(exclude=index)
        m = self._message_from_children()
        mask = self.mask

        # Normally we don't need to care about masks when computing the
        # message. However, in this node we want to avoid computing huge message
        # arrays so we sum some axes already here. Thus, we need to apply the
        # mask.

        parent = self.parents[index]

        #
        # Compute the first message
        #
        msg = [None, None]

        # Compute the two messages (ind=0: mean part, ind=1: covariance part)
        for ind in range(2):

            # The total number of keys for the non-plate dimensions
            N = (ind+1) * self.N_keys

            # Add an array of ones to ensure proper shape and number of
            # plates. Note that this adds an axis for each plate. At the end, we
            # want to remove axes that were created only because of this
            parent_num_dims = len(parent.dims[ind])
            parent_num_plates = len(parent.plates)
            parent_plate_keys = list(range(N + parent_num_plates,
                                           N,
                                           -1))
            parent_dim_keys = self.in_keys[index]
            if ind == 1:
                # Doubled axes for the second moment: keys and keys+N_keys
                parent_dim_keys = ([key + self.N_keys
                                    for key in self.in_keys[index]]
                                   + parent_dim_keys)
            args = []
            args.append(np.ones((1,)*parent_num_plates + parent.dims[ind]))
            args.append(parent_plate_keys + parent_dim_keys)

            # This variable counts the maximum number of plates of the
            # arguments, thus it will tell the number of plates in the result
            # (if the artificially added plates above were ignored).
            result_num_plates = 0
            result_plates = ()

            # Mask and its keys
            mask_num_plates = np.ndim(mask)
            mask_plates = np.shape(mask)
            mask_plate_keys = list(range(N + mask_num_plates,
                                         N,
                                         -1))
            result_num_plates = max(result_num_plates,
                                    mask_num_plates)
            result_plates = utils.broadcasted_shape(result_plates,
                                                    mask_plates)
            args.append(mask)
            args.append(mask_plate_keys)

            # Moments and keys of other parents
            for (k, u) in enumerate(u_parents):
                if k != index:
                    num_dims = (ind+1) * len(self.in_keys[k])
                    num_plates = np.ndim(u[ind]) - num_dims
                    plates = np.shape(u[ind])[:num_plates]
                    plate_keys = list(range(N + num_plates,
                                            N,
                                            -1))
                    dim_keys = self.in_keys[k]
                    if ind == 1:
                        dim_keys = ([key + self.N_keys
                                     for key in self.in_keys[k]]
                                    + dim_keys)
                    args.append(u[ind])
                    args.append(plate_keys + dim_keys)
                    result_num_plates = max(result_num_plates, num_plates)
                    result_plates = utils.broadcasted_shape(result_plates,
                                                            plates)

            # Message and keys from children
            child_num_dims = (ind+1) * len(self.out_keys)
            child_num_plates = np.ndim(m[ind]) - child_num_dims
            child_plates = np.shape(m[ind])[:child_num_plates]
            child_plate_keys = list(range(N + child_num_plates,
                                          N,
                                          -1))
            child_dim_keys = self.out_keys
            if ind == 1:
                child_dim_keys = ([key + self.N_keys
                                   for key in self.out_keys]
                                  + child_dim_keys)
            args.append(m[ind])
            args.append(child_plate_keys + child_dim_keys)
            result_num_plates = max(result_num_plates, child_num_plates)
            result_plates = utils.broadcasted_shape(result_plates,
                                                    child_plates)

            # Output keys, that is, the keys of the parent[index]
            parent_keys = parent_plate_keys + parent_dim_keys

            # Performance trick: Check which axes can be summed because they
            # have length 1 or are non-existing in parent[index]. Thus, remove
            # keys corresponding to unit length axes in parent[index] so that
            # einsum sums over those axes. After computations, these axes must
            # be added back in order to get the correct shape for the message.
            parent_shape = parent.get_shape(ind)
            removed_axes = []
            for j in range(len(parent_keys)):
                if parent_shape[j] == 1:
                    # Remove the key (take into account the number of keys that
                    # have already been removed)
                    del parent_keys[j-len(removed_axes)]
                    removed_axes.append(j)
            args.append(parent_keys)

            # THE BEEF: Compute the message
            msg[ind] = np.einsum(*args)

            # Find the correct shape for the message array
            message_shape = list(np.shape(msg[ind]))
            # First, add back the axes with length 1
            for ax in removed_axes:
                message_shape.insert(ax, 1)
            # Second, remove leading axes for plates that were not present in
            # the child nor other parents' messages. This is not really
            # necessary, but it is just elegant to remove the leading unit
            # length axes that we added artificially at the beginning just
            # because we wanted the key mapping to be simple.
            if parent_num_plates > result_num_plates:
                del message_shape[:(parent_num_plates-result_num_plates)]
            # Then, the actual reshaping
            msg[ind] = np.reshape(msg[ind], message_shape)

            # Apply plate multiplier: If this node has non-unit plates that are
            # unit plates in the parent, those plates are summed. However, if
            # the message has unit axis for that plate, it should be first
            # broadcasted to the plates of this node and then summed to the
            # plates of the parent. In order to avoid this broadcasting and
            # summing, it is more efficient to just multiply by the correct
            # factor.
            r = self._plate_multiplier(self.plates,
                                       result_plates,
                                       parent.plates)
            if r != 1:
                msg[ind] *= r

        return msg
def Dot(*args, **kwargs):
    """
    Node for computing the inner (dot) product of several Gaussian vectors.

    Thin backward-compatibility wrapper around the much more general
    SumMultiply node: every input vector is mapped to the single axis key
    'i' and no output key is given, so the result is a scalar.
    """
    # Build the einsum-style mapping "i,i,...,i" -- one 'i' per input node.
    mapping = ','.join(['i'] * max(len(args), 1))
    return SumMultiply(mapping, *args, **kwargs)
| gpl-3.0 |
dkerwin/ansible-modules-core | web_infrastructure/apache2_module.py | 35 | 3232 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013-2014, Christian Berendt <berendt@b1-systems.de>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: apache2_module
version_added: 1.6
author: "Christian Berendt (@berendt)"
short_description: enables/disables a module of the Apache2 webserver
description:
- Enables or disables a specified module of the Apache2 webserver.
options:
name:
description:
- name of the module to enable/disable
required: true
state:
description:
- indicate the desired state of the resource
choices: ['present', 'absent']
default: present
requirements: ["a2enmod","a2dismod"]
'''
EXAMPLES = '''
# enables the Apache2 module "wsgi"
- apache2_module: state=present name=wsgi
# disables the Apache2 module "wsgi"
- apache2_module: state=absent name=wsgi
'''
import re
def _disable_module(module):
    """Disable the Apache module named in module.params['name'] via a2dismod.

    Reports the outcome through module.exit_json()/fail_json() (which, in
    Ansible, terminate module execution).  Idempotent: an already-disabled
    module reports changed=False.
    """
    name = module.params['name']
    a2dismod_binary = module.get_bin_path("a2dismod")
    if a2dismod_binary is None:
        module.fail_json(msg="a2dismod not found. Perhaps this system does not use a2dismod to manage apache")

    result, stdout, stderr = module.run_command("%s %s" % (a2dismod_binary, name))

    # re.escape: module names may contain regex metacharacters (e.g. "php5.6",
    # where an unescaped "." would match any character).
    if re.match(r'.*\b' + re.escape(name) + r' already disabled', stdout, re.S | re.M):
        module.exit_json(changed=False, result="Success")
    elif result != 0:
        module.fail_json(msg="Failed to disable module %s: %s" % (name, stdout))
    else:
        module.exit_json(changed=True, result="Disabled")
def _enable_module(module):
    """Enable the Apache module named in module.params['name'] via a2enmod.

    Reports the outcome through module.exit_json()/fail_json() (which, in
    Ansible, terminate module execution).  Idempotent: an already-enabled
    module reports changed=False.
    """
    name = module.params['name']
    a2enmod_binary = module.get_bin_path("a2enmod")
    if a2enmod_binary is None:
        module.fail_json(msg="a2enmod not found. Perhaps this system does not use a2enmod to manage apache")

    result, stdout, stderr = module.run_command("%s %s" % (a2enmod_binary, name))

    # re.escape: module names may contain regex metacharacters (e.g. "php5.6",
    # where an unescaped "." would match any character).
    if re.match(r'.*\b' + re.escape(name) + r' already enabled', stdout, re.S | re.M):
        module.exit_json(changed=False, result="Success")
    elif result != 0:
        module.fail_json(msg="Failed to enable module %s: %s" % (name, stdout))
    else:
        module.exit_json(changed=True, result="Enabled")
def main():
    """Module entry point: parse arguments and apply the requested state."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            state=dict(default='present', choices=['absent', 'present']),
        ),
    )

    state = module.params['state']
    # Both helpers terminate the module via exit_json/fail_json, so at most
    # one of these branches ever runs to completion.
    if state == 'present':
        _enable_module(module)
    if state == 'absent':
        _disable_module(module)
# import module snippets
from ansible.module_utils.basic import *
# Run the module entry point when Ansible executes this file directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
sestrella/ansible | lib/ansible/module_utils/facts/compat.py | 147 | 4115 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.namespace import PrefixFactNamespace
from ansible.module_utils.facts import default_collectors
from ansible.module_utils.facts import ansible_collector
def get_all_facts(module):
    """Compatibility shim for the Ansible 2.2/2.3 ``get_all_facts`` API.

    Expects ``module`` to be an AnsibleModule instance with a
    ``gather_subset`` parameter.  Returns a dict mapping the bare fact name
    ('default_ipv4' with no 'ansible_' namespace) to the fact value.
    """
    return ansible_facts(module, gather_subset=module.params['gather_subset'])
def ansible_facts(module, gather_subset=None):
    """Compatibility shim for the Ansible 2.0/2.2/2.3 ``ansible_facts`` APIs.

    2.2/2.3 pass a ``gather_subset`` argument; 2.0/2.1 do not, so it is
    optional here and falls back to the module's configured parameter.
    ``module`` should be an AnsibleModule instance.

    Returns a dict mapping the bare fact name ('default_ipv4' with no
    'ansible_' namespace) to the fact value.
    """
    subset = gather_subset or module.params.get('gather_subset', ['all'])
    timeout = module.params.get('gather_timeout', 10)
    filter_spec = module.params.get('filter', '*')

    # Facts that are always collected regardless of the requested subset.
    minimal_gather_subset = frozenset(['apparmor', 'caps', 'cmdline', 'date_time',
                                       'distribution', 'dns', 'env', 'fips', 'local',
                                       'lsb', 'pkg_mgr', 'platform', 'python', 'selinux',
                                       'service_mgr', 'ssh_pub_keys', 'user'])

    # Empty prefix so fact names are bare (no 'ansible_' namespace).
    namespace = PrefixFactNamespace(namespace_name='ansible', prefix='')

    collector = ansible_collector.get_ansible_collector(
        all_collector_classes=default_collectors.collectors,
        namespace=namespace,
        filter_spec=filter_spec,
        gather_subset=subset,
        gather_timeout=timeout,
        minimal_gather_subset=minimal_gather_subset)

    return collector.collect(module=module)
| gpl-3.0 |
clairetang6/bokeh | examples/plotting/file/unemployment.py | 2 | 1774 | from math import pi
from bokeh.models import HoverTool
from bokeh.plotting import ColumnDataSource, figure, show, output_file
from bokeh.sampledata.unemployment1948 import data
# The axis ranges below are categorical, so years must be strings.
data['Year'] = [str(x) for x in data['Year']]
years = list(data['Year'])
months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
          "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
data = data.set_index('Year')

# this is the colormap from the original NYTimes plot
colors = ["#75968f", "#a5bab7", "#c9d9d3", "#e2e2e2", "#dfccce",
          "#ddb7b1", "#cc7878", "#933b41", "#550b1d"]

# Build one record per (year, month) cell, mapping each unemployment rate
# onto the colormap (rates are assumed to fall roughly in the 2-10% range).
month = []
year = []
color = []
rate = []
for yr in years:
    for mo in months:
        value = data[mo][yr]
        month.append(mo)
        year.append(yr)
        rate.append(value)
        color.append(colors[min(int(value) - 2, 8)])

source = ColumnDataSource(
    data=dict(month=month, year=year, color=color, rate=rate)
)

TOOLS = "resize,hover,save,pan,box_zoom,wheel_zoom"

fig = figure(title="US Unemployment (1948 - 2013)",
             x_range=years, y_range=list(reversed(months)),
             x_axis_location="above", plot_width=900, plot_height=400,
             toolbar_location="left", tools=TOOLS)

# Strip the default grid/axis chrome for a clean heatmap look.
fig.grid.grid_line_color = None
fig.axis.axis_line_color = None
fig.axis.major_tick_line_color = None
fig.axis.major_label_text_font_size = "5pt"
fig.axis.major_label_standoff = 0
fig.xaxis.major_label_orientation = pi / 3

fig.rect("year", "month", 1, 1, source=source,
         color="color", line_color=None)

fig.select_one(HoverTool).tooltips = [
    ('date', '@month @year'),
    ('rate', '@rate'),
]

output_file('unemployment.html', title="unemployment.py example")

show(fig)  # show the plot
| bsd-3-clause |
vijayanandnandam/youtube-dl | youtube_dl/extractor/playfm.py | 207 | 2599 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
int_or_none,
parse_iso8601,
)
class PlayFMIE(InfoExtractor):
    """Extractor for play.fm recording pages.

    Looks the recording up in the v2 JSON API by its URL slug and maps the
    response onto youtube-dl's standard info dict.
    """
    IE_NAME = 'play.fm'
    # The slug is the full multi-segment path; the last segment doubles as a
    # provisional id until the API supplies a numeric one.
    _VALID_URL = r'https?://(?:www\.)?play\.fm/(?P<slug>(?:[^/]+/)+(?P<id>[^/]+))/?(?:$|[?#])'

    _TEST = {
        'url': 'https://www.play.fm/dan-drastic/sven-tasnadi-leipzig-electronic-music-batofar-paris-fr-2014-07-12',
        'md5': 'c505f8307825a245d0c7ad1850001f22',
        'info_dict': {
            'id': '71276',
            'ext': 'mp3',
            'title': 'Sven Tasnadi - LEIPZIG ELECTRONIC MUSIC @ Batofar (Paris,FR) - 2014-07-12',
            'description': '',
            'duration': 5627,
            'timestamp': 1406033781,
            'upload_date': '20140722',
            'uploader': 'Dan Drastic',
            'uploader_id': '71170',
            'view_count': int,
            'comment_count': int,
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        slug = mobj.group('slug')

        # Fetch recording metadata by full slug.
        recordings = self._download_json(
            'http://v2api.play.fm/recordings/slug/%s' % slug, video_id)

        # The API reports failures as an "error" object inside the JSON body.
        error = recordings.get('error')
        if isinstance(error, dict):
            raise ExtractorError(
                '%s returned error: %s' % (self.IE_NAME, error.get('message')),
                expected=True)

        audio_url = recordings['audio']
        # Prefer the API's numeric id over the slug-derived fallback.
        video_id = compat_str(recordings.get('id') or video_id)
        title = recordings['title']
        description = recordings.get('description')
        duration = int_or_none(recordings.get('recordingDuration'))
        timestamp = parse_iso8601(recordings.get('created_at'))
        uploader = recordings.get('page', {}).get('title')
        uploader_id = compat_str(recordings.get('page', {}).get('id'))
        view_count = int_or_none(recordings.get('playCount'))
        comment_count = int_or_none(recordings.get('commentCount'))
        categories = [tag['name'] for tag in recordings.get('tags', []) if tag.get('name')]

        return {
            'id': video_id,
            'url': audio_url,
            'title': title,
            'description': description,
            'duration': duration,
            'timestamp': timestamp,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'view_count': view_count,
            'comment_count': comment_count,
            'categories': categories,
        }
| unlicense |
RomainBrault/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model

# we create 20 points: 10 shifted to (1, 1) labeled +1, 10 at the origin
# labeled -1
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the first 10 samples
sample_weight[:10] *= 10

# plot the weighted data points (marker size proportional to weight)
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
            cmap=plt.cm.bone)

## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
# draw the decision boundary (level set 0 of the decision function)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])

## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])

plt.legend([no_weights.collections[0], samples_weights.collections[0]],
           ["no weights", "with weights"], loc="lower left")

plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
EmreAtes/spack | var/spack/repos/builtin/packages/r-shortread/package.py | 3 | 2764 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RShortread(RPackage):
    """This package implements sampling, iteration, and input of FASTQ
    files. The package includes functions for filtering and trimming
    reads, and for generating a quality assessment report. Data are
    represented as DNAStringSet-derived objects, and easily manipulated
    for a diversity of purposes. The package also contains legacy support
    for early single-end, ungapped alignment formats."""
    homepage = "https://www.bioconductor.org/packages/ShortRead/"
    url = "https://git.bioconductor.org/packages/ShortRead"
    # Bioconductor packages are pinned to a specific git commit rather
    # than a tarball checksum.
    version('1.34.2', git='https://git.bioconductor.org/packages/ShortRead', commit='25daac63b301df66a8ef6e98cc2977522c6786cd')
    # This release is only declared compatible with the R 3.4 series.
    depends_on('r@3.4.0:3.4.9', when='@1.34.2')
    # R package dependencies, needed at both build and run time.
    depends_on('r-latticeextra', type=('build', 'run'))
    depends_on('r-lattice', type=('build', 'run'))
    depends_on('r-zlibbioc', type=('build', 'run'))
    depends_on('r-hwriter', type=('build', 'run'))
    depends_on('r-genomicranges', type=('build', 'run'))
    depends_on('r-genomeinfodb', type=('build', 'run'))
    depends_on('r-iranges', type=('build', 'run'))
    depends_on('r-s4vectors', type=('build', 'run'))
    depends_on('r-biobase', type=('build', 'run'))
    depends_on('r-genomicalignments', type=('build', 'run'))
    depends_on('r-rsamtools', type=('build', 'run'))
    depends_on('r-biostrings', type=('build', 'run'))
    depends_on('r-biocparallel', type=('build', 'run'))
    depends_on('r-biocgenerics', type=('build', 'run'))
| lgpl-2.1 |
WhileLoop/ansible-modules-extras | network/ipify_facts.py | 9 | 3185 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ipify_facts
short_description: Retrieve the public IP of your internet gateway.
description:
- If behind NAT and need to know the public IP of your internet gateway.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
api_url:
description:
- URL of the ipify.org API service.
- C(?format=json) will be appended per default.
required: false
default: 'https://api.ipify.org'
timeout:
description:
- HTTP connection timeout in seconds.
required: false
default: 10
version_added: "2.3"
notes:
- "Visit https://www.ipify.org to get more information."
'''
EXAMPLES = '''
# Gather IP facts from ipify.org
- name: get my public IP
ipify_facts:
# Gather IP facts from your own ipify service endpoint with a custom timeout
- name: get my public IP
ipify_facts:
api_url: http://api.example.com/ipify
timeout: 20
'''
RETURN = '''
---
ipify_public_ip:
description: Public IP of the internet gateway.
returned: success
type: string
sample: 1.2.3.4
'''
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
# Let snippet from module_utils/basic.py return a proper error in this case
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
class IpifyFacts(object):
    """Query the configured ipify endpoint for the host's public IP.

    Relies on the module-level ``module`` global created by main().
    """

    def __init__(self):
        """Pull endpoint URL and timeout out of the module parameters."""
        self.api_url = module.params.get('api_url')
        self.timeout = module.params.get('timeout')

    def run(self):
        """Fetch the public IP and return it as an Ansible facts dict."""
        # Always request the JSON representation of the ipify response.
        request_url = self.api_url + "?format=json"
        (response, info) = fetch_url(
            module=module, url=request_url, force=True, timeout=self.timeout)
        if not response:
            # fail_json() exits the module, so no return is needed here.
            module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.api_url, self.timeout))
        payload = json.loads(response.read())
        return {'ipify_public_ip': payload.get('ip')}
def main():
    """Module entry point: declare the interface and publish the facts."""
    global module
    module = AnsibleModule(
        argument_spec=dict(
            api_url=dict(default='https://api.ipify.org'),
            timeout=dict(type='int', default=10),
        ),
        supports_check_mode=True,
    )
    # The lookup is read-only, so the result never reports a change.
    facts = IpifyFacts().run()
    module.exit_json(changed=False, ansible_facts=facts)


if __name__ == '__main__':
    main()
| gpl-3.0 |
aboutsajjad/Bridge | app_packages/youtube_dl/extractor/pinkbike.py | 62 | 3446 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
remove_end,
remove_start,
str_to_int,
unified_strdate,
)
class PinkbikeIE(InfoExtractor):
    """Extractor for pinkbike.com video pages (and the legacy swf embed)."""
    _VALID_URL = r'https?://(?:(?:www\.)?pinkbike\.com/video/|es\.pinkbike\.org/i/kvid/kvid-y5\.swf\?id=)(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://www.pinkbike.com/video/402811/',
        'md5': '4814b8ca7651034cd87e3361d5c2155a',
        'info_dict': {
            'id': '402811',
            'ext': 'mp4',
            'title': 'Brandon Semenuk - RAW 100',
            'description': 'Official release: www.redbull.ca/rupertwalker',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 100,
            'upload_date': '20150406',
            'uploader': 'revelco',
            'location': 'Victoria, British Columbia, Canada',
            'view_count': int,
            'comment_count': int,
        }
    }, {
        'url': 'http://es.pinkbike.org/i/kvid/kvid-y5.swf?id=406629',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        """Scrape the watch page for formats and metadata."""
        video_id = self._match_id(url)
        # The swf embed URL carries the same numeric id, so always fetch
        # the canonical watch page.
        webpage = self._download_webpage(
            'http://www.pinkbike.com/video/%s' % video_id, video_id)
        formats = []
        # Each <source>-like tag carries data-quality (e.g. "720p") and a
        # src attribute; quotes may be backslash-escaped inside JS.
        for _, format_id, src in re.findall(
                r'data-quality=((?:\\)?["\'])(.+?)\1[^>]+src=\1(.+?)\1', webpage):
            # Quality labels look like "720p"; anything else yields no height.
            height = int_or_none(self._search_regex(
                r'^(\d+)[pP]$', format_id, 'height', default=None))
            formats.append({
                'url': src,
                'format_id': format_id,
                'height': height,
            })
        self._sort_formats(formats)
        # Strip the site suffix from the og:title.
        title = remove_end(self._og_search_title(webpage), '  Video - Pinkbike')
        # Prefer the on-page description block; fall back to og:description
        # with the duplicated title prefix removed.
        description = self._html_search_regex(
            r'(?s)id="media-description"[^>]*>(.+?)<',
            webpage, 'description', default=None) or remove_start(
            self._og_search_description(webpage), title + '. ')
        thumbnail = self._og_search_thumbnail(webpage)
        duration = int_or_none(self._html_search_meta(
            'video:duration', webpage, 'duration'))
        uploader = self._search_regex(
            r'<a[^>]+\brel=["\']author[^>]+>([^<]+)', webpage,
            'uploader', fatal=False)
        # The "fullTime" title attribute holds a parseable date string.
        upload_date = unified_strdate(self._search_regex(
            r'class="fullTime"[^>]+title="([^"]+)"',
            webpage, 'upload date', fatal=False))
        location = self._html_search_regex(
            r'(?s)<dt>Location</dt>\s*<dd>(.+?)<',
            webpage, 'location', fatal=False)
        def extract_count(webpage, label):
            # Counts are rendered as "<num> <label>" stat pairs.
            return str_to_int(self._search_regex(
                r'<span[^>]+class="stat-num"[^>]*>([\d,.]+)</span>\s*<span[^>]+class="stat-label"[^>]*>%s' % label,
                webpage, label, fatal=False))
        view_count = extract_count(webpage, 'Views')
        comment_count = extract_count(webpage, 'Comments')
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'upload_date': upload_date,
            'uploader': uploader,
            'location': location,
            'view_count': view_count,
            'comment_count': comment_count,
            'formats': formats
        }
| mit |
rajashreer7/autotest-client-tests | linux-tools/harfbuzz/harfbuzz.py | 4 | 1246 | #!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error
class harfbuzz(test.test):
    """
    Autotest module for testing basic functionality
    of harfbuzz
    @author Ramesh YR, rameshyr@linux.vnet.ibm.com ##
    """
    # NOTE: this file uses Python 2 syntax (comma-style except clause below).
    version = 1
    nfail = 0          # overall failure counter, checked in postprocess()
    path = ''
    def initialize(self):
        """
        Sets the overall failure counter for the test.
        """
        self.nfail = 0
        logging.info('\n Test initialize successfully')
    def run_once(self, test_path=''):
        """
        Trigger test run
        """
        try:
            # The wrapped shell script expects LTPBIN to point at the
            # shared helpers shipped next to the test.
            os.environ["LTPBIN"] = "%s/shared" %(test_path)
            ret_val = subprocess.Popen(['./harfbuzz.sh'], cwd="%s/harfbuzz" %(test_path))
            ret_val.communicate()
            # Any non-zero exit status from the script counts as a failure.
            if ret_val.returncode != 0:
                self.nfail += 1
        except error.CmdError, e:
            self.nfail += 1
            logging.error("Test Failed: %s", e)
    def postprocess(self):
        """Raise TestError if any run recorded a failure."""
        if self.nfail != 0:
            logging.info('\n nfails is non-zero')
            raise error.TestError('\nTest failed')
        else:
            logging.info('\n Test completed successfully ')
| gpl-2.0 |
lodemo/CATANA | src/face_recognition/youtube_dl/extractor/daum.py | 52 | 11802 | # coding: utf-8
from __future__ import unicode_literals
import re
import itertools
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_parse_unquote,
compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import (
int_or_none,
str_to_int,
xpath_text,
unescapeHTML,
)
class DaumIE(InfoExtractor):
    """Extractor for single tvpot.daum.net videos (and videofarm embeds)."""
    _VALID_URL = r'https?://(?:(?:m\.)?tvpot\.daum\.net/v/|videofarm\.daum\.net/controller/player/VodPlayer\.swf\?vid=)(?P<id>[^?#&]+)'
    IE_NAME = 'daum.net'
    _TESTS = [{
        'url': 'http://tvpot.daum.net/v/vab4dyeDBysyBssyukBUjBz',
        'info_dict': {
            'id': 'vab4dyeDBysyBssyukBUjBz',
            'ext': 'mp4',
            'title': '마크 헌트 vs 안토니오 실바',
            'description': 'Mark Hunt vs Antonio Silva',
            'upload_date': '20131217',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'duration': 2117,
            'view_count': int,
            'comment_count': int,
        },
    }, {
        'url': 'http://m.tvpot.daum.net/v/65139429',
        'info_dict': {
            'id': '65139429',
            'ext': 'mp4',
            'title': '1297회, \'아빠 아들로 태어나길 잘 했어\' 민수, 감동의 눈물[아빠 어디가] 20150118',
            'description': 'md5:79794514261164ff27e36a21ad229fc5',
            'upload_date': '20150604',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'duration': 154,
            'view_count': int,
            'comment_count': int,
        },
    }, {
        'url': 'http://tvpot.daum.net/v/07dXWRka62Y%24',
        'only_matching': True,
    }, {
        'url': 'http://videofarm.daum.net/controller/player/VodPlayer.swf?vid=vwIpVpCQsT8%24&ref=',
        'info_dict': {
            'id': 'vwIpVpCQsT8$',
            'ext': 'flv',
            'title': '01-Korean War ( Trouble on the horizon )',
            'description': '\nKorean War 01\nTrouble on the horizon\n전쟁의 먹구름',
            'upload_date': '20080223',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'duration': 249,
            'view_count': int,
            'comment_count': int,
        },
    }, {
        # Requires dte_type=WEB (#9972)
        'url': 'http://tvpot.daum.net/v/s3794Uf1NZeZ1qMpGpeqeRU',
        'md5': 'a8917742069a4dd442516b86e7d66529',
        'info_dict': {
            'id': 's3794Uf1NZeZ1qMpGpeqeRU',
            'ext': 'mp4',
            'title': '러블리즈 - Destiny (나의 지구) (Lovelyz - Destiny) [쇼! 음악중심] 508회 20160611',
            'description': '러블리즈 - Destiny (나의 지구) (Lovelyz - Destiny)\n\n[쇼! 음악중심] 20160611, 507회',
            'upload_date': '20160611',
        },
    }]
    def _real_extract(self, url):
        """Resolve a vid into its per-profile format URLs plus metadata."""
        # ids may be percent-encoded in the URL (e.g. trailing %24 == '$').
        video_id = compat_urllib_parse_unquote(self._match_id(url))
        movie_data = self._download_json(
            'http://videofarm.daum.net/controller/api/closed/v1_2/IntegratedMovieData.json',
            video_id, 'Downloading video formats info', query={'vid': video_id, 'dte_type': 'WEB'})
        # For urls like http://m.tvpot.daum.net/v/65139429, where the video_id is really a clipid
        if not movie_data.get('output_list', {}).get('output_list') and re.match(r'^\d+$', video_id):
            return self.url_result('http://tvpot.daum.net/clip/ClipView.do?clipid=%s' % video_id)
        info = self._download_xml(
            'http://tvpot.daum.net/clip/ClipInfoXml.do', video_id,
            'Downloading video info', query={'vid': video_id})
        formats = []
        # One MovieLocation lookup per available quality profile.
        for format_el in movie_data['output_list']['output_list']:
            profile = format_el['profile']
            format_query = compat_urllib_parse_urlencode({
                'vid': video_id,
                'profile': profile,
            })
            url_doc = self._download_xml(
                'http://videofarm.daum.net/controller/api/open/v1_2/MovieLocation.apixml?' + format_query,
                video_id, note='Downloading video data for %s format' % profile)
            format_url = url_doc.find('result/url').text
            formats.append({
                'url': format_url,
                'format_id': profile,
                'width': int_or_none(format_el.get('width')),
                'height': int_or_none(format_el.get('height')),
                'filesize': int_or_none(format_el.get('filesize')),
            })
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': info.find('TITLE').text,
            'formats': formats,
            'thumbnail': xpath_text(info, 'THUMB_URL'),
            'description': xpath_text(info, 'CONTENTS'),
            'duration': int_or_none(xpath_text(info, 'DURATION')),
            # REGDTTM is a full timestamp; the first 8 chars are YYYYMMDD.
            'upload_date': info.find('REGDTTM').text[:8],
            'view_count': str_to_int(xpath_text(info, 'PLAY_CNT')),
            'comment_count': str_to_int(xpath_text(info, 'COMMENT_CNT')),
        }
class DaumClipIE(InfoExtractor):
    """Extractor for tvpot.daum.net clip pages (clipid-based URLs).

    Resolves a clipid into the underlying vid and defers the actual
    download to DaumIE via a url_transparent result.
    """
    _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/(?:clip/ClipView.(?:do|tv)|mypot/View.do)\?.*?clipid=(?P<id>\d+)'
    IE_NAME = 'daum.net:clip'
    _URL_TEMPLATE = 'http://tvpot.daum.net/clip/ClipView.do?clipid=%s'
    _TESTS = [{
        'url': 'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690',
        'info_dict': {
            'id': '52554690',
            'ext': 'mp4',
            'title': 'DOTA 2GETHER 시즌2 6회 - 2부',
            'description': 'DOTA 2GETHER 시즌2 6회 - 2부',
            'upload_date': '20130831',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'duration': 3868,
            'view_count': int,
        },
    }, {
        'url': 'http://m.tvpot.daum.net/clip/ClipView.tv?clipid=54999425',
        'only_matching': True,
    }]
    @classmethod
    def suitable(cls, url):
        # Playlist/user URLs also contain a clipid; let those extractors win.
        return False if DaumPlaylistIE.suitable(url) or DaumUserIE.suitable(url) else super(DaumClipIE, cls).suitable(url)
    def _real_extract(self, url):
        video_id = self._match_id(url)
        clip_info = self._download_json(
            'http://tvpot.daum.net/mypot/json/GetClipInfo.do?clipid=%s' % video_id,
            video_id, 'Downloading clip info')['clip_bean']
        # 'up_date' may be missing from the API response; the previous code
        # sliced it unconditionally and crashed with TypeError on None.
        up_date = clip_info.get('up_date')
        return {
            '_type': 'url_transparent',
            'id': video_id,
            'url': 'http://tvpot.daum.net/v/%s' % clip_info['vid'],
            'title': unescapeHTML(clip_info['title']),
            'thumbnail': clip_info.get('thumb_url'),
            'description': clip_info.get('contents'),
            'duration': int_or_none(clip_info.get('duration')),
            # First 8 chars of the timestamp are YYYYMMDD.
            'upload_date': up_date[:8] if up_date else None,
            'view_count': int_or_none(clip_info.get('play_count')),
            'ie_key': 'Daum',
        }
class DaumListIE(InfoExtractor):
    """Shared base for Daum playlist/user extractors (pagination helpers)."""
    def _get_entries(self, list_id, list_id_type):
        """Page through GetClipInfo (48 clips per page) and collect entries.

        Returns (list name, list of url_result entries).
        """
        name = None
        entries = []
        for pagenum in itertools.count(1):
            list_info = self._download_json(
                'http://tvpot.daum.net/mypot/json/GetClipInfo.do?size=48&init=true&order=date&page=%d&%s=%s' % (
                    pagenum, list_id_type, list_id), list_id, 'Downloading list info - %s' % pagenum)
            entries.extend([
                self.url_result(
                    'http://tvpot.daum.net/v/%s' % clip['vid'])
                for clip in list_info['clip_list']
            ])
            # The list name only needs to be captured once, from whichever
            # of the two response shapes is present.
            if not name:
                name = list_info.get('playlist_bean', {}).get('name') or \
                    list_info.get('potInfo', {}).get('name')
            if not list_info.get('has_more'):
                break
        return name, entries
    def _check_clip(self, url, list_id):
        """Honor --no-playlist when the list URL also carries a clipid.

        Returns a DaumClip url_result when --no-playlist is set, else None
        (falling through to full playlist extraction).
        """
        query_dict = compat_parse_qs(compat_urlparse.urlparse(url).query)
        if 'clipid' in query_dict:
            clip_id = query_dict['clipid'][0]
            if self._downloader.params.get('noplaylist'):
                self.to_screen('Downloading just video %s because of --no-playlist' % clip_id)
                return self.url_result(DaumClipIE._URL_TEMPLATE % clip_id, 'DaumClip')
            else:
                self.to_screen('Downloading playlist %s - add --no-playlist to just download video' % list_id)
class DaumPlaylistIE(DaumListIE):
    """Extractor for tvpot.daum.net playlists (playlistid-based URLs)."""
    _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/mypot/(?:View\.do|Top\.tv)\?.*?playlistid=(?P<id>[0-9]+)'
    IE_NAME = 'daum.net:playlist'
    _URL_TEMPLATE = 'http://tvpot.daum.net/mypot/View.do?playlistid=%s'
    _TESTS = [{
        'note': 'Playlist url with clipid',
        'url': 'http://tvpot.daum.net/mypot/View.do?playlistid=6213966&clipid=73806844',
        'info_dict': {
            'id': '6213966',
            'title': 'Woorissica Official',
        },
        'playlist_mincount': 181
    }, {
        'note': 'Playlist url with clipid - noplaylist',
        'url': 'http://tvpot.daum.net/mypot/View.do?playlistid=6213966&clipid=73806844',
        'info_dict': {
            'id': '73806844',
            'ext': 'mp4',
            'title': '151017 Airport',
            'upload_date': '20160117',
        },
        'params': {
            'noplaylist': True,
            'skip_download': True,
        }
    }]
    @classmethod
    def suitable(cls, url):
        # User URLs may also carry a playlistid; let DaumUserIE win.
        return False if DaumUserIE.suitable(url) else super(DaumPlaylistIE, cls).suitable(url)
    def _real_extract(self, url):
        list_id = self._match_id(url)
        # --no-playlist with an embedded clipid short-circuits to one clip.
        clip_result = self._check_clip(url, list_id)
        if clip_result:
            return clip_result
        name, entries = self._get_entries(list_id, 'playlistid')
        return self.playlist_result(entries, list_id, name)
class DaumUserIE(DaumListIE):
    """Extractor for tvpot.daum.net user pages (ownerid-based URLs)."""
    _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/mypot/(?:View|Top)\.(?:do|tv)\?.*?ownerid=(?P<id>[0-9a-zA-Z]+)'
    IE_NAME = 'daum.net:user'
    _TESTS = [{
        'url': 'http://tvpot.daum.net/mypot/View.do?ownerid=o2scDLIVbHc0',
        'info_dict': {
            'id': 'o2scDLIVbHc0',
            'title': '마이 리틀 텔레비전',
        },
        'playlist_mincount': 213
    }, {
        'url': 'http://tvpot.daum.net/mypot/View.do?ownerid=o2scDLIVbHc0&clipid=73801156',
        'info_dict': {
            'id': '73801156',
            'ext': 'mp4',
            'title': '[미공개] 김구라, 오만석이 부릅니다 \'오케피\' - 마이 리틀 텔레비전 20160116',
            'upload_date': '20160117',
            'description': 'md5:5e91d2d6747f53575badd24bd62b9f36'
        },
        'params': {
            'noplaylist': True,
            'skip_download': True,
        }
    }, {
        'note': 'Playlist url has ownerid and playlistid, playlistid takes precedence',
        'url': 'http://tvpot.daum.net/mypot/View.do?ownerid=o2scDLIVbHc0&playlistid=6196631',
        'info_dict': {
            'id': '6196631',
            'title': '마이 리틀 텔레비전 - 20160109',
        },
        'playlist_count': 11
    }, {
        'url': 'http://tvpot.daum.net/mypot/Top.do?ownerid=o2scDLIVbHc0',
        'only_matching': True,
    }, {
        'url': 'http://m.tvpot.daum.net/mypot/Top.tv?ownerid=45x1okb1If50&playlistid=3569733',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        list_id = self._match_id(url)
        # --no-playlist with an embedded clipid short-circuits to one clip.
        clip_result = self._check_clip(url, list_id)
        if clip_result:
            return clip_result
        # A playlistid in the query takes precedence over the owner listing.
        query_dict = compat_parse_qs(compat_urlparse.urlparse(url).query)
        if 'playlistid' in query_dict:
            playlist_id = query_dict['playlistid'][0]
            return self.url_result(DaumPlaylistIE._URL_TEMPLATE % playlist_id, 'DaumPlaylist')
        name, entries = self._get_entries(list_id, 'ownerid')
        return self.playlist_result(entries, list_id, name)
| mit |
brettc/bricolage | tests/test_core.py | 1 | 6385 | import bricolage.core as T
import cPickle as pickle
import pathlib
import numpy
def make_target1(a, b, c):
    """First test fitness target: map three cue channels to two outputs.

    Output 1 is 0.5 when (a AND b) OR (NOT c), else 1.0; output 2 is 1
    only when b is on together with (a OR c) but not (a AND b).
    """
    if (a and b) or not c:
        first = 0.5
    else:
        first = 1.0
    second = 0
    if ((a or c) and not (a and b)) and b:
        second = 1
    return first, second
def make_target2(a, b, c):
    """Second test fitness target with a different two-output truth table."""
    if (a or b) and not a and not c:
        first = 0.25
    else:
        first = 1.0
    if (a or b) and not (a and b) and c:
        second = 1
    else:
        second = 0
    return first, second
def bowtie_target(a, b, c):
    """Three-output target: all outputs 'on' when (a & ~c) | (b & c)."""
    signal_on = (a and not c) or (b and c)
    if signal_on:
        return [1, 0.5, 0.25]
    return [0, 0, 0]
def test_world():
    """World exposes the channel counts it was parameterized with."""
    cue = 3
    reg = 4
    out = 3
    p = T.Parameters(cue_channels=cue, reg_channels=reg, out_channels=out)
    w = T.World(p)
    assert w.cue_channels == cue
    assert w.reg_channels == reg
    assert w.out_channels == out
    # Total count includes 2 extra channels beyond cue+reg+out --
    # presumably fixed on/off channels; confirm against bricolage.core.
    assert w.channel_count == 2 + cue + reg + out
def test_target():
    """DefaultTarget builds a 2**cues x outputs table and normalizes weights."""
    p = T.Parameters(cue_channels=3, reg_channels=3, out_channels=2)
    w = T.World(p)
    t = T.DefaultTarget(w, make_target1)
    # One row per environment (2**3 cue states), one column per output.
    assert t.as_array().shape == (pow(2, 3), 2)
    # Default
    assert t.weighting == [0.5, 0.5]
    # Assigning a weighting renormalizes it to sum to 1.
    t.weighting = [1, 4]
    assert t.weighting == [0.2, 0.8]
def test_pickling_world(tmpdir):
    """A World round-trips through pickle with its state intact."""
    tmpdir = pathlib.Path(str(tmpdir))
    p = T.Parameters(seed=99, cue_channels=3, reg_channels=3, out_channels=2)
    w = T.World(p)
    with open(str(tmpdir / "world1.pickle"), "wb") as f:
        pickle.dump(w, f, -1)
    with open(str(tmpdir / "world1.pickle"), "rb") as f:
        w2 = pickle.load(f)
    assert dir(w2.params) == dir(w.params)
    assert w.cue_channels == w2.cue_channels
    assert w.reg_channels == w2.reg_channels
    assert w.out_channels == w2.out_channels
    # RNG state and id counters must survive the round trip too.
    assert w.get_random_state() == w2.get_random_state()
    assert w.next_network_id == w2.next_network_id
    assert w.next_target_id == w2.next_target_id
def test_pickling_default_target(tmpdir):
    """DefaultTargets round-trip through pickle with all attributes intact."""
    tmpdir = pathlib.Path(str(tmpdir))
    p = T.Parameters(cue_channels=3, reg_channels=3, out_channels=2)
    w = T.World(p)
    # Now ensure that pickling Targets works too
    t1 = T.DefaultTarget(w, make_target1, name="a")
    assert t1.scoring_method == T.ScoringMethod.LINEAR
    assert t1.strength == 0.0
    t2 = T.DefaultTarget(
        w,
        make_target2,
        name="b",
        scoring_method=T.ScoringMethod.EXPONENTIAL,
        strength=4.0,
    )
    t2.weighting = [1, 2]
    # Pickle both targets in one tuple to exercise shared-world handling.
    with open(str(tmpdir / "target1.pickle"), "wb") as f:
        pickle.dump((t1, t2), f, -1)
    with open(str(tmpdir / "target1.pickle"), "rb") as f:
        rt1, rt2 = pickle.load(f)
    assert (t1.as_array() == rt1.as_array()).all()
    assert (t2.as_array() == rt2.as_array()).all()
    assert t1.name == rt1.name
    assert t2.name == rt2.name
    assert t1.identifier == rt1.identifier
    assert t2.identifier == rt2.identifier
    assert t1.weighting == rt1.weighting
    assert t2.weighting == rt2.weighting
    assert t1.scoring_method == rt1.scoring_method
    assert t2.scoring_method == rt2.scoring_method
    assert t1.strength == rt1.strength
    assert t2.strength == rt2.strength
def test_pickling_noisy_target(tmpdir):
    """NoisyTargets round-trip through pickle keeping their noise settings."""
    tmpdir = pathlib.Path(str(tmpdir))
    p = T.Parameters(cue_channels=3, reg_channels=3, out_channels=2)
    w = T.World(p)
    # Now ensure that pickling Targets works too
    t1 = T.NoisyTarget(w, make_target1, name="a")
    assert t1.scoring_method == T.ScoringMethod.LINEAR
    assert t1.strength == 0.0
    # Noise defaults: one perturbation, full proportion, env-only.
    assert t1.perturb_count == 1
    assert t1.perturb_prop == 1.0
    # Truthiness asserts instead of `== True` / `== False` (PEP 8 / E712).
    assert t1.env_only
    t2 = T.NoisyTarget(
        w, make_target2, name="b", perturb_count=3, perturb_prop=0.5, env_only=False
    )
    assert t2.perturb_count == 3
    assert t2.perturb_prop == 0.5
    assert not t2.env_only
    with open(str(tmpdir / "target1.pickle"), "wb") as f:
        pickle.dump((t1, t2), f, -1)
    with open(str(tmpdir / "target1.pickle"), "rb") as f:
        rt1, rt2 = pickle.load(f)
    assert (t1.as_array() == rt1.as_array()).all()
    assert (t2.as_array() == rt2.as_array()).all()
    assert t1.env_only == rt1.env_only
    assert t2.env_only == rt2.env_only
    assert t1.perturb_count == rt1.perturb_count
    assert t2.perturb_count == rt2.perturb_count
    assert t1.perturb_prop == rt1.perturb_prop
    assert t2.perturb_prop == rt2.perturb_prop
def test_scoring_methods(bowtie_database):
    """All scoring methods agree on which networks score a perfect 1.0."""
    pop = bowtie_database.population
    # Use different identifiers to force recalculation
    targ1 = T.DefaultTarget(pop.factory.world, bowtie_target, ident=2)
    targ2 = T.DefaultTarget(
        pop.factory.world,
        bowtie_target,
        ident=3,
        scoring_method=T.ScoringMethod.EXPONENTIAL,
        strength=1,
    )
    targ3 = T.DefaultTarget(
        pop.factory.world,
        bowtie_target,
        ident=4,
        scoring_method=T.ScoringMethod.EXPONENTIAL_VEC,
        strength=1,
    )
    f1 = targ1.assess_collection(pop)
    f2 = targ2.assess_collection(pop)
    f3 = targ3.assess_collection(pop)
    # Perfect scorers should be the same indices under every method.
    ones1 = numpy.where(f1 == 1.0)[0]
    ones2 = numpy.where(f2 == 1.0)[0]
    ones3 = numpy.where(f3 == 1.0)[0]
    assert (ones1 == ones2).all()
    assert (ones1 == ones3).all()
def test_channelstate():
    """Channel states compare by value; copies are equal but independent."""
    p = T.Parameters(cue_channels=3, reg_channels=4, out_channels=3)
    w = T.World(p)
    e2 = w.environments[-1]
    e2_again = w.environments[-1]
    # We should get the same channels states out.
    assert e2 == e2_again
    # assert e2 is e2_again
    # When we copy, they should be the same, but not identical.
    copy_e2 = e2.copy()
    assert e2 == copy_e2
    assert e2 is not copy_e2
    # Modify the state -- testing still work
    copy_e2.flip(0)
    assert e2 != copy_e2
    # Flipping back restores equality (flip is its own inverse).
    copy_e2.flip(0)
    assert e2 == copy_e2
def test_random_engine():
    """Reseeding or restoring state reproduces the same random stream."""
    p = T.Parameters(cue_channels=3, reg_channels=4, out_channels=3)
    w = T.World(p)
    w.seed_random_engine(1)
    first_time = [w.get_random_double(0, 1) for _ in range(20)]
    first_time += [w.get_random_int(0, 100) for _ in range(20)]
    # Same seed must yield an identical sequence.
    w.seed_random_engine(1)
    second_time = [w.get_random_double(0, 1) for _ in range(20)]
    second_time += [w.get_random_int(0, 100) for _ in range(20)]
    assert first_time == second_time
    # Now try with state setting
    ss = w.get_random_state()
    a = [w.get_random_double(0, 1) for _ in range(100)]
    w.set_random_state(ss)
    b = [w.get_random_double(0, 1) for _ in range(100)]
    assert a == b
| gpl-3.0 |
DavidLP/home-assistant | homeassistant/components/fibaro/climate.py | 6 | 10044 | """Support for Fibaro thermostats."""
import logging
from homeassistant.components.climate.const import (
STATE_AUTO, STATE_COOL, STATE_DRY,
STATE_ECO, STATE_FAN_ONLY, STATE_HEAT,
STATE_MANUAL, SUPPORT_TARGET_TEMPERATURE,
SUPPORT_OPERATION_MODE, SUPPORT_FAN_MODE)
from homeassistant.components.climate import (
ClimateDevice)
from homeassistant.const import (
ATTR_TEMPERATURE,
STATE_OFF,
TEMP_CELSIUS,
TEMP_FAHRENHEIT)
from . import (
FIBARO_DEVICES, FibaroDevice)
# Fan-speed labels used by this platform.
SPEED_LOW = 'low'
SPEED_MEDIUM = 'medium'
SPEED_HIGH = 'high'
# State definitions missing from HA, but defined by Z-Wave standard.
# We map them to states known supported by HA here:
STATE_AUXILIARY = STATE_HEAT
STATE_RESUME = STATE_HEAT
STATE_MOIST = STATE_DRY
STATE_AUTO_CHANGEOVER = STATE_AUTO
STATE_ENERGY_HEAT = STATE_ECO
STATE_ENERGY_COOL = STATE_COOL
STATE_FULL_POWER = STATE_AUTO
STATE_FORCE_OPEN = STATE_MANUAL
STATE_AWAY = STATE_AUTO
STATE_FURNACE = STATE_HEAT
# Extra fan-mode labels beyond the basic speeds.
FAN_AUTO_HIGH = 'auto_high'
FAN_AUTO_MEDIUM = 'auto_medium'
FAN_CIRCULATION = 'circulation'
FAN_HUMIDITY_CIRCULATION = 'humidity_circulation'
FAN_LEFT_RIGHT = 'left_right'
FAN_UP_DOWN = 'up_down'
FAN_QUIET = 'quiet'
_LOGGER = logging.getLogger(__name__)
# SDS13781-10 Z-Wave Application Command Class Specification 2019-01-04
# Table 128, Thermostat Fan Mode Set version 4::Fan Mode encoding
FANMODES = {
    0: STATE_OFF,
    1: SPEED_LOW,
    2: FAN_AUTO_HIGH,
    3: SPEED_HIGH,
    4: FAN_AUTO_MEDIUM,
    5: SPEED_MEDIUM,
    6: FAN_CIRCULATION,
    7: FAN_HUMIDITY_CIRCULATION,
    8: FAN_LEFT_RIGHT,
    9: FAN_UP_DOWN,
    10: FAN_QUIET,
    128: STATE_AUTO
}
# SDS13781-10 Z-Wave Application Command Class Specification 2019-01-04
# Table 130, Thermostat Mode Set version 3::Mode encoding.
OPMODES = {
    0: STATE_OFF,
    1: STATE_HEAT,
    2: STATE_COOL,
    3: STATE_AUTO,
    4: STATE_AUXILIARY,
    5: STATE_RESUME,
    6: STATE_FAN_ONLY,
    7: STATE_FURNACE,
    8: STATE_DRY,
    9: STATE_MOIST,
    10: STATE_AUTO_CHANGEOVER,
    11: STATE_ENERGY_HEAT,
    12: STATE_ENERGY_COOL,
    13: STATE_AWAY,
    15: STATE_FULL_POWER,
    31: STATE_FORCE_OPEN
}
# Base feature set; operation/fan support is added per device in __init__.
SUPPORT_FLAGS = (SUPPORT_TARGET_TEMPERATURE)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Perform the setup for Fibaro controller devices."""
    # This platform is only ever set up via discovery from the Fibaro hub.
    if discovery_info is None:
        return
    entities = []
    for device in hass.data[FIBARO_DEVICES]['climate']:
        entities.append(FibaroThermostat(device))
    add_entities(entities, True)
class FibaroThermostat(FibaroDevice, ClimateDevice):
    """Representation of a Fibaro Thermostat.

    A Fibaro 'thermostat' is split across several sibling devices on the
    hub (temperature sensor, setpoint, operating mode, fan mode); this
    entity discovers each sibling by its capabilities and delegates to it.
    """
    def __init__(self, fibaro_device):
        """Initialize the Fibaro device."""
        super().__init__(fibaro_device)
        # Sibling sub-devices, discovered below; any of these may stay None.
        self._temp_sensor_device = None
        self._target_temp_device = None
        self._op_mode_device = None
        self._fan_mode_device = None
        self._support_flags = 0
        self.entity_id = 'climate.{}'.format(self.ha_id)
        # Bidirectional maps between Z-Wave numeric modes and HA states.
        self._fan_mode_to_state = {}
        self._fan_state_to_mode = {}
        self._op_mode_to_state = {}
        self._op_state_to_mode = {}
        siblings = fibaro_device.fibaro_controller.get_siblings(
            fibaro_device.id)
        tempunit = 'C'
        for device in siblings:
            # Detect each capability by device type / advertised actions.
            if device.type == 'com.fibaro.temperatureSensor':
                self._temp_sensor_device = FibaroDevice(device)
                tempunit = device.properties.unit
            if 'setTargetLevel' in device.actions or \
                    'setThermostatSetpoint' in device.actions:
                self._target_temp_device = FibaroDevice(device)
                self._support_flags |= SUPPORT_TARGET_TEMPERATURE
                tempunit = device.properties.unit
            if 'setMode' in device.actions or \
                    'setOperatingMode' in device.actions:
                self._op_mode_device = FibaroDevice(device)
                self._support_flags |= SUPPORT_OPERATION_MODE
            if 'setFanMode' in device.actions:
                self._fan_mode_device = FibaroDevice(device)
                self._support_flags |= SUPPORT_FAN_MODE
        if tempunit == 'F':
            self._unit_of_temp = TEMP_FAHRENHEIT
        else:
            self._unit_of_temp = TEMP_CELSIUS
        if self._fan_mode_device:
            # supportedModes is a comma-separated list of numeric codes.
            fan_modes = self._fan_mode_device.fibaro_device.\
                properties.supportedModes.split(",")
            for mode in fan_modes:
                try:
                    self._fan_mode_to_state[int(mode)] = FANMODES[int(mode)]
                    self._fan_state_to_mode[FANMODES[int(mode)]] = int(mode)
                except KeyError:
                    # Codes outside the Z-Wave table map to 'unknown'.
                    self._fan_mode_to_state[int(mode)] = 'unknown'
        if self._op_mode_device:
            prop = self._op_mode_device.fibaro_device.properties
            # NOTE(review): if neither property key is present, op_modes is
            # unbound and the loop below raises NameError -- verify whether
            # that can occur with real devices.
            if "supportedOperatingModes" in prop:
                op_modes = prop.supportedOperatingModes.split(",")
            elif "supportedModes" in prop:
                op_modes = prop.supportedModes.split(",")
            for mode in op_modes:
                try:
                    self._op_mode_to_state[int(mode)] = OPMODES[int(mode)]
                    self._op_state_to_mode[OPMODES[int(mode)]] = int(mode)
                except KeyError:
                    self._op_mode_to_state[int(mode)] = 'unknown'
    async def async_added_to_hass(self):
        """Call when entity is added to hass."""
        _LOGGER.debug("Climate %s\n"
                      "- _temp_sensor_device %s\n"
                      "- _target_temp_device %s\n"
                      "- _op_mode_device %s\n"
                      "- _fan_mode_device %s",
                      self.ha_id,
                      self._temp_sensor_device.ha_id
                      if self._temp_sensor_device else "None",
                      self._target_temp_device.ha_id
                      if self._target_temp_device else "None",
                      self._op_mode_device.ha_id
                      if self._op_mode_device else "None",
                      self._fan_mode_device.ha_id
                      if self._fan_mode_device else "None")
        await super().async_added_to_hass()
        # Register update callback for child devices
        siblings = self.fibaro_device.fibaro_controller.get_siblings(
            self.fibaro_device.id)
        for device in siblings:
            if device != self.fibaro_device:
                self.controller.register(device.id,
                                         self._update_callback)
    @property
    def supported_features(self):
        """Return the list of supported features."""
        return self._support_flags
    @property
    def fan_list(self):
        """Return the list of available fan modes."""
        if self._fan_mode_device is None:
            return None
        return list(self._fan_state_to_mode)
    @property
    def current_fan_mode(self):
        """Return the fan setting."""
        if self._fan_mode_device is None:
            return None
        mode = int(self._fan_mode_device.fibaro_device.properties.mode)
        return self._fan_mode_to_state[mode]
    def set_fan_mode(self, fan_mode):
        """Set new target fan mode."""
        if self._fan_mode_device is None:
            return
        self._fan_mode_device.action(
            "setFanMode", self._fan_state_to_mode[fan_mode])
    @property
    def current_operation(self):
        """Return current operation ie. heat, cool, idle."""
        if self._op_mode_device is None:
            return None
        # Newer devices report 'operatingMode'; older ones just 'mode'.
        if "operatingMode" in self._op_mode_device.fibaro_device.properties:
            mode = int(self._op_mode_device.fibaro_device.
                       properties.operatingMode)
        else:
            mode = int(self._op_mode_device.fibaro_device.properties.mode)
        return self._op_mode_to_state.get(mode)
    @property
    def operation_list(self):
        """Return the list of available operation modes."""
        if self._op_mode_device is None:
            return None
        return list(self._op_state_to_mode)
    def set_operation_mode(self, operation_mode):
        """Set new target operation mode."""
        if self._op_mode_device is None:
            return
        # Prefer the newer setOperatingMode action when available.
        if "setOperatingMode" in self._op_mode_device.fibaro_device.actions:
            self._op_mode_device.action(
                "setOperatingMode", self._op_state_to_mode[operation_mode])
        elif "setMode" in self._op_mode_device.fibaro_device.actions:
            self._op_mode_device.action(
                "setMode", self._op_state_to_mode[operation_mode])
    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return self._unit_of_temp
    @property
    def current_temperature(self):
        """Return the current temperature."""
        if self._temp_sensor_device:
            device = self._temp_sensor_device.fibaro_device
            return float(device.properties.value)
        return None
    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        if self._target_temp_device:
            device = self._target_temp_device.fibaro_device
            return float(device.properties.targetLevel)
        return None
    def set_temperature(self, **kwargs):
        """Set new target temperatures."""
        temperature = kwargs.get(ATTR_TEMPERATURE)
        target = self._target_temp_device
        if temperature is not None:
            # setThermostatSetpoint needs the current mode as first argument.
            if "setThermostatSetpoint" in target.fibaro_device.actions:
                target.action("setThermostatSetpoint",
                              self._op_state_to_mode[self.current_operation],
                              temperature)
            else:
                target.action("setTargetLevel",
                              temperature)
    @property
    def is_on(self):
        """Return true if on."""
        if self.current_operation == STATE_OFF:
            return False
        return True
| apache-2.0 |
NicholasPace/android_kernel_motorola_msm8226 | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions's names and the the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
	""" This class provides a tree representation of the functions
	call stack. If a function has no parent in the kernel (interrupt,
	syscall, kernel thread...) then it is attached to a virtual parent
	called ROOT.
	"""
	# Virtual root node; assigned in main() before any tree is built.
	ROOT = None
	def __init__(self, func, time = None, parent = None):
		# func: function name from the trace; time: timestamp string
		# (None for synthetic nodes such as ROOT); parent: caller node,
		# or None to attach directly under the virtual ROOT.
		self._func = func
		self._time = time
		if parent is None:
			self._parent = CallTree.ROOT
		else:
			self._parent = parent
		self._children = []
	def calls(self, func, calltime):
		""" If a function calls another one, call this method to insert it
		into the tree at the appropriate place.
		@return: A reference to the newly created child node.
		"""
		child = CallTree(func, calltime, self)
		self._children.append(child)
		return child
	def getParent(self, func):
		""" Retrieve the last parent of the current node that
		has the name given by func. If this function is not
		on a parent, then create it as new child of root
		@return: A reference to the parent.
		"""
		# Walk up the ancestry until a node named func is found.
		tree = self
		while tree != CallTree.ROOT and tree._func != func:
			tree = tree._parent
		if tree == CallTree.ROOT:
			# Caller was never seen before: attach it under ROOT.
			child = CallTree.ROOT.calls(func, None)
			return child
		return tree
	def __repr__(self):
		return self.__toString("", True)
	def __toString(self, branch, lastChild):
		# Render this node then recurse into the children, extending the
		# ASCII branch prefix; the last child blanks the trailing '|' of
		# its prefix so the drawing closes cleanly.
		if self._time is not None:
			s = "%s----%s (%s)\n" % (branch, self._func, self._time)
		else:
			s = "%s----%s\n" % (branch, self._func)
		i = 0
		if lastChild:
			branch = branch[:-1] + " "
		while i < len(self._children):
			if i != len(self._children) - 1:
				s += "%s" % self._children[i].__toString(branch +\
								" |", False)
			else:
				s += "%s" % self._children[i].__toString(branch +\
								" |", True)
			i += 1
		return s
class BrokenLineException(Exception):
	"""Raised for an incomplete trace line (typically the last line cut
	short when the pipe broke): processing stops and the line is ignored.
	"""
	pass
class CommentLineException(Exception):
	"""Raised when a line is a comment (as found at the beginning of the
	trace file); the line is simply skipped.
	"""
	pass
def parseLine(line):
	"""Split one ftrace line into (calltime, callee, caller).

	Raises CommentLineException for '#' header lines and
	BrokenLineException when the line does not match the expected
	'... ] <time>: <callee> <-<caller>' layout.
	"""
	stripped = line.strip()
	if stripped.startswith("#"):
		raise CommentLineException
	match = re.match(r"[^]]+?\] +([0-9.]+): (\w+) <-(\w+)", stripped)
	if match is None:
		raise BrokenLineException
	return match.group(1), match.group(2), match.group(3)
def main():
	"""Read a function trace on stdin and print it as a call tree."""
	CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
	tree = CallTree.ROOT
	for line in sys.stdin:
		try:
			calltime, callee, caller = parseLine(line)
		except BrokenLineException:
			# Truncated last line (broken pipe): stop processing.
			break
		except CommentLineException:
			# Header/comment lines are skipped.
			continue
		tree = tree.getParent(caller)
		tree = tree.calls(callee, calltime)
	# Parenthesized so the script prints correctly under both Python 2
	# (print statement) and Python 3 (print function); the bare
	# 'print X' form was a SyntaxError on Python 3.
	print(CallTree.ROOT)
if __name__ == "__main__":
	main()
| gpl-2.0 |
dharmabumstead/ansible | contrib/inventory/infoblox.py | 110 | 3600 | #!/usr/bin/env python
#
# (c) 2018, Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import json
import argparse
from ansible.parsing.dataloader import DataLoader
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_text
from ansible.module_utils.net_tools.nios.api import WapiInventory
from ansible.module_utils.net_tools.nios.api import normalize_extattrs, flatten_extattrs
CONFIG_FILES = [
'/etc/ansible/infoblox.yaml',
'/etc/ansible/infoblox.yml'
]
def parse_args():
    """Build and evaluate the command line understood by this script."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--list', action='store_true',
                     help='List host records from NIOS for use in Ansible')
    cli.add_argument('--host',
                     help='List meta data about single host (not used)')
    return cli.parse_args()
def main():
    """Entry point: print a NIOS-backed Ansible dynamic inventory as JSON.

    Reads the first existing config file from CONFIG_FILES, queries the
    Infoblox WAPI for host records, and writes the inventory (grouped by
    DNS view, with per-host vars) to stdout.
    """
    args = parse_args()

    for config_file in CONFIG_FILES:
        if os.path.exists(config_file):
            break
    else:
        sys.stdout.write('unable to locate config file at /etc/ansible/infoblox.yaml\n')
        sys.exit(-1)

    try:
        loader = DataLoader()
        config = loader.load_from_file(config_file)
        provider = config.get('provider') or {}
        wapi = WapiInventory(provider)
    except Exception as exc:
        sys.stdout.write(to_text(exc))
        sys.exit(-1)

    if args.host:
        host_filter = {'name': args.host}
    else:
        host_filter = {}

    # Guard against a config with no 'filters' section at all, mirroring
    # the `config.get('provider') or {}` handling above (previously this
    # was None and the .get() calls below raised AttributeError).
    config_filters = config.get('filters') or {}
    if config_filters.get('view') is not None:
        host_filter['view'] = config_filters['view']

    if config_filters.get('extattrs'):
        extattrs = normalize_extattrs(config_filters['extattrs'])
    else:
        extattrs = {}

    hostvars = {}
    inventory = {
        '_meta': {
            'hostvars': hostvars
        }
    }

    return_fields = ['name', 'view', 'extattrs', 'ipv4addrs']
    hosts = wapi.get_object('record:host',
                            host_filter,
                            extattrs=extattrs,
                            return_fields=return_fields)

    if hosts:
        for item in hosts:
            view = item['view']
            name = item['name']

            if view not in inventory:
                inventory[view] = {'hosts': []}

            inventory[view]['hosts'].append(name)

            hostvars[name] = {
                'view': view
            }

            # Extensible attributes prefixed 'ansible_' become host vars
            # directly; the rest are collected under 'extattrs'.
            if item.get('extattrs'):
                for key, value in iteritems(flatten_extattrs(item['extattrs'])):
                    if key.startswith('ansible_'):
                        hostvars[name][key] = value
                    else:
                        if 'extattrs' not in hostvars[name]:
                            hostvars[name]['extattrs'] = {}
                        hostvars[name]['extattrs'][key] = value

    sys.stdout.write(json.dumps(inventory, indent=4))
    sys.exit(0)
if __name__ == '__main__':
    main()
| gpl-3.0 |
pratapvardhan/pandas | pandas/tests/io/json/test_normalize.py | 6 | 16358 | import pytest
import numpy as np
import json
import pandas.util.testing as tm
from pandas import compat, Index, DataFrame
from pandas.io.json import json_normalize
from pandas.io.json.normalize import nested_to_record
@pytest.fixture
def deep_nested():
    # deeply nested data
    # Two countries -> states -> cities; exercises multi-level
    # record_path and meta extraction in json_normalize.
    return [{'country': 'USA',
             'states': [{'name': 'California',
                         'cities': [{'name': 'San Francisco',
                                     'pop': 12345},
                                    {'name': 'Los Angeles',
                                     'pop': 12346}]
                         },
                        {'name': 'Ohio',
                         'cities': [{'name': 'Columbus',
                                     'pop': 1234},
                                    {'name': 'Cleveland',
                                     'pop': 1236}]}
                        ]
             },
            {'country': 'Germany',
             'states': [{'name': 'Bayern',
                         'cities': [{'name': 'Munich', 'pop': 12347}]
                         },
                        {'name': 'Nordrhein-Westfalen',
                         'cities': [{'name': 'Duesseldorf', 'pop': 1238},
                                    {'name': 'Koeln', 'pop': 1239}]}
                        ]
             }
            ]
@pytest.fixture
def state_data():
    # Two US states with nested 'counties' records and a nested 'info'
    # dict; the canonical json_normalize example payload.
    return [
        {'counties': [{'name': 'Dade', 'population': 12345},
                      {'name': 'Broward', 'population': 40000},
                      {'name': 'Palm Beach', 'population': 60000}],
         'info': {'governor': 'Rick Scott'},
         'shortname': 'FL',
         'state': 'Florida'},
        {'counties': [{'name': 'Summit', 'population': 1234},
                      {'name': 'Cuyahoga', 'population': 1337}],
         'info': {'governor': 'John Kasich'},
         'shortname': 'OH',
         'state': 'Ohio'}]
@pytest.fixture
def author_missing_data():
    # One record with a null 'info' plus one fully populated record;
    # exercises handling of missing/None fields.
    return [
        {'info': None},
        {'info':
         {'created_at': '11/08/1993', 'last_updated': '26/05/2012'},
         'author_name':
         {'first': 'Jane', 'last_name': 'Doe'}
         }]
class TestJSONNormalize(object):
    """Behavioral tests for pandas.io.json.json_normalize: record paths,
    meta columns, separators, prefixes, non-ASCII keys and missing
    fields."""
    def test_simple_records(self):
        recs = [{'a': 1, 'b': 2, 'c': 3},
                {'a': 4, 'b': 5, 'c': 6},
                {'a': 7, 'b': 8, 'c': 9},
                {'a': 10, 'b': 11, 'c': 12}]
        result = json_normalize(recs)
        expected = DataFrame(recs)
        tm.assert_frame_equal(result, expected)
    def test_simple_normalize(self, state_data):
        result = json_normalize(state_data[0], 'counties')
        expected = DataFrame(state_data[0]['counties'])
        tm.assert_frame_equal(result, expected)
        result = json_normalize(state_data, 'counties')
        expected = []
        for rec in state_data:
            expected.extend(rec['counties'])
        expected = DataFrame(expected)
        tm.assert_frame_equal(result, expected)
        # meta='state' repeats the state value once per county row
        result = json_normalize(state_data, 'counties', meta='state')
        expected['state'] = np.array(['Florida', 'Ohio']).repeat([3, 2])
        tm.assert_frame_equal(result, expected)
    def test_empty_array(self):
        result = json_normalize([])
        expected = DataFrame()
        tm.assert_frame_equal(result, expected)
    def test_simple_normalize_with_separator(self, deep_nested):
        # GH 14883
        result = json_normalize({'A': {'A': 1, 'B': 2}})
        expected = DataFrame([[1, 2]], columns=['A.A', 'A.B'])
        tm.assert_frame_equal(result.reindex_like(expected), expected)
        result = json_normalize({'A': {'A': 1, 'B': 2}}, sep='_')
        expected = DataFrame([[1, 2]], columns=['A_A', 'A_B'])
        tm.assert_frame_equal(result.reindex_like(expected), expected)
        # non-ASCII separator (Greek sigma) must work too
        result = json_normalize({'A': {'A': 1, 'B': 2}}, sep=u'\u03c3')
        expected = DataFrame([[1, 2]], columns=[u'A\u03c3A', u'A\u03c3B'])
        tm.assert_frame_equal(result.reindex_like(expected), expected)
        result = json_normalize(deep_nested, ['states', 'cities'],
                                meta=['country', ['states', 'name']],
                                sep='_')
        expected = Index(['name', 'pop',
                          'country', 'states_name']).sort_values()
        assert result.columns.sort_values().equals(expected)
    def test_value_array_record_prefix(self):
        # GH 21536
        result = json_normalize({'A': [1, 2]}, 'A', record_prefix='Prefix.')
        expected = DataFrame([[1], [2]], columns=['Prefix.0'])
        tm.assert_frame_equal(result, expected)
    def test_more_deeply_nested(self, deep_nested):
        result = json_normalize(deep_nested, ['states', 'cities'],
                                meta=['country', ['states', 'name']])
        # meta_prefix={'states': 'state_'})
        ex_data = {'country': ['USA'] * 4 + ['Germany'] * 3,
                   'states.name': ['California', 'California', 'Ohio', 'Ohio',
                                   'Bayern', 'Nordrhein-Westfalen',
                                   'Nordrhein-Westfalen'],
                   'name': ['San Francisco', 'Los Angeles', 'Columbus',
                            'Cleveland', 'Munich', 'Duesseldorf', 'Koeln'],
                   'pop': [12345, 12346, 1234, 1236, 12347, 1238, 1239]}
        expected = DataFrame(ex_data, columns=result.columns)
        tm.assert_frame_equal(result, expected)
    def test_shallow_nested(self):
        data = [{'state': 'Florida',
                 'shortname': 'FL',
                 'info': {
                     'governor': 'Rick Scott'
                 },
                 'counties': [{'name': 'Dade', 'population': 12345},
                              {'name': 'Broward', 'population': 40000},
                              {'name': 'Palm Beach', 'population': 60000}]},
                {'state': 'Ohio',
                 'shortname': 'OH',
                 'info': {
                     'governor': 'John Kasich'
                 },
                 'counties': [{'name': 'Summit', 'population': 1234},
                              {'name': 'Cuyahoga', 'population': 1337}]}]
        result = json_normalize(data, 'counties',
                                ['state', 'shortname',
                                 ['info', 'governor']])
        ex_data = {'name': ['Dade', 'Broward', 'Palm Beach', 'Summit',
                            'Cuyahoga'],
                   'state': ['Florida'] * 3 + ['Ohio'] * 2,
                   'shortname': ['FL', 'FL', 'FL', 'OH', 'OH'],
                   'info.governor': ['Rick Scott'] * 3 + ['John Kasich'] * 2,
                   'population': [12345, 40000, 60000, 1234, 1337]}
        expected = DataFrame(ex_data, columns=result.columns)
        tm.assert_frame_equal(result, expected)
    def test_meta_name_conflict(self):
        # A meta key that also appears in the records must raise unless
        # a meta_prefix disambiguates it.
        data = [{'foo': 'hello',
                 'bar': 'there',
                 'data': [{'foo': 'something', 'bar': 'else'},
                          {'foo': 'something2', 'bar': 'else2'}]}]
        with pytest.raises(ValueError):
            json_normalize(data, 'data', meta=['foo', 'bar'])
        result = json_normalize(data, 'data', meta=['foo', 'bar'],
                                meta_prefix='meta')
        for val in ['metafoo', 'metabar', 'foo', 'bar']:
            assert val in result
    def test_meta_parameter_not_modified(self):
        # GH 18610
        data = [{'foo': 'hello',
                 'bar': 'there',
                 'data': [{'foo': 'something', 'bar': 'else'},
                          {'foo': 'something2', 'bar': 'else2'}]}]
        COLUMNS = ['foo', 'bar']
        result = json_normalize(data, 'data', meta=COLUMNS,
                                meta_prefix='meta')
        # the caller's meta list must not be mutated in place
        assert COLUMNS == ['foo', 'bar']
        for val in ['metafoo', 'metabar', 'foo', 'bar']:
            assert val in result
    def test_record_prefix(self, state_data):
        result = json_normalize(state_data[0], 'counties')
        expected = DataFrame(state_data[0]['counties'])
        tm.assert_frame_equal(result, expected)
        result = json_normalize(state_data, 'counties',
                                meta='state',
                                record_prefix='county_')
        expected = []
        for rec in state_data:
            expected.extend(rec['counties'])
        expected = DataFrame(expected)
        expected = expected.rename(columns=lambda x: 'county_' + x)
        expected['state'] = np.array(['Florida', 'Ohio']).repeat([3, 2])
        tm.assert_frame_equal(result, expected)
    def test_non_ascii_key(self):
        # UTF-8 encoded key bytes on PY2, decoded text on PY3
        if compat.PY3:
            testjson = (
                b'[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},' +
                b'{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]'
            ).decode('utf8')
        else:
            testjson = ('[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},'
                        '{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]')
        testdata = {
            u'sub.A': [1, 3],
            u'sub.B': [2, 4],
            b"\xc3\x9cnic\xc3\xb8de".decode('utf8'): [0, 1]
        }
        expected = DataFrame(testdata)
        result = json_normalize(json.loads(testjson))
        tm.assert_frame_equal(result, expected)
    def test_missing_field(self, author_missing_data):
        # GH20030:
        result = json_normalize(author_missing_data)
        ex_data = [
            {'info': np.nan,
             'author_name.first': np.nan,
             'author_name.last_name': np.nan,
             'info.created_at': np.nan,
             'info.last_updated': np.nan},
            {'info': None,
             'author_name.first': 'Jane',
             'author_name.last_name': 'Doe',
             'info.created_at': '11/08/1993',
             'info.last_updated': '26/05/2012'}
        ]
        expected = DataFrame(ex_data)
        tm.assert_frame_equal(result, expected)
class TestNestedToRecord(object):
    """Unit tests for the nested_to_record flattening helper and the
    errors= handling of json_normalize."""
    def test_flat_stays_flat(self):
        recs = [dict(flat1=1, flat2=2),
                dict(flat1=3, flat2=4),
                ]
        result = nested_to_record(recs)
        expected = recs
        assert result == expected
    def test_one_level_deep_flattens(self):
        data = dict(flat1=1,
                    dict1=dict(c=1, d=2))
        result = nested_to_record(data)
        expected = {'dict1.c': 1,
                    'dict1.d': 2,
                    'flat1': 1}
        assert result == expected
    def test_nested_flattens(self):
        data = dict(flat1=1,
                    dict1=dict(c=1, d=2),
                    nested=dict(e=dict(c=1, d=2),
                                d=2))
        result = nested_to_record(data)
        expected = {'dict1.c': 1,
                    'dict1.d': 2,
                    'flat1': 1,
                    'nested.d': 2,
                    'nested.e.c': 1,
                    'nested.e.d': 2}
        assert result == expected
    def test_json_normalize_errors(self):
        # GH14583: If meta keys are not always present
        # a new option to set errors='ignore' has been implemented
        i = {
            "Trades": [{
                "general": {
                    "tradeid": 100,
                    "trade_version": 1,
                    "stocks": [{
                        "symbol": "AAPL",
                        "name": "Apple",
                        "price": "0"
                    }, {
                        "symbol": "GOOG",
                        "name": "Google",
                        "price": "0"
                    }
                    ]
                }
            }, {
                "general": {
                    "tradeid": 100,
                    "stocks": [{
                        "symbol": "AAPL",
                        "name": "Apple",
                        "price": "0"
                    }, {
                        "symbol": "GOOG",
                        "name": "Google",
                        "price": "0"
                    }
                    ]
                }
            }
            ]
        }
        # second trade lacks 'trade_version'; errors='ignore' fills NaN
        j = json_normalize(data=i['Trades'],
                           record_path=[['general', 'stocks']],
                           meta=[['general', 'tradeid'],
                                 ['general', 'trade_version']],
                           errors='ignore')
        expected = {'general.trade_version': {0: 1.0, 1: 1.0, 2: '', 3: ''},
                    'general.tradeid': {0: 100, 1: 100, 2: 100, 3: 100},
                    'name': {0: 'Apple', 1: 'Google', 2: 'Apple', 3: 'Google'},
                    'price': {0: '0', 1: '0', 2: '0', 3: '0'},
                    'symbol': {0: 'AAPL', 1: 'GOOG', 2: 'AAPL', 3: 'GOOG'}}
        assert j.fillna('').to_dict() == expected
        # with errors='raise' the missing meta key must raise KeyError
        pytest.raises(KeyError,
                      json_normalize, data=i['Trades'],
                      record_path=[['general', 'stocks']],
                      meta=[['general', 'tradeid'],
                            ['general', 'trade_version']],
                      errors='raise'
                      )
    def test_donot_drop_nonevalues(self):
        # GH21356
        data = [
            {'info': None,
             'author_name':
             {'first': 'Smith', 'last_name': 'Appleseed'}
             },
            {'info':
                {'created_at': '11/08/1993', 'last_updated': '26/05/2012'},
             'author_name':
             {'first': 'Jane', 'last_name': 'Doe'}
             }
        ]
        result = nested_to_record(data)
        expected = [
            {'info': None,
             'author_name.first': 'Smith',
             'author_name.last_name': 'Appleseed'},
            {'author_name.first': 'Jane',
             'author_name.last_name': 'Doe',
             'info.created_at': '11/08/1993',
             'info.last_updated': '26/05/2012'}]
        assert result == expected
    def test_nonetype_top_level_bottom_level(self):
        # GH21158: If inner level json has a key with a null value
        # make sure it doesnt do a new_d.pop twice and except
        data = {
            "id": None,
            "location": {
                "country": {
                    "state": {
                        "id": None,
                        "town.info": {
                            "id": None,
                            "region": None,
                            "x": 49.151580810546875,
                            "y": -33.148521423339844,
                            "z": 27.572303771972656}}}
            }
        }
        result = nested_to_record(data)
        expected = {
            'id': None,
            'location.country.state.id': None,
            'location.country.state.town.info.id': None,
            'location.country.state.town.info.region': None,
            'location.country.state.town.info.x': 49.151580810546875,
            'location.country.state.town.info.y': -33.148521423339844,
            'location.country.state.town.info.z': 27.572303771972656}
        assert result == expected
    def test_nonetype_multiple_levels(self):
        # GH21158: If inner level json has a key with a null value
        # make sure it doesnt do a new_d.pop twice and except
        data = {
            "id": None,
            "location": {
                "id": None,
                "country": {
                    "id": None,
                    "state": {
                        "id": None,
                        "town.info": {
                            "region": None,
                            "x": 49.151580810546875,
                            "y": -33.148521423339844,
                            "z": 27.572303771972656}}}
            }
        }
        result = nested_to_record(data)
        expected = {
            'id': None,
            'location.id': None,
            'location.country.id': None,
            'location.country.state.id': None,
            'location.country.state.town.info.region': None,
            'location.country.state.town.info.x': 49.151580810546875,
            'location.country.state.town.info.y': -33.148521423339844,
            'location.country.state.town.info.z': 27.572303771972656}
        assert result == expected
| bsd-3-clause |
sitexa/foobnix | test/test_vk_service.py | 3 | 1820 | #-*- coding: utf-8 -*-
'''
Created on 21 нояб. 2010
@author: ivan
'''
import unittest
from foobnix.gui.service.vk_service import VKService
from foobnix.util.url_utils import get_url_type
class TestVKService(unittest.TestCase):
    # NOTE(review): these are live integration tests -- they talk to the
    # real VK service over the network, so they need connectivity and a
    # working login inside VKService.
    vk_service = VKService(True)
    def test_login(self):
        self.assertTrue(self.vk_service.is_connected())
    def test_search_page(self):
        self.assertTrue(self.vk_service.search("Madonna").find("Madonna") > -1)
    def test_find_videos(self):
        # Each bean should expose a direct media URL, not an HTML page.
        list = self.vk_service.find_videos_by_query("Мадонна")
        for bean in list[:10]:
            self.assertNotEquals("text/html", get_url_type(bean.path))
            self.assertTrue(bean.path.startswith("http://"))
    def test_find_track(self):
        bean = self.vk_service.find_one_track("Мадонна")
        self.assertTrue(bean.path.startswith("http://"))
    def test_bad_link_track(self):
        beans = self.vk_service.find_videos_by_query("akon-cry out of jou(michael jackson tribute")
        # sample of the URL shape expected (no-op statement left by the author)
        "http://cs12907.vkontakte.ru/u87507380/video/bee60bc871.240.mp4"
        path = beans[0].path
        self.assertNotEquals("text/html", get_url_type(path))
    def test_find_by_url(self):
        list = self.vk_service.find_tracks_by_url("http://vkontakte.ru/audio.php?gid=2849#album_id=0&gid=2849&id=0&offset=200")
        for bean in list:
            self.assertTrue(bean.path.startswith("http://"))
    def test_find_by_url_user(self):
        list = self.vk_service.find_tracks_by_url("http://vkontakte.ru/audio.php?id=14775382")
        for bean in list:
            # track text must not contain leftover HTML fragments
            self.assertFalse('\">' in bean.text)
            self.assertTrue(bean.path.startswith("http://"))
if __name__ == '__main__':
    # Run the suite when this module is executed directly.
    unittest.main()
| gpl-3.0 |
Antiun/odoo | addons/account/project/report/quantity_cost_ledger.py | 358 | 6204 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class account_analytic_quantity_cost_ledger(report_sxw.rml_parse):
    """Parser for the analytic 'cost ledger (quantities)' report.

    Registers helper callables in the report rendering context that
    aggregate quantities (unit_amount) from account_analytic_line,
    grouped per general account or listed per analytic line, with an
    optional restriction to a set of analytic journals.
    """
    def __init__(self, cr, uid, name, context):
        super(account_analytic_quantity_cost_ledger, self).__init__(cr, uid, name, context=context)
        self.localcontext.update( {
            'time': time,
            'lines_g': self._lines_g,
            'lines_a': self._lines_a,
            'sum_quantity': self._sum_quantity,
            'account_sum_quantity': self._account_sum_quantity,
        })
    def _lines_g(self, account_id, date1, date2, journals):
        # Quantities grouped by general account for one analytic account
        # over [date1, date2]; an empty 'journals' means all journals.
        if not journals:
            self.cr.execute("SELECT sum(aal.unit_amount) AS quantity, \
                    aa.code AS code, aa.name AS name, aa.id AS id \
                    FROM account_account AS aa, account_analytic_line AS aal \
                    WHERE (aal.account_id=%s) AND (aal.date>=%s) \
                        AND (aal.date<=%s) AND (aal.general_account_id=aa.id) \
                        AND aa.active \
                    GROUP BY aa.code, aa.name, aa.id ORDER BY aa.code",
                    (account_id, date1, date2))
        else:
            journal_ids = journals
            self.cr.execute("SELECT sum(aal.unit_amount) AS quantity, \
                    aa.code AS code, aa.name AS name, aa.id AS id \
                    FROM account_account AS aa, account_analytic_line AS aal \
                    WHERE (aal.account_id=%s) AND (aal.date>=%s) \
                        AND (aal.date<=%s) AND (aal.general_account_id=aa.id) \
                        AND aa.active \
                        AND (aal.journal_id IN %s ) \
                    GROUP BY aa.code, aa.name, aa.id ORDER BY aa.code",
                    (account_id, date1, date2, tuple(journal_ids)))
        res = self.cr.dictfetchall()
        return res
    def _lines_a(self, general_account_id, account_id, date1, date2, journals):
        # Detail lines (name, code, quantity, date, journal code) for one
        # general account / analytic account pair over [date1, date2].
        if not journals:
            self.cr.execute("SELECT aal.name AS name, aal.code AS code, \
                    aal.unit_amount AS quantity, aal.date AS date, \
                    aaj.code AS cj \
                    FROM account_analytic_line AS aal, \
                    account_analytic_journal AS aaj \
                    WHERE (aal.general_account_id=%s) AND (aal.account_id=%s) \
                        AND (aal.date>=%s) AND (aal.date<=%s) \
                        AND (aal.journal_id=aaj.id) \
                    ORDER BY aal.date, aaj.code, aal.code",
                    (general_account_id, account_id, date1, date2))
        else:
            journal_ids = journals
            self.cr.execute("SELECT aal.name AS name, aal.code AS code, \
                    aal.unit_amount AS quantity, aal.date AS date, \
                    aaj.code AS cj \
                    FROM account_analytic_line AS aal, \
                    account_analytic_journal AS aaj \
                    WHERE (aal.general_account_id=%s) AND (aal.account_id=%s) \
                        AND (aal.date>=%s) AND (aal.date<=%s) \
                        AND (aal.journal_id=aaj.id) AND (aaj.id IN %s) \
                    ORDER BY aal.date, aaj.code, aal.code",
                    (general_account_id, account_id, date1, date2,tuple(journal_ids)))
        res = self.cr.dictfetchall()
        return res
    def _account_sum_quantity(self, account_id, date1, date2, journals):
        # Total quantity for a single analytic account over [date1, date2].
        if not journals:
            self.cr.execute("SELECT sum(unit_amount) \
                    FROM account_analytic_line \
                    WHERE account_id=%s AND date>=%s AND date<=%s",
                    (account_id, date1, date2))
        else:
            journal_ids = journals
            self.cr.execute("SELECT sum(unit_amount) \
                    FROM account_analytic_line \
                    WHERE account_id = %s AND date >= %s AND date <= %s \
                        AND journal_id IN %s",
                    (account_id, date1, date2, tuple(journal_ids),))
        return self.cr.fetchone()[0] or 0.0
    def _sum_quantity(self, accounts, date1, date2, journals):
        # Grand total quantity over a list of analytic account browse
        # records (used for the report footer).
        ids = map(lambda x: x.id, accounts)
        if not ids:
            return 0.0
        if not journals:
            self.cr.execute("SELECT sum(unit_amount) \
                    FROM account_analytic_line \
                    WHERE account_id IN %s AND date>=%s AND date<=%s",
                    (tuple(ids), date1, date2,))
        else:
            journal_ids = journals
            self.cr.execute("SELECT sum(unit_amount) \
                    FROM account_analytic_line \
                    WHERE account_id IN %s AND date >= %s AND date <= %s \
                        AND journal_id IN %s",(tuple(ids), date1, date2, tuple(journal_ids)))
        return self.cr.fetchone()[0] or 0.0
class report_analyticcostledgerquantity(osv.AbstractModel):
    # Registers the parser above as the renderer for the
    # 'account.report_analyticcostledgerquantity' QWeb report template.
    _name = 'report.account.report_analyticcostledgerquantity'
    _inherit = 'report.abstract_report'
    _template = 'account.report_analyticcostledgerquantity'
    _wrapped_report_class = account_analytic_quantity_cost_ledger
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
HerlanAssis/Django-AulaOsvandoSantana | lib/python2.7/site-packages/pip/wheel.py | 145 | 20120 | """
Support for installing and building the "wheel" binary package format.
"""
from __future__ import with_statement
import compileall
import csv
import functools
import hashlib
import os
import re
import shutil
import sys
from base64 import urlsafe_b64encode
from email.parser import Parser
from pip.backwardcompat import ConfigParser, StringIO
from pip.exceptions import InvalidWheelFilename, UnsupportedWheel
from pip.locations import distutils_scheme
from pip.log import logger
from pip import pep425tags
from pip.util import call_subprocess, normalize_path, make_path_relative
from pip._vendor import pkg_resources
from pip._vendor.distlib.scripts import ScriptMaker
from pip._vendor import pkg_resources
wheel_ext = '.whl'  # file extension of built wheel archives
VERSION_COMPATIBLE = (1, 0)  # highest Wheel-Version metadata this code installs
def rehash(path, algo='sha256', blocksize=1<<20):
    """Return (hash, length) for path using hashlib.new(algo).

    The hash is formatted as '<algo>=<urlsafe-b64 digest, unpadded>' as
    used in wheel RECORD files.  Previously the prefix was hard-coded to
    'sha256=' even when a different algo was requested, mislabelling the
    digest; it now reflects the algorithm actually used (unchanged for
    the default algo).
    """
    h = hashlib.new(algo)
    length = 0
    # Read in blocks so large files do not have to fit in memory; the
    # 'with' block guarantees the handle is closed even on read errors.
    with open(path, 'rb') as f:
        block = f.read(blocksize)
        while block:
            length += len(block)
            h.update(block)
            block = f.read(blocksize)
    digest = algo + '=' + urlsafe_b64encode(h.digest()).decode('latin1').rstrip('=')
    return (digest, length)
try:
    unicode
    def binary(s):
        """Coerce text to bytes (Python 2 branch): encode unicode as
        ASCII, pass byte strings through unchanged."""
        if isinstance(s, unicode):
            return s.encode('ascii')
        return s
except NameError:
    def binary(s):
        """Coerce text to bytes (Python 3 branch): encode str as ASCII,
        pass bytes through unchanged.  Previously this branch fell off
        the end and returned None for bytes input, unlike the Python 2
        branch above."""
        if isinstance(s, str):
            return s.encode('ascii')
        return s
def open_for_csv(name, mode):
    """Open *name* for use with the csv module, portably.

    On Python 2 the csv module wants binary mode; on Python 3 it wants
    text mode with newline translation disabled (newline='').
    """
    if sys.version_info[0] < 3:
        return open(name, mode + 'b')
    return open(name, mode, newline='')
def fix_script(path):
    """Replace #!python with #!/path/to/python
    Return True if file was changed."""
    # XXX RECORD hashes will need to be updated
    if not os.path.isfile(path):
        return None
    with open(path, 'rb') as script:
        firstline = script.readline()
        # Only wheel-generated scripts carry the '#!python' placeholder;
        # anything else is left untouched.
        if not firstline.startswith(binary('#!python')):
            return False
        exename = sys.executable.encode(sys.getfilesystemencoding())
        firstline = binary('#!') + exename + binary(os.linesep)
        rest = script.read()
    with open(path, 'wb') as script:
        script.write(firstline)
        script.write(rest)
    return True
# Matches '<name>[-<version>].dist-info' directory names inside a wheel.
dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
                            \.dist-info$""", re.VERBOSE)

def root_is_purelib(name, wheeldir):
    """
    Return True if the extracted wheel in wheeldir should go into purelib.
    """
    # dist-info directory names use '_' where the project name has '-'.
    folded = name.replace("-", "_")
    for entry in os.listdir(wheeldir):
        matched = dist_info_re.match(entry)
        if not matched or matched.group('name') != folded:
            continue
        with open(os.path.join(wheeldir, entry, 'WHEEL')) as wheel:
            for raw in wheel:
                if raw.lower().rstrip() == "root-is-purelib: true":
                    return True
    return False
def get_entrypoints(filename):
    """Parse entry_points.txt and return (console_scripts, gui_scripts)
    as dicts; both empty when the file does not exist."""
    if not os.path.exists(filename):
        return {}, {}
    # entry_points wrappers may receive strings that are not strictly
    # valid INI; stripping leading/trailing whitespace per line makes
    # them parseable.
    data = StringIO()
    with open(filename) as fp:
        for line in fp:
            data.write(line.strip())
            data.write("\n")
    data.seek(0)
    cp = ConfigParser.RawConfigParser()
    cp.readfp(data)
    console = {}
    gui = {}
    if cp.has_section('console_scripts'):
        console = dict(cp.items('console_scripts'))
    if cp.has_section('gui_scripts'):
        gui = dict(cp.items('gui_scripts'))
    return console, gui
def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None,
pycompile=True):
"""Install a wheel"""
scheme = distutils_scheme(name, user=user, home=home, root=root)
if root_is_purelib(name, wheeldir):
lib_dir = scheme['purelib']
else:
lib_dir = scheme['platlib']
info_dir = []
data_dirs = []
source = wheeldir.rstrip(os.path.sep) + os.path.sep
# Record details of the files moved
# installed = files copied from the wheel to the destination
# changed = files changed while installing (scripts #! line typically)
# generated = files newly generated during the install (script wrappers)
installed = {}
changed = set()
generated = []
# Compile all of the pyc files that we're going to be installing
if pycompile:
compileall.compile_dir(source, force=True, quiet=True)
def normpath(src, p):
return make_path_relative(src, p).replace(os.path.sep, '/')
def record_installed(srcfile, destfile, modified=False):
"""Map archive RECORD paths to installation RECORD paths."""
oldpath = normpath(srcfile, wheeldir)
newpath = normpath(destfile, lib_dir)
installed[oldpath] = newpath
if modified:
changed.add(destfile)
def clobber(source, dest, is_base, fixer=None, filter=None):
if not os.path.exists(dest): # common for the 'include' path
os.makedirs(dest)
for dir, subdirs, files in os.walk(source):
basedir = dir[len(source):].lstrip(os.path.sep)
if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):
continue
for s in subdirs:
destsubdir = os.path.join(dest, basedir, s)
if is_base and basedir == '' and destsubdir.endswith('.data'):
data_dirs.append(s)
continue
elif (is_base
and s.endswith('.dist-info')
# is self.req.project_name case preserving?
and s.lower().startswith(req.project_name.replace('-', '_').lower())):
assert not info_dir, 'Multiple .dist-info directories'
info_dir.append(destsubdir)
if not os.path.exists(destsubdir):
os.makedirs(destsubdir)
for f in files:
# Skip unwanted files
if filter and filter(f):
continue
srcfile = os.path.join(dir, f)
destfile = os.path.join(dest, basedir, f)
shutil.move(srcfile, destfile)
changed = False
if fixer:
changed = fixer(destfile)
record_installed(srcfile, destfile, changed)
clobber(source, lib_dir, True)
assert info_dir, "%s .dist-info directory not found" % req
# Get the defined entry points
ep_file = os.path.join(info_dir[0], 'entry_points.txt')
console, gui = get_entrypoints(ep_file)
def is_entrypoint_wrapper(name):
# EP, EP.exe and EP-script.py are scripts generated for
# entry point EP by setuptools
if name.lower().endswith('.exe'):
matchname = name[:-4]
elif name.lower().endswith('-script.py'):
matchname = name[:-10]
elif name.lower().endswith(".pya"):
matchname = name[:-4]
else:
matchname = name
# Ignore setuptools-generated scripts
return (matchname in console or matchname in gui)
for datadir in data_dirs:
fixer = None
filter = None
for subdir in os.listdir(os.path.join(wheeldir, datadir)):
fixer = None
if subdir == 'scripts':
fixer = fix_script
filter = is_entrypoint_wrapper
source = os.path.join(wheeldir, datadir, subdir)
dest = scheme[subdir]
clobber(source, dest, False, fixer=fixer, filter=filter)
maker = ScriptMaker(None, scheme['scripts'])
# Ensure we don't generate any variants for scripts because this is almost
# never what somebody wants.
# See https://bitbucket.org/pypa/distlib/issue/35/
maker.variants = set(('', ))
# This is required because otherwise distlib creates scripts that are not
# executable.
# See https://bitbucket.org/pypa/distlib/issue/32/
maker.set_mode = True
# Simplify the script and fix the fact that the default script swallows
# every single stack trace.
# See https://bitbucket.org/pypa/distlib/issue/34/
# See https://bitbucket.org/pypa/distlib/issue/33/
def _get_script_text(entry):
return maker.script_template % {
"module": entry.prefix,
"import_name": entry.suffix.split(".")[0],
"func": entry.suffix,
}
# Override distlib's script rendering with the simplified template below.
maker._get_script_text = _get_script_text
maker.script_template = """# -*- coding: utf-8 -*-
import re
import sys
from %(module)s import %(import_name)s
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(%(func)s())
"""

# Special case pip and setuptools to generate versioned wrappers
#
# The issue is that some projects (specifically, pip and setuptools) use
# code in setup.py to create "versioned" entry points - pip2.7 on Python
# 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
# the wheel metadata at build time, and so if the wheel is installed with
# a *different* version of Python the entry points will be wrong. The
# correct fix for this is to enhance the metadata to be able to describe
# such versioned entry points, but that won't happen till Metadata 2.0 is
# available.
# In the meantime, projects using versioned entry points will either have
# incorrect versioned entry points, or they will not be able to distribute
# "universal" wheels (i.e., they will need a wheel per Python version).
#
# Because setuptools and pip are bundled with _ensurepip and virtualenv,
# we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
# override the versioned entry points in the wheel and generate the
# correct ones. This code is purely a short-term measure until Metadata 2.0
# is available.
#
# To add the level of hack in this section of code, in order to support
# ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment
# variable which will control which version scripts get installed.
#
# ENSUREPIP_OPTIONS=altinstall
#   - Only pipX.Y and easy_install-X.Y will be generated and installed
# ENSUREPIP_OPTIONS=install
#   - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
#     that this option is technically if ENSUREPIP_OPTIONS is set and is
#     not altinstall
# DEFAULT
#   - The default behavior is to install pip, pipX, pipX.Y, easy_install
#     and easy_install-X.Y.
pip_script = console.pop('pip', None)
if pip_script:
    if "ENSUREPIP_OPTIONS" not in os.environ:
        spec = 'pip = ' + pip_script
        generated.extend(maker.make(spec))

    if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
        # e.g. "pip3" on Python 3.x (sys.version[:1] is the major digit)
        spec = 'pip%s = %s' % (sys.version[:1], pip_script)
        generated.extend(maker.make(spec))

    # always generate the fully-versioned wrapper, e.g. "pip3.4"
    spec = 'pip%s = %s' % (sys.version[:3], pip_script)
    generated.extend(maker.make(spec))

    # Delete any other versioned pip entry points
    pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
    for k in pip_ep:
        del console[k]

easy_install_script = console.pop('easy_install', None)
if easy_install_script:
    if "ENSUREPIP_OPTIONS" not in os.environ:
        spec = 'easy_install = ' + easy_install_script
        generated.extend(maker.make(spec))

    spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script)
    generated.extend(maker.make(spec))

    # Delete any other versioned easy_install entry points
    easy_install_ep = [k for k in console
                       if re.match(r'easy_install(-\d\.\d)?$', k)]
    for k in easy_install_ep:
        del console[k]

# Generate the console and GUI entry points specified in the wheel
if len(console) > 0:
    generated.extend(maker.make_multiple(['%s = %s' % kv for kv in console.items()]))
if len(gui) > 0:
    generated.extend(maker.make_multiple(['%s = %s' % kv for kv in gui.items()], {'gui': True}))

# Rewrite the wheel's RECORD: map source paths to their installed
# destinations, re-hash any files a fixer changed, and append the
# generated scripts; write to RECORD.pip then atomically move over RECORD.
record = os.path.join(info_dir[0], 'RECORD')
temp_record = os.path.join(info_dir[0], 'RECORD.pip')
with open_for_csv(record, 'r') as record_in:
    with open_for_csv(temp_record, 'w+') as record_out:
        reader = csv.reader(record_in)
        writer = csv.writer(record_out)
        for row in reader:
            row[0] = installed.pop(row[0], row[0])
            if row[0] in changed:
                row[1], row[2] = rehash(row[0])
            writer.writerow(row)
        for f in generated:
            h, l = rehash(f)
            writer.writerow((f, h, l))
        for f in installed:
            # files we moved but which were not listed in the original RECORD
            writer.writerow((installed[f], '', ''))
shutil.move(temp_record, record)
def _unique(fn):
@functools.wraps(fn)
def unique(*args, **kw):
seen = set()
for item in fn(*args, **kw):
if item not in seen:
seen.add(item)
yield item
return unique
# TODO: this goes somewhere besides the wheel module
@_unique
def uninstallation_paths(dist):
    """
    Yield all the uninstallation paths for dist based on RECORD-without-.pyc

    Yield paths to all the files in RECORD. For each .py file in RECORD, add
    the .pyc in the same directory.

    UninstallPathSet.add() takes care of the __pycache__ .pyc.
    """
    from pip.req import FakeFile  # circular import
    record_rows = csv.reader(FakeFile(dist.get_metadata_lines('RECORD')))
    for row in record_rows:
        full_path = os.path.join(dist.location, row[0])
        yield full_path
        if full_path.endswith('.py'):
            directory, filename = os.path.split(full_path)
            yield os.path.join(directory, filename[:-3] + '.pyc')
def wheel_version(source_dir):
    """
    Return the Wheel-Version of an extracted wheel, if possible.

    Otherwise, return False if we couldn't parse / extract it.

    :param source_dir: directory containing the extracted wheel contents
    :return: a tuple of ints such as (1, 0), or False on any failure
    """
    try:
        # find_on_path yields the distribution(s) in the extracted tree
        dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0]
        wheel_data = dist.get_metadata('WHEEL')
        # WHEEL metadata is RFC 822 style; parse it with the email parser
        wheel_data = Parser().parsestr(wheel_data)

        version = wheel_data['Wheel-Version'].strip()
        version = tuple(map(int, version.split('.')))
        return version
    except Exception:
        # Narrowed from a bare ``except:`` so that SystemExit and
        # KeyboardInterrupt are no longer silently swallowed here; any
        # ordinary parsing/lookup failure still yields False as documented.
        return False
def check_compatibility(version, name):
    """
    Raises errors or warns if called with an incompatible Wheel-Version.

    Pip should refuse to install a Wheel-Version that's a major series
    ahead of what it's compatible with (e.g 2.0 > 1.1); and warn when
    installing a version only minor version ahead (e.g 1.2 > 1.1).

    version: a 2-tuple representing a Wheel-Version (Major, Minor)
    name: name of wheel or package to raise exception about

    :raises UnsupportedWheel: when an incompatible Wheel-Version is given
    """
    if not version:
        # wheel_version() returns False when it cannot parse the metadata
        raise UnsupportedWheel(
            "%s is in an unsupported or invalid wheel" % name
        )

    major = version[0]
    if major > VERSION_COMPATIBLE[0]:
        # A newer major series than we understand: refuse outright.
        raise UnsupportedWheel(
            "%s's Wheel-Version (%s) is not compatible with this version "
            "of pip" % (name, '.'.join(map(str, version)))
        )
    if version > VERSION_COMPATIBLE:
        # Same major series, newer minor: warn but carry on installing.
        logger.warn('Installing from a newer Wheel-Version (%s)'
                    % '.'.join(map(str, version)))
class Wheel(object):
    """A wheel file"""

    # TODO: maybe move the install code into this class

    # Parses wheel (and .dist-info) names per the wheel naming convention:
    # name-version[-build]-pyver-abi-plat.whl
    wheel_file_re = re.compile(
        r"""^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))
        ((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
        \.whl|\.dist-info)$""",
        re.VERBOSE)

    def __init__(self, filename):
        """
        :raises InvalidWheelFilename: when the filename is invalid for a wheel
        """
        match = self.wheel_file_re.match(filename)
        if match is None:
            raise InvalidWheelFilename("%s is not a valid wheel filename." % filename)
        self.filename = filename
        # we'll assume "_" means "-" due to wheel naming scheme
        # (https://github.com/pypa/pip/issues/1150)
        self.name = match.group('name').replace('_', '-')
        self.version = match.group('ver').replace('_', '-')
        # compressed tag fields may hold several values joined by "."
        self.pyversions = match.group('pyver').split('.')
        self.abis = match.group('abi').split('.')
        self.plats = match.group('plat').split('.')

        # All the tag combinations from this file
        self.file_tags = {
            (py, abi, plat)
            for py in self.pyversions
            for abi in self.abis
            for plat in self.plats
        }

    def support_index_min(self, tags=None):
        """
        Return the lowest index that one of the wheel's file_tag combinations
        achieves in the supported_tags list e.g. if there are 8 supported tags,
        and one of the file tags is first in the list, then return 0. Returns
        None is the wheel is not supported.
        """
        if tags is None:  # for mock
            tags = pep425tags.supported_tags
        ranks = [tags.index(combo) for combo in self.file_tags if combo in tags]
        if not ranks:
            return None
        return min(ranks)

    def supported(self, tags=None):
        """Is this wheel supported on this system?"""
        if tags is None:  # for mock
            tags = pep425tags.supported_tags
        return not set(tags).isdisjoint(self.file_tags)
class WheelBuilder(object):
    """Build wheels from a RequirementSet."""

    def __init__(self, requirement_set, finder, wheel_dir, build_options=None, global_options=None):
        """
        :param requirement_set: the RequirementSet whose members are built
        :param finder: a PackageFinder used to prepare the requirement set
        :param wheel_dir: directory in which the built wheels are placed
        :param build_options: extra args passed to ``setup.py bdist_wheel``
        :param global_options: extra args passed to every ``setup.py`` call
        """
        self.requirement_set = requirement_set
        self.finder = finder
        self.wheel_dir = normalize_path(wheel_dir)
        # Fixed: the defaults were mutable ``[]`` literals, which are shared
        # across all instances and could be mutated in place. ``None`` plus a
        # per-instance list keeps the same behavior without that hazard.
        self.build_options = build_options if build_options is not None else []
        self.global_options = global_options if global_options is not None else []

    def _build_one(self, req):
        """Build one wheel. Returns True on success, False on failure."""
        # Run the requirement's setup.py in a subprocess; __file__ is set
        # explicitly and CRLF is normalized so exec() works on all platforms.
        base_args = [
            sys.executable, '-c',
            "import setuptools;__file__=%r;"\
            "exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))" % req.setup_py] + \
            list(self.global_options)

        logger.notify('Running setup.py bdist_wheel for %s' % req.name)
        logger.notify('Destination directory: %s' % self.wheel_dir)
        wheel_args = base_args + ['bdist_wheel', '-d', self.wheel_dir] + self.build_options
        try:
            call_subprocess(wheel_args, cwd=req.source_dir, show_stdout=False)
            return True
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # abort the whole run instead of being recorded as a build failure.
            logger.error('Failed building wheel for %s' % req.name)
            return False

    def build(self):
        """Build wheels for every non-wheel requirement in the set."""
        # unpack and construct the req set
        self.requirement_set.prepare_files(self.finder)

        reqset = self.requirement_set.requirements.values()
        buildset = [req for req in reqset if not req.is_wheel]

        if not buildset:
            return

        # build the wheels
        logger.notify(
            'Building wheels for collected packages: %s' %
            ','.join([req.name for req in buildset])
        )
        logger.indent += 2
        build_success, build_failure = [], []
        for req in buildset:
            if self._build_one(req):
                build_success.append(req)
            else:
                build_failure.append(req)
        logger.indent -= 2

        # notify success/failure
        if build_success:
            logger.notify('Successfully built %s' % ' '.join([req.name for req in build_success]))
        if build_failure:
            logger.notify('Failed to build %s' % ' '.join([req.name for req in build_failure]))
| mit |
open-falcon/portal | web/model/template.py | 6 | 3335 | # -*- coding:utf-8 -*-
__author__ = 'Ulric Qin'
from .bean import Bean
from frame.config import MAINTAINERS
from frame.api import uic
from .strategy import Strategy
from .action import Action
class Template(Bean):
    """Bean mapped to the ``tpl`` table: an alarm template owned by a user,
    optionally inheriting from a parent template and bound to an Action."""

    _tbl = 'tpl'
    _cols = 'id, tpl_name, parent_id, action_id, create_user'

    def __init__(self, _id, tpl_name, parent_id, action_id, create_user):
        self.id = _id
        self.tpl_name = tpl_name
        self.parent_id = parent_id
        self.action_id = action_id
        self.create_user = create_user
        # related beans, populated lazily elsewhere (not persisted columns)
        self.parent = None
        self.action = None

    def to_json(self):
        """Return a plain dict of the persisted fields for JSON responses."""
        return {
            'id': self.id,
            'name': self.tpl_name,
            'parent_id': self.parent_id,
            'action_id': self.action_id,
            'create_user': self.create_user,
        }

    @classmethod
    def query(cls, page, limit, query, me=None):
        """Page through templates, optionally restricted to creator ``me``
        and/or a name substring ``query``. Returns (rows, total)."""
        where = ''
        params = []
        if me is not None:
            where = 'create_user = %s'
            params = [me]
        if query:
            # join the two filters with AND when both are present
            where += ' and ' if where else ''
            where += 'tpl_name like %s'
            params.append('%' + query + '%')
        vs = cls.select_vs(where=where, params=params, page=page, limit=limit, order='tpl_name')
        total = cls.total(where, params)
        return vs, total

    def writable(self, login_user):
        """Can ``login_user`` modify this template? True for the creator,
        global maintainers, or members of the bound action's UIC groups."""
        if self.create_user == login_user:
            return True
        if login_user in MAINTAINERS:
            return True
        a = self.action
        if not a:
            return False
        if not a.uic:
            return False
        return uic.email_in_groups(login_user, a.uic)

    def fork(self, login_user):
        """Deep-copy this template (action + strategies) for ``login_user``
        under the name 'copy_of_<name>'. Returns the new template id, or -1
        if a template with that name already exists."""
        new_name = 'copy_of_' + self.tpl_name
        if self.__class__.read('tpl_name=%s', [new_name]):
            return -1

        # fork action
        action_id = self.action_id
        if action_id:
            action = Action.get(action_id)
            if action:
                action_id = Action.insert(
                    {
                        'uic': action.uic,
                        'url': action.url,
                        'callback': action.callback,
                        'before_callback_sms': action.before_callback_sms,
                        'before_callback_mail': action.before_callback_mail,
                        'after_callback_sms': action.after_callback_sms,
                        'after_callback_mail': action.after_callback_mail,
                    }
                )

        # fork tpl
        tpl_id = self.__class__.insert({
            'tpl_name': new_name,
            'parent_id': self.parent_id,
            'action_id': action_id,
            'create_user': login_user,
        })

        # fork strategy
        ss = Strategy.select_vs(where='tpl_id = %s', params=[self.id])
        for s in ss:
            Strategy.insert({
                'metric': s.metric,
                'tags': s.tags,
                'max_step': s.max_step,
                'priority': s.priority,
                'func': s.func,
                'op': s.op,
                'right_value': s.right_value,
                'note': s.note,
                'run_begin': s.run_begin,
                'run_end': s.run_end,
                'tpl_id': tpl_id,
            })

        return tpl_id
40223119/w17test | static/Brython3.1.0-20150301-090019/Lib/site-packages/pygame/locals.py | 603 | 1141 | ## pygame - Python Game Library
## Copyright (C) 2000-2003 Pete Shinners
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## pete@shinners.org
"""Set of functions from PyGame that are handy to have in
the local namespace for your module"""
from pygame.constants import *
from pygame.rect import Rect
import pygame.color as color
Color = color.Color
| gpl-3.0 |
mbedmicro/pyOCD | pyocd/probe/pydapaccess/interface/interface.py | 3 | 1719 | # pyOCD debugger
# Copyright (c) 2006-2013,2018 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Interface(object):
    """Base class describing a transport to a CMSIS-DAP debug probe.

    Concrete subclasses fill in the USB identifiers and override the
    open/close/read/write methods; this base provides inert defaults.
    """

    def __init__(self):
        # USB identifiers and descriptor strings; populated by concrete
        # implementations during device enumeration.
        self.vid = 0
        self.pid = 0
        self.vendor_name = ""
        self.product_name = ""
        self.serial_number = ""
        # Command pipelining parameters.
        self.packet_count = 1
        self.packet_size = 64

    @property
    def has_swo_ep(self):
        """Whether the interface exposes a dedicated SWO endpoint."""
        return False

    def open(self):
        return

    def close(self):
        return

    def write(self, data):
        return

    def read(self, size=-1, timeout=-1):
        return

    def get_info(self):
        """Human-readable description: '<vendor> <product> (0xVID, 0xPID)'."""
        return "%s %s (%s, %s)" % (
            self.vendor_name,
            self.product_name,
            hex(self.vid),
            hex(self.pid),
        )

    def get_packet_count(self):
        return self.packet_count

    def set_packet_count(self, count):
        # No interface level restrictions on count
        self.packet_count = count

    def set_packet_size(self, size):
        self.packet_size = size

    def get_packet_size(self):
        return self.packet_size

    def get_serial_number(self):
        return self.serial_number
| apache-2.0 |
DOAJ/doaj | portality/forms/application_processors.py | 1 | 46290 | import uuid
from datetime import datetime
import portality.notifications.application_emails as emails
from portality.core import app
from portality import models, constants, app_email
from portality.lib.formulaic import FormProcessor
from portality.ui.messages import Messages
from portality.crosswalks.application_form import ApplicationFormXWalk
from portality.crosswalks.journal_form import JournalFormXWalk
from portality.formcontext.choices import Choices
from portality.bll import exceptions
from flask import url_for, request, has_request_context
from flask_login import current_user
from wtforms import FormField, FieldList
class ApplicationProcessor(FormProcessor):
    """Shared base for application/journal form processors: helpers to carry
    immutable fields, notes and continuation data from the source object
    forward onto the freshly crosswalked target."""

    def pre_validate(self):
        # to bypass WTForms insistence that choices on a select field match the value, outside of the actual validation
        # chain
        super(ApplicationProcessor, self).pre_validate()

    def _carry_fixed_aspects(self):
        """Copy fields that must never change through a form edit (created
        date, id, date_applied, current/related application and journal links,
        in_doaj flag) from ``self.source`` onto ``self.target``."""
        if self.source is None:
            raise Exception("Cannot carry data from a non-existent source")

        now = datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")

        # copy over any important fields from the previous version of the object
        created_date = self.source.created_date if self.source.created_date else now
        self.target.set_created(created_date)
        if "id" in self.source.data:
            self.target.data['id'] = self.source.data['id']

        try:
            if self.source.date_applied is not None:
                self.target.date_applied = self.source.date_applied
        except AttributeError:
            # fixme: should there always be a date_applied? Only true for applications
            pass

        try:
            if self.source.current_application:
                self.target.set_current_application(self.source.current_application)
        except AttributeError:
            # this means that the source doesn't know about current_applications, which is fine
            pass

        try:
            if self.source.current_journal:
                self.target.set_current_journal(self.source.current_journal)
        except AttributeError:
            # this means that the source doesn't know about current_journals, which is fine
            pass

        try:
            if self.source.related_journal:
                self.target.set_related_journal(self.source.related_journal)
        except AttributeError:
            # this means that the source doesn't know about related_journals, which is fine
            pass

        try:
            if self.source.related_applications:
                related = self.source.related_applications
                for rel in related:
                    self.target.add_related_application(rel.get("application_id"), rel.get("date_accepted"))
        except AttributeError:
            # this means that the source doesn't know about related_applications, which is fine
            pass

        # if the source is a journal, we need to carry the in_doaj flag
        if isinstance(self.source, models.Journal):
            self.target.set_in_doaj(self.source.is_in_doaj())

    def _merge_notes_forward(self, allow_delete=False):
        """Reconcile the target's notes with the source's: restore original
        note dates, drop blank notes, and (unless ``allow_delete`` is True)
        re-add any source notes missing from the target."""
        if self.source is None:
            raise Exception("Cannot carry data from a non-existent source")
        if self.target is None:
            raise Exception("Cannot carry data on to a non-existent target - run the xwalk first")

        # first off, get the notes (by reference) in the target and the notes from the source
        tnotes = self.target.notes
        snotes = self.source.notes

        # if there are no notes, we might not have the notes by reference, so later will
        # need to set them by value
        apply_notes_by_value = len(tnotes) == 0

        # for each of the target notes we need to get the original dates from the source notes
        for n in tnotes:
            for sn in snotes:
                if n.get("id") == sn.get("id"):
                    n["date"] = sn.get("date")

        # record the positions of any blank notes
        i = 0
        removes = []
        for n in tnotes:
            if n.get("note").strip() == "":
                removes.append(i)
            i += 1

        # actually remove all the notes marked for deletion
        # (reverse order so earlier indices remain valid while popping)
        removes.sort(reverse=True)
        for r in removes:
            tnotes.pop(r)

        # finally, carry forward any notes that aren't already in the target
        if not allow_delete:
            for sn in snotes:
                found = False
                for tn in tnotes:
                    if sn.get("id") == tn.get("id"):
                        found = True
                if not found:
                    tnotes.append(sn)

        if apply_notes_by_value:
            self.target.set_notes(tnotes)

    def _carry_continuations(self):
        """Copy journal continuation data (replaces / is_replaced_by /
        discontinued_date) from the source bibjson to the target bibjson."""
        if self.source is None:
            raise Exception("Cannot carry data from a non-existent source")

        try:
            sbj = self.source.bibjson()
            tbj = self.target.bibjson()
            if sbj.replaces:
                tbj.replaces = sbj.replaces
            if sbj.is_replaced_by:
                tbj.is_replaced_by = sbj.is_replaced_by
            if sbj.discontinued_date:
                tbj.discontinued_date = sbj.discontinued_date
        except AttributeError:
            # this means that the source doesn't know about continuations, which is fine
            pass
class NewApplication(ApplicationProcessor):
    """
    Public Application Form Context. This is also a sort of demonstrator as to how to implement
    one, so it will do unnecessary things like override methods that don't actually need to be overridden.

    This should be used in a context where an unauthenticated user is making a request to put a journal into the
    DOAJ. It does not have any edit capacity (i.e. the form can only be submitted once), and it does not provide
    any form fields other than the essential journal bibliographic, application bibliographc and contact information
    for the suggester. On submission, it will set the status to "pending" and the item will be available for review
    by the editors
    """

    ############################################################
    # PublicApplicationForm versions of FormProcessor lifecycle functions
    ############################################################

    def draft(self, account, id=None, *args, **kwargs):
        """Save the current form state as a DraftApplication owned by
        ``account``. Invalid field values are reset to their defaults rather
        than blocking the save. Returns the saved draft."""
        # check for validity
        valid = self.validate()

        # FIXME: if you can only save a valid draft, you cannot save a draft
        # the draft to be saved needs to be valid
        #if not valid:
        #    return None

        def _resetDefaults(form):
            # recursively reset any field with validation errors back to its
            # default, descending into FormFields and FieldLists
            for field in form:
                if field.errors:
                    if isinstance(field, FormField):
                        _resetDefaults(field.form)
                    elif isinstance(field, FieldList):
                        for sub in field:
                            if isinstance(sub, FormField):
                                _resetDefaults(sub)
                            else:
                                sub.data = sub.default
                    else:
                        field.data = field.default

        # if not valid, then remove all fields which have validation errors
        if not valid:
            _resetDefaults(self.form)

        self.form2target()
        draft_application = models.DraftApplication(**self.target.data)
        if id is not None:
            draft_application.set_id(id)
        draft_application.set_application_status("draft")
        draft_application.set_owner(account.id)
        draft_application.save()
        return draft_application

    def finalise(self, account, save_target=True, email_alert=True, id=None):
        """Submit the application: set status 'pending', record the applied
        date and owner, reuse/replace any existing pending application with
        the given id, remove the matching draft, and email the applicant."""
        super(NewApplication, self).finalise()

        # set some administrative data
        now = datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
        self.target.date_applied = now
        self.target.set_application_status(constants.APPLICATION_STATUS_PENDING)
        self.target.set_owner(account.id)
        self.target.set_last_manual_update()

        if id:
            replacing = models.Application.pull(id)
            if replacing is None:
                self.target.set_id(id)
            else:
                # only take over an existing application's id if it is still
                # pending and owned by the same account
                if replacing.application_status == constants.APPLICATION_STATUS_PENDING and replacing.owner == account.id:
                    self.target.set_id(id)
                    self.target.set_created(replacing.created_date)

        # Finally save the target
        if save_target:
            self.target.save()
            # a draft may have been saved, so also remove that
            if id:
                models.DraftApplication.remove_by_id(id)

        if email_alert:
            try:
                emails.send_received_email(self.target)
            except app_email.EmailException as e:
                # email failure should not fail the submission; alert and log
                self.add_alert(Messages.FORMS__APPLICATION_PROCESSORS__NEW_APPLICATION__FINALISE__USER_EMAIL_ERROR)
                app.logger.exception(Messages.FORMS__APPLICATION_PROCESSORS__NEW_APPLICATION__FINALISE__LOG_EMAIL_ERROR)
class AdminApplication(ApplicationProcessor):
    """
    Managing Editor's Application Review form. Should be used in a context where the form warrants full
    admin priviledges. It will permit conversion of applications to journals, and assignment of owner account
    as well as assignment to editorial group.
    """

    def pre_validate(self):
        # to bypass WTForms insistence that choices on a select field match the value, outside of the actual validation
        # chain
        super(AdminApplication, self).pre_validate()
        self.form.editor.choices = [(self.form.editor.data, self.form.editor.data)]

        # TODO: Should quick_reject be set through this form at all?
        self.form.quick_reject.choices = [(self.form.quick_reject.data, self.form.quick_reject.data)]

    def patch_target(self):
        """Apply to the crosswalked target the fields that must be preserved
        from the source (fixed aspects, notes, owner)."""
        super(AdminApplication, self).patch_target()

        # This patches the target with things that shouldn't change from the source
        self._carry_fixed_aspects()
        self._merge_notes_forward(allow_delete=True)

        # NOTE: this means you can't unset an owner once it has been set. But you can change it.
        if (self.target.owner is None or self.target.owner == "") and (self.source.owner is not None):
            self.target.set_owner(self.source.owner)

    def finalise(self, account, save_target=True, email_alert=True):
        """
        account is the administrator account carrying out the action
        """
        if self.source is None:
            raise Exception("You cannot edit a not-existent application")
        if self.source.application_status == constants.APPLICATION_STATUS_ACCEPTED:
            raise Exception("You cannot edit applications which have been accepted into DOAJ.")

        # if we are allowed to finalise, kick this up to the superclass
        super(AdminApplication, self).finalise()

        # TODO: should these be a BLL feature?
        # If we have changed the editors assigned to this application, let them know.
        # (computed before any save so the comparison is against the original source)
        is_editor_group_changed = ApplicationFormXWalk.is_new_editor_group(self.form, self.source)
        is_associate_editor_changed = ApplicationFormXWalk.is_new_editor(self.form, self.source)

        # record the event in the provenance tracker
        models.Provenance.make(account, "edit", self.target)

        # delayed import of the DOAJ BLL
        from portality.bll.doaj import DOAJ
        applicationService = DOAJ.applicationService()

        # if the application is already rejected, and we are moving it back into a non-rejected status
        if self.source.application_status == constants.APPLICATION_STATUS_REJECTED and self.target.application_status != constants.APPLICATION_STATUS_REJECTED:
            try:
                applicationService.unreject_application(self.target, current_user._get_current_object(), disallow_status=[])
            except exceptions.DuplicateUpdateRequest as e:
                # an update request for the same journal already exists; abort
                self.add_alert(Messages.FORMS__APPLICATION_PROCESSORS__ADMIN_APPLICATION__FINALISE__COULD_NOT_UNREJECT)
                return

        # if this application is being accepted, then do the conversion to a journal
        if self.target.application_status == constants.APPLICATION_STATUS_ACCEPTED:
            j = applicationService.accept_application(self.target, account)
            # record the url the journal is available at in the admin are and alert the user
            if has_request_context():  # fixme: if we handle alerts via a notification service we won't have to toggle on request context
                jurl = url_for("doaj.toc", identifier=j.toc_id)
                if self.source.current_journal is not None:  # todo: are alerts displayed?
                    self.add_alert('<a href="{url}" target="_blank">Existing journal updated</a>.'.format(url=jurl))
                else:
                    self.add_alert('<a href="{url}" target="_blank">New journal created</a>.'.format(url=jurl))

            # Add the journal to the account and send the notification email
            try:
                owner = models.Account.pull(j.owner)
                self.add_alert('Associating the journal with account {username}.'.format(username=owner.id))
                owner.add_journal(j.id)
                if not owner.has_role('publisher'):
                    owner.add_role('publisher')
                owner.save()

                # for all acceptances, send an email to the owner of the journal
                if email_alert:
                    self._send_application_approved_email(j.bibjson().title, owner.name, owner.email, self.source.current_journal is not None)
            except AttributeError:
                # models.Account.pull returned None, so 'owner' has no attributes
                raise Exception("Account {owner} does not exist".format(owner=j.owner))
            except app_email.EmailException:
                self.add_alert("Problem sending email to suggester - probably address is invalid")
                app.logger.exception("Acceptance email to owner failed.")

        # if the application was instead rejected, carry out the rejection actions
        elif self.source.application_status != constants.APPLICATION_STATUS_REJECTED and self.target.application_status == constants.APPLICATION_STATUS_REJECTED:
            # remember whether this was an update request or not
            is_update_request = self.target.current_journal is not None

            # reject the application
            applicationService.reject_application(self.target, current_user._get_current_object())

            # if this was an update request, send an email to the owner
            if is_update_request and email_alert:
                sent = False
                send_report = []
                try:
                    send_report = emails.send_publisher_reject_email(self.target, update_request=is_update_request)
                    sent = True
                except app_email.EmailException as e:
                    # best-effort email; failure is reported via the alert below
                    pass

                if sent:
                    self.add_alert(Messages.SENT_REJECTED_UPDATE_REQUEST_EMAIL.format(user=self.target.owner, email=send_report[0].get("email"), name=send_report[0].get("name")))
                else:
                    self.add_alert(Messages.NOT_SENT_REJECTED_UPDATE_REQUEST_EMAIL.format(user=self.target.owner))

        # the application was neither accepted or rejected, so just save it
        else:
            self.target.set_last_manual_update()
            self.target.save()

        if email_alert:
            # if revisions were requested, email the publisher
            if self.source.application_status != constants.APPLICATION_STATUS_REVISIONS_REQUIRED and self.target.application_status == constants.APPLICATION_STATUS_REVISIONS_REQUIRED:
                try:
                    emails.send_publisher_update_request_revisions_required(self.target)
                    self.add_alert(Messages.SENT_REJECTED_UPDATE_REQUEST_REVISIONS_REQUIRED_EMAIL.format(user=self.target.owner))
                except app_email.EmailException as e:
                    self.add_alert(Messages.NOT_SENT_REJECTED_UPDATE_REQUEST_REVISIONS_REQUIRED_EMAIL.format(user=self.target.owner))

            # if we need to email the editor and/or the associate, handle those here
            if is_editor_group_changed:
                try:
                    emails.send_editor_group_email(self.target)
                except app_email.EmailException:
                    self.add_alert("Problem sending email to editor - probably address is invalid")
                    app.logger.exception("Email to associate failed.")
            if is_associate_editor_changed:
                try:
                    emails.send_assoc_editor_email(self.target)
                except app_email.EmailException:
                    self.add_alert("Problem sending email to associate editor - probably address is invalid")
                    app.logger.exception("Email to associate failed.")

            # If this is the first time this application has been assigned to an editor, notify the publisher.
            old_ed = self.source.editor
            if (old_ed is None or old_ed == '') and self.target.editor is not None:
                is_update_request = self.target.current_journal is not None
                if is_update_request:
                    alerts = emails.send_publisher_update_request_editor_assigned_email(self.target)
                else:
                    alerts = emails.send_publisher_application_editor_assigned_email(self.target)
                for alert in alerts:
                    self.add_alert(alert)

            # Inform editor and associate editor if this application was 'ready' or 'completed', but has been changed to 'in progress'
            if (self.source.application_status == constants.APPLICATION_STATUS_READY or self.source.application_status == constants.APPLICATION_STATUS_COMPLETED) and self.target.application_status == constants.APPLICATION_STATUS_IN_PROGRESS:
                # First, the editor
                try:
                    emails.send_editor_inprogress_email(self.target)
                    self.add_alert('An email has been sent to notify the editor of the change in status.')
                except AttributeError:
                    # no editor could be resolved for the editor group
                    magic = str(uuid.uuid1())
                    self.add_alert('Couldn\'t find a recipient for this email - check editor groups are correct. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
                    app.logger.exception('No editor recipient for failed review email - ' + magic)
                except app_email.EmailException:
                    magic = str(uuid.uuid1())
                    self.add_alert('Sending the failed review email to editor didn\'t work. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
                    app.logger.exception('Error sending review failed email to editor - ' + magic)

                # Then the associate
                try:
                    emails.send_assoc_editor_inprogress_email(self.target)
                    self.add_alert('An email has been sent to notify the assigned associate editor of the change in status.')
                except AttributeError:
                    magic = str(uuid.uuid1())
                    self.add_alert('Couldn\'t find a recipient for this email - check an associate editor is assigned. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
                    app.logger.exception('No associate editor recipient for failed review email - ' + magic)
                except app_email.EmailException:
                    magic = str(uuid.uuid1())
                    self.add_alert('Sending the failed review email to associate editor didn\'t work. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
                    app.logger.exception('Error sending review failed email to associate editor - ' + magic)

            # email other managing editors if this was newly set to 'ready'
            if self.source.application_status != constants.APPLICATION_STATUS_READY and self.target.application_status == constants.APPLICATION_STATUS_READY:
                # this template requires who made the change, say it was an Admin
                ed_id = 'an administrator'
                try:
                    emails.send_admin_ready_email(self.target, editor_id=ed_id)
                    self.add_alert('A confirmation email has been sent to the Managing Editors.')
                except app_email.EmailException:
                    magic = str(uuid.uuid1())
                    self.add_alert('Sending the ready status to managing editors didn\'t work. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
                    app.logger.exception('Error sending ready status email to managing editors - ' + magic)

    def _send_application_approved_email(self, journal_title, publisher_name, email, update_request=False):
        """Email the publisher when an application is accepted (it's here because it's too troublesome to factor out)"""
        url_root = request.url_root
        if url_root.endswith("/"):
            url_root = url_root[:-1]

        to = [email]
        fro = app.config.get('SYSTEM_EMAIL_FROM', 'feedback@doaj.org')
        if update_request:
            subject = app.config.get("SERVICE_NAME", "") + " - update request accepted"
        else:
            subject = app.config.get("SERVICE_NAME", "") + " - journal accepted"

        publisher_name = publisher_name if publisher_name is not None else "Journal Owner"

        try:
            # publisher email may be globally disabled in config
            if app.config.get("ENABLE_PUBLISHER_EMAIL", False):
                msg = Messages.SENT_ACCEPTED_APPLICATION_EMAIL.format(email=email)
                template = "email/publisher_application_accepted.txt"
                if update_request:
                    msg = Messages.SENT_ACCEPTED_UPDATE_REQUEST_EMAIL.format(email=email)
                    template = "email/publisher_update_request_accepted.txt"
                jn = journal_title

                app_email.send_mail(to=to,
                                    fro=fro,
                                    subject=subject,
                                    template_name=template,
                                    journal_title=jn,
                                    publisher_name=publisher_name,
                                    url_root=url_root
                                    )
                self.add_alert(msg)
            else:
                msg = Messages.NOT_SENT_ACCEPTED_APPLICATION_EMAIL.format(email=email)
                if update_request:
                    msg = Messages.NOT_SENT_ACCEPTED_UPDATE_REQUEST_EMAIL.format(email=email)
                self.add_alert(msg)
        except Exception as e:
            magic = str(uuid.uuid1())
            self.add_alert('Sending the journal acceptance information email didn\'t work. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
            app.logger.exception('Error sending application approved email failed - ' + magic)

    def validate(self):
        # statuses which may be saved without the full form passing validation
        _statuses_not_requiring_validation = ['rejected', 'pending', 'in progress', 'on hold']
        # make use of the ability to disable validation, otherwise, let it run
        if self.form is not None:
            if self.form.application_status.data in _statuses_not_requiring_validation:
                self.pre_validate()
                return True

        return super(AdminApplication, self).validate()
class EditorApplication(ApplicationProcessor):
    """
    Editors Application Review form. This should be used in a context where an editor who owns an editorial group
    is accessing an application. This prevents re-assignment of Editorial group, but permits assignment of associate
    editor. It also permits change in application state, except to "accepted"; therefore this form context cannot
    be used to create journals from applications. Deleting notes is not allowed, but adding is.
    """

    def pre_validate(self):
        """Prepare disabled fields and widen choice lists so the submitted data can validate."""
        # Call to super sets all the basic disabled fields
        super(EditorApplication, self).pre_validate()

        # although the editor_group field is handled by the general pre-validator, we still need to set the choices
        # self.form.editor_group.data = self.source.editor_group
        # The editor field's submitted value must be a valid choice, so make it the only one.
        self.form.editor.choices = [(self.form.editor.data, self.form.editor.data)]

        # This is no longer necessary, is handled by the main pre_validate function
        #if self._formulaic.get('application_status').is_disabled:
        #    self.form.application_status.data = self.source.application_status
        # but we do still need to add the overwritten status to the choices for validation
        if self.form.application_status.data not in [c[0] for c in self.form.application_status.choices]:
            self.form.application_status.choices.append((self.form.application_status.data, self.form.application_status.data))

    def patch_target(self):
        """Copy forward everything an editor may not change (owner, group, notes, continuations)."""
        super(EditorApplication, self).patch_target()
        self._carry_fixed_aspects()
        self._merge_notes_forward()
        self._carry_continuations()
        # Editors cannot reassign ownership or the editorial group, so restore both from source.
        self.target.set_owner(self.source.owner)
        self.target.set_editor_group(self.source.editor_group)

    def finalise(self):
        """Validate the status transition, save the application, and fire the notification emails.

        Raises Exception when there is no source application or when it has already been accepted.
        """
        if self.source is None:
            raise Exception("You cannot edit a not-existent application")
        if self.source.application_status == constants.APPLICATION_STATUS_ACCEPTED:
            raise Exception("You cannot edit applications which have been accepted into DOAJ.")

        # if we are allowed to finalise, kick this up to the superclass
        super(EditorApplication, self).finalise()

        # Check the status change is valid
        # TODO: we want to rid ourselves of the Choices module
        Choices.validate_status_change('editor', self.source.application_status, self.target.application_status)

        # FIXME: may want to factor this out of the suggestionformxwalk
        new_associate_assigned = ApplicationFormXWalk.is_new_editor(self.form, self.source)

        # Save the target
        self.target.set_last_manual_update()
        self.target.save()

        # record the event in the provenance tracker
        models.Provenance.make(current_user, "edit", self.target)

        # if we need to email the associate because they have just been assigned, handle that here.
        if new_associate_assigned:
            try:
                self.add_alert("New editor assigned - email with confirmation has been sent")
                emails.send_assoc_editor_email(self.target)
            except app_email.EmailException:
                self.add_alert("Problem sending email to associate editor - probably address is invalid")
                app.logger.exception('Error sending associate assigned email')

        # If this is the first time this application has been assigned to an editor, notify the publisher.
        old_ed = self.source.editor
        if (old_ed is None or old_ed == '') and self.target.editor is not None:
            is_update_request = self.target.current_journal is not None
            if is_update_request:
                alerts = emails.send_publisher_update_request_editor_assigned_email(self.target)
            else:
                alerts = emails.send_publisher_application_editor_assigned_email(self.target)
            for alert in alerts:
                self.add_alert(alert)

        # Email the assigned associate if the application was reverted from 'completed' to 'in progress' (failed review)
        if self.source.application_status == constants.APPLICATION_STATUS_COMPLETED and self.target.application_status == constants.APPLICATION_STATUS_IN_PROGRESS:
            try:
                emails.send_assoc_editor_inprogress_email(self.target)
                self.add_alert(
                    'An email has been sent to notify the assigned associate editor of the change in status.')
            except AttributeError as e:
                # presumably AttributeError means no associate editor is attached - see alert text; TODO confirm
                magic = str(uuid.uuid1())
                self.add_alert(
                    'Couldn\'t find a recipient for this email - check an associate editor is assigned. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
                app.logger.exception('No associate editor recipient for failed review email - ' + magic)
            except app_email.EmailException:
                magic = str(uuid.uuid1())
                self.add_alert(
                    'Sending the failed review email to associate editor didn\'t work. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
                app.logger.exception('Error sending failed review email to associate editor - ' + magic)

        # email managing editors if the application was newly set to 'ready'
        if self.source.application_status != constants.APPLICATION_STATUS_READY and self.target.application_status == constants.APPLICATION_STATUS_READY:
            # Tell the ManEds who has made the status change - the editor in charge of the group
            editor_group_name = self.target.editor_group
            editor_group_id = models.EditorGroup.group_exists_by_name(name=editor_group_name)
            editor_group = models.EditorGroup.pull(editor_group_id)
            editor_acc = editor_group.get_editor_account()

            # record the event in the provenance tracker
            models.Provenance.make(current_user, "status:ready", self.target)

            editor_id = editor_acc.id
            try:
                emails.send_admin_ready_email(self.target, editor_id=editor_id)
                self.add_alert('A confirmation email has been sent to the Managing Editors.')
            except app_email.EmailException:
                magic = str(uuid.uuid1())
                self.add_alert(
                    'Sending the ready status to managing editors didn\'t work. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
                app.logger.exception('Error sending ready status email to managing editors - ' + magic)
class AssociateApplication(ApplicationProcessor):
    """
    Associate Editors Application Review form. This is to be used in a context where an associate editor (fewest rights)
    needs to access an application for review. This editor cannot change the editorial group or the assigned editor.
    They also cannot change the owner of the application. They cannot set an application to "Accepted" so this form can't
    be used to create a journal from an application. They cannot delete, only add notes.
    """

    def pre_validate(self):
        """Prepare disabled fields and make the overwritten status a valid choice."""
        # Call to super sets all the basic disabled fields
        super(AssociateApplication, self).pre_validate()

        # no longer necessary, handled by superclass pre_validate
        #if self._formulaic.get('application_status').is_disabled:
        #    self.form.application_status.data = self.source.application_status
        # but we do still need to add the overwritten status to the choices for validation
        if self.form.application_status.data not in [c[0] for c in self.form.application_status.choices]:
            self.form.application_status.choices.append(
                (self.form.application_status.data, self.form.application_status.data))

    def patch_target(self):
        """Restore every field an associate editor may not change from the source record."""
        if self.source is None:
            raise Exception("You cannot patch a target from a non-existent source")
        self._carry_fixed_aspects()
        self._merge_notes_forward()
        self.target.set_owner(self.source.owner)
        self.target.set_editor_group(self.source.editor_group)
        self.target.set_editor(self.source.editor)
        self.target.set_seal(self.source.has_seal())
        self._carry_continuations()

    def finalise(self):
        """Validate the status change, save the application, and send status-driven notifications."""
        # if we are allowed to finalise, kick this up to the superclass
        super(AssociateApplication, self).finalise()

        # Check the status change is valid
        Choices.validate_status_change('associate', self.source.application_status, self.target.application_status)

        # Save the target
        self.target.set_last_manual_update()
        self.target.save()

        # record the event in the provenance tracker
        models.Provenance.make(current_user, "edit", self.target)

        # inform publisher if this was set to 'in progress' from 'pending'
        if self.source.application_status == constants.APPLICATION_STATUS_PENDING and self.target.application_status == constants.APPLICATION_STATUS_IN_PROGRESS:
            if app.config.get("ENABLE_PUBLISHER_EMAIL", False):
                # Update requests and new applications use different templates.
                is_update_request = self.target.current_journal is not None
                if is_update_request:
                    alerts = emails.send_publisher_update_request_inprogress_email(self.target)
                else:
                    alerts = emails.send_publisher_application_inprogress_email(self.target)
                for alert in alerts:
                    self.add_alert(alert)
            else:
                self.add_alert(Messages.IN_PROGRESS_NOT_SENT_EMAIL_DISABLED)

        # inform editor if this was newly set to 'completed'
        if self.source.application_status != constants.APPLICATION_STATUS_COMPLETED and self.target.application_status == constants.APPLICATION_STATUS_COMPLETED:
            # record the event in the provenance tracker
            models.Provenance.make(current_user, "status:completed", self.target)

            try:
                emails.send_editor_completed_email(self.target)
                self.add_alert('A confirmation email has been sent to notify the editor of the change in status.')
            except app_email.EmailException:
                # "Magic number" ties the user-facing alert to the log entry.
                magic = str(uuid.uuid1())
                self.add_alert(
                    'Sending the ready status to editor email didn\'t work. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
                app.logger.exception('Error sending completed status email to editor - ' + magic)
class PublisherUpdateRequest(ApplicationProcessor):
    """
    Processor for a publisher's update request on an existing journal.  Administrative data
    (owner, editorial group, editor, subjects, seal) is carried over from the source record,
    the status is forced to 'update_request', and the application is linked to its journal.
    """

    def pre_validate(self):
        if self.source is None:
            raise Exception("You cannot validate a form from a non-existent source")

        # NOTE(review): this deliberately starts the super() chain above ApplicationProcessor,
        # skipping ApplicationProcessor.pre_validate itself - confirm this is intentional and
        # not a typo for super(PublisherUpdateRequest, self).
        super(ApplicationProcessor, self).pre_validate()

        # no longer required, handled by call to superclass pre_validate
        # carry forward the disabled fields
        #bj = self.source.bibjson()
        #self.form.title.data = bj.title
        #self.form.alternative_title.data = bj.alternative_title
        #pissn = bj.pissn
        #if pissn == "": pissn = None
        #self.form.pissn.data = pissn
        #eissn = bj.eissn
        #if eissn == "": eissn = None
        #self.form.eissn.data = eissn

    def patch_target(self):
        """Carry all publisher-immutable data forward from the source application."""
        if self.source is None:
            raise Exception("You cannot patch a target from a non-existent source")

        self._carry_subjects_and_seal()
        self._carry_fixed_aspects()
        self._merge_notes_forward()
        self.target.set_owner(self.source.owner)
        self.target.set_editor_group(self.source.editor_group)
        self.target.set_editor(self.source.editor)
        self._carry_continuations()

        # we carry this over for completeness, although it will be overwritten in the finalise() method
        self.target.set_application_status(self.source.application_status)

    def finalise(self, save_target=True, email_alert=True):
        """Set the update_request status, save, link the application to its journal, and email the publisher.

        :param save_target: when False, skip saving both the application and the journal
        :param email_alert: when False, suppress the "update request received" email
        """
        # FIXME: this first one, we ought to deal with outside the form context, but for the time being this
        # can be carried over from the old implementation
        if self.source is None:
            raise Exception("You cannot edit a not-existent application")

        # if we are allowed to finalise, kick this up to the superclass
        super(PublisherUpdateRequest, self).finalise()

        # set the status to update_request (if not already)
        self.target.set_application_status(constants.APPLICATION_STATUS_UPDATE_REQUEST)

        # Save the target
        self.target.set_last_manual_update()
        if save_target:
            saved = self.target.save()
            if saved is None:
                raise Exception("Save on application failed")

        # obtain the related journal, and attach the current application id to it
        journal_id = self.target.current_journal
        from portality.bll.doaj import DOAJ
        journalService = DOAJ.journalService()
        if journal_id is not None:
            journal, _ = journalService.journal(journal_id)
            if journal is not None:
                journal.set_current_application(self.target.id)
                if save_target:
                    saved = journal.save()
                    if saved is None:
                        raise Exception("Save on journal failed")
            else:
                # The referenced journal no longer exists; drop the stale link.
                self.target.remove_current_journal()

        # email the publisher to tell them we received their update request
        if email_alert:
            try:
                self._send_received_email()
            except app_email.EmailException as e:
                self.add_alert("We were unable to send you an email confirmation - possible problem with your email address")
                app.logger.exception('Error sending reapplication received email to publisher')

    def _carry_subjects_and_seal(self):
        """Copy the subject classification and seal flag from source to target."""
        # carry over the subjects
        source_subjects = self.source.bibjson().subject
        self.target.bibjson().subject = source_subjects

        # carry over the seal
        self.target.set_seal(self.source.has_seal())

    def _send_received_email(self):
        """Email the owner account confirming receipt of the update request; re-raises email failures."""
        acc = models.Account.pull(self.target.owner)
        if acc is None:
            self.add_alert("Unable to locate account for specified owner")
            return

        journal_name = self.target.bibjson().title #.encode('utf-8', 'replace')

        to = [acc.email]
        fro = app.config.get('SYSTEM_EMAIL_FROM', 'feedback@doaj.org')
        subject = app.config.get("SERVICE_NAME","") + " - update request received"

        try:
            if app.config.get("ENABLE_PUBLISHER_EMAIL", False):
                app_email.send_mail(to=to,
                                    fro=fro,
                                    subject=subject,
                                    template_name="email/publisher_update_request_received.txt",
                                    journal_name=journal_name,
                                    username=self.target.owner
                )
                self.add_alert('A confirmation email has been sent to ' + acc.email + '.')
        except app_email.EmailException as e:
            magic = str(uuid.uuid1())
            self.add_alert('Hm, sending the "update request received" email didn\'t work. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
            app.logger.error(magic + "\n" + repr(e))
            raise e
class PublisherUpdateRequestReadOnly(ApplicationProcessor):
    """
    Read Only Application form for publishers. Nothing can be changed. Useful to show publishers what they
    currently have submitted for review.
    """

    def finalise(self):
        # Read-only context: saving is always an error.
        raise Exception("You cannot edit applications using the read-only form")
###############################################
### Journal form processors
###############################################
class ManEdJournalReview(ApplicationProcessor):
    """
    Managing Editor's Journal Review form. Should be used in a context where the form warrants full
    admin privileges. It will permit doing every action.
    """

    def patch_target(self):
        """Carry fixed data forward; notes may be deleted by admins."""
        if self.source is None:
            raise Exception("You cannot patch a target from a non-existent source")

        self._carry_fixed_aspects()
        self._merge_notes_forward(allow_delete=True)

        # NOTE: this means you can't unset an owner once it has been set.  But you can change it.
        if (self.target.owner is None or self.target.owner == "") and (self.source.owner is not None):
            self.target.set_owner(self.source.owner)

    def finalise(self):
        """Save the journal and notify any newly assigned editor / associate editor."""
        # FIXME: this first one, we ought to deal with outside the form context, but for the time being this
        # can be carried over from the old implementation
        if self.source is None:
            raise Exception("You cannot edit a not-existent journal")

        # if we are allowed to finalise, kick this up to the superclass
        super(ManEdJournalReview, self).finalise()

        # FIXME: may want to factor this out of the suggestionformxwalk
        # If we have changed the editors assinged to this application, let them know.
        is_editor_group_changed = JournalFormXWalk.is_new_editor_group(self.form, self.source)
        is_associate_editor_changed = JournalFormXWalk.is_new_editor(self.form, self.source)

        # Save the target
        self.target.set_last_manual_update()
        self.target.save()

        # if we need to email the editor and/or the associate, handle those here
        if is_editor_group_changed:
            try:
                emails.send_editor_group_email(self.target)
            except app_email.EmailException:
                self.add_alert("Problem sending email to editor - probably address is invalid")
                app.logger.exception('Error sending assignment email to editor.')
        if is_associate_editor_changed:
            try:
                emails.send_assoc_editor_email(self.target)
            except app_email.EmailException:
                self.add_alert("Problem sending email to associate editor - probably address is invalid")
                app.logger.exception('Error sending assignment email to associate.')

    def validate(self):
        """Allow admins to bypass validation via the make_all_fields_optional flag."""
        # make use of the ability to disable validation, otherwise, let it run
        if self.form is not None:
            if self.form.make_all_fields_optional.data:
                self.pre_validate()
                return True
        return super(ManEdJournalReview, self).validate()
class EditorJournalReview(ApplicationProcessor):
    """
    Editors Journal Review form. This should be used in a context where an editor who owns an editorial group
    is accessing a journal. This prevents re-assignment of Editorial group, but permits assignment of associate
    editor.
    """

    def patch_target(self):
        """Restore owner and editorial group (editors may not change them) and carry fixed data forward."""
        if self.source is None:
            raise Exception("You cannot patch a target from a non-existent source")

        self._carry_fixed_aspects()
        self.target.set_owner(self.source.owner)
        self.target.set_editor_group(self.source.editor_group)
        self._merge_notes_forward()
        self._carry_continuations()

    def pre_validate(self):
        # call to super handles all the basic disabled field
        super(EditorJournalReview, self).pre_validate()

        # although the superclass sets the value of the disabled field, we still need to set the choices
        # self.form.editor_group.data = self.source.editor_group
        self.form.editor.choices = [(self.form.editor.data, self.form.editor.data)]

    def finalise(self):
        """Save the journal and notify a newly assigned associate editor."""
        if self.source is None:
            raise Exception("You cannot edit a not-existent journal")

        # if we are allowed to finalise, kick this up to the superclass
        super(EditorJournalReview, self).finalise()

        # NOTE(review): this uses ApplicationFormXWalk in a journal context, whereas
        # ManEdJournalReview uses JournalFormXWalk for the same check - confirm intentional.
        email_associate = ApplicationFormXWalk.is_new_editor(self.form, self.source)

        # Save the target
        self.target.set_last_manual_update()
        self.target.save()

        # if we need to email the associate, handle that here.
        if email_associate:
            try:
                emails.send_assoc_editor_email(self.target)
            except app_email.EmailException:
                self.add_alert("Problem sending email to associate editor - probably address is invalid")
                app.logger.exception('Error sending assignment email to associate.')
class AssEdJournalReview(ApplicationProcessor):
    """
    Associate Editors Journal Review form. This is to be used in a context where an associate editor (fewest rights)
    needs to access a journal for review. This editor cannot change the editorial group or the assigned editor.
    They also cannot change the owner of the journal. They cannot delete, only add notes.
    """

    def patch_target(self):
        """Restore every field an associate editor may not change from the source journal."""
        if self.source is None:
            raise Exception("You cannot patch a target from a non-existent source")

        self._carry_fixed_aspects()
        self._merge_notes_forward()
        self.target.set_owner(self.source.owner)
        self.target.set_editor_group(self.source.editor_group)
        self.target.set_editor(self.source.editor)
        self._carry_continuations()

    def finalise(self):
        """Save the journal; no notifications are sent at this privilege level."""
        if self.source is None:
            raise Exception("You cannot edit a not-existent journal")

        # if we are allowed to finalise, kick this up to the superclass
        super(AssEdJournalReview, self).finalise()

        # Save the target
        self.target.set_last_manual_update()
        self.target.save()
class ReadOnlyJournal(ApplicationProcessor):
    """
    Read Only Journal form. Nothing can be changed. Useful for reviewing a journal and an application
    (or update request) side by side in 2 browser windows or tabs.
    """

    def form2target(self):
        pass  # you can't edit objects using this form

    def patch_target(self):
        pass  # you can't edit objects using this form

    def finalise(self):
        raise Exception("You cannot edit journals using the read-only form")
class ManEdBulkEdit(ApplicationProcessor):
    """
    Managing Editor's bulk edit form processor.  Adds no behaviour of its own over the base
    ApplicationProcessor.  (The previous docstring was a duplicate of ManEdJournalReview's.)
    """
    pass
| apache-2.0 |
jenmud/behave-graph | behave_graph/__init__.py | 1 | 2422 | """
Setup the environment by parsing the command line options and staring
a ruruki http server.
"""
import argparse
import logging
import os
from behave.configuration import Configuration
from behave.runner import Runner, parse_features
from ruruki_eye.server import run
from behave_graph.scrape import GRAPH
from behave_graph.scrape import scrape_features
__all__ = ["load"]
def load(path):
    """
    Load the given path that contains the features and steps.

    :param path: Path where the feature and steps files can be found.
    :type path: :class:`str`

    :returns: A behave runner.
    :rtype: :class:`behave.runner.Runner`

    :raises argparse.ArgumentTypeError: if the path cannot be loaded (so this
        function can be used directly as an argparse ``type=`` converter).
    """
    try:
        runner = Runner(Configuration(path))
        filenames = [location.filename for location in runner.feature_locations()]
        scrape_features(parse_features(filenames))
    except Exception as error:  # pylint: disable=broad-except
        logging.exception(
            "Unexpected error creating configuration %r: %r",
            path, error
        )
        raise argparse.ArgumentTypeError(error)
    return runner
def parse_arguments():
    """
    Parse the command line arguments.

    :returns: All the command line arguments.
    :rtype: :class:`argparse.Namespace`
    """
    parser = argparse.ArgumentParser(
        description="Behave dependency grapher."
    )

    # (flags, keyword arguments) for each supported option.
    option_specs = [
        (
            ("-b", "--base-dir"),
            dict(
                default=os.getcwd(),
                type=load,
                help=(
                    "Behave base directory path "
                    "where features and steps can be found. "
                    "(default: %(default)s)"
                ),
            ),
        ),
        (
            ("--runserver",),
            dict(
                action="store_true",
                help="Start a ruruki http server.",
            ),
        ),
        (
            ("--address",),
            dict(
                default="0.0.0.0",
                help="Address to start the web server on. (default: %(default)s)",
            ),
        ),
        (
            ("--port",),
            dict(
                type=int,
                default=8000,
                help=(
                    "Port number that the web server will accept connections on. "
                    "(default: %(default)d)"
                ),
            ),
        ),
    ]
    for flags, kwargs in option_specs:
        parser.add_argument(*flags, **kwargs)

    return parser.parse_args()
def main():
    """
    Entry point: configure logging, parse the command line and, if requested,
    start the ruruki http server serving the scraped graph.
    """
    logging.basicConfig(level=logging.INFO)
    namespace = parse_arguments()
    # `store_true` always yields a bool, so test truthiness rather than
    # the non-idiomatic identity comparison `is True`.
    if namespace.runserver:
        run(namespace.address, namespace.port, False, GRAPH)
| mit |
mdsafwan/Deal-My-Stuff | Lib/site-packages/django/db/models/aggregates.py | 26 | 5554 | """
Classes to represent the definitions of aggregate functions.
"""
from django.core.exceptions import FieldError
from django.db.models.expressions import Func, Star
from django.db.models.fields import FloatField, IntegerField
__all__ = [
'Aggregate', 'Avg', 'Count', 'Max', 'Min', 'StdDev', 'Sum', 'Variance',
]
class Aggregate(Func):
    """Base class for SQL aggregate functions (Avg, Count, Max, ...), built on Func."""
    contains_aggregate = True
    name = None

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        """Resolve the wrapped expression, rejecting aggregates over aggregates."""
        assert len(self.source_expressions) == 1
        # Aggregates are not allowed in UPDATE queries, so ignore for_save
        c = super(Aggregate, self).resolve_expression(query, allow_joins, reuse, summarize)
        if c.source_expressions[0].contains_aggregate and not summarize:
            name = self.source_expressions[0].name
            raise FieldError("Cannot compute %s('%s'): '%s' is an aggregate" % (
                c.name, name, name))
        c._patch_aggregate(query)  # backward-compatibility support
        return c

    @property
    def input_field(self):
        # The single expression this aggregate is computed over.
        return self.source_expressions[0]

    @property
    def default_alias(self):
        # e.g. Sum('price') yields the alias 'price__sum'; complex expressions
        # have no field name to derive an alias from.
        if hasattr(self.source_expressions[0], 'name'):
            return '%s__%s' % (self.source_expressions[0].name, self.name.lower())
        raise TypeError("Complex expressions require an alias")

    def get_group_by_cols(self):
        # Aggregates never contribute columns to the GROUP BY clause.
        return []

    def _patch_aggregate(self, query):
        """
        Helper method for patching 3rd party aggregates that do not yet support
        the new way of subclassing. This method will be removed in Django 1.10.

        add_to_query(query, alias, col, source, is_summary) will be defined on
        legacy aggregates which, in turn, instantiates the SQL implementation of
        the aggregate. In all the cases found, the general implementation of
        add_to_query looks like:

        def add_to_query(self, query, alias, col, source, is_summary):
            klass = SQLImplementationAggregate
            aggregate = klass(col, source=source, is_summary=is_summary, **self.extra)
            query.aggregates[alias] = aggregate

        By supplying a known alias, we can get the SQLAggregate out of the
        aggregates dict, and use the sql_function and sql_template attributes
        to patch *this* aggregate.
        """
        if not hasattr(self, 'add_to_query') or self.function is not None:
            return

        placeholder_alias = "_XXXXXXXX_"
        self.add_to_query(query, placeholder_alias, None, None, None)
        sql_aggregate = query.aggregates.pop(placeholder_alias)
        if 'sql_function' not in self.extra and hasattr(sql_aggregate, 'sql_function'):
            self.extra['function'] = sql_aggregate.sql_function

        if hasattr(sql_aggregate, 'sql_template'):
            self.extra['template'] = sql_aggregate.sql_template
class Avg(Aggregate):
    """SQL AVG() aggregate; the result is always coerced to a float."""
    function = 'AVG'
    name = 'Avg'

    def __init__(self, expression, **extra):
        super(Avg, self).__init__(expression, output_field=FloatField(), **extra)

    def convert_value(self, value, expression, connection, context):
        # NULL propagates unchanged; any other backend value becomes a float.
        return value if value is None else float(value)
class Count(Aggregate):
    """SQL COUNT() aggregate, optionally DISTINCT; NULL results become 0."""
    function = 'COUNT'
    name = 'Count'
    template = '%(function)s(%(distinct)s%(expressions)s)'

    def __init__(self, expression, distinct=False, **extra):
        # COUNT(*) is modelled by the special Star expression.
        if expression == '*':
            expression = Star()
        distinct_sql = 'DISTINCT ' if distinct else ''
        super(Count, self).__init__(
            expression, distinct=distinct_sql, output_field=IntegerField(), **extra)

    def __repr__(self):
        expressions = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
        distinct_flag = 'True' if self.extra['distinct'] else 'False'
        return "{}({}, distinct={})".format(
            self.__class__.__name__, expressions, distinct_flag)

    def convert_value(self, value, expression, connection, context):
        # Normalise a NULL count to zero, and everything else to int.
        return 0 if value is None else int(value)
class Max(Aggregate):
    """SQL MAX() aggregate."""
    function = 'MAX'
    name = 'Max'
class Min(Aggregate):
    """SQL MIN() aggregate."""
    function = 'MIN'
    name = 'Min'
class StdDev(Aggregate):
    """Standard deviation aggregate: population by default, sample when sample=True."""
    name = 'StdDev'

    def __init__(self, expression, sample=False, **extra):
        # Pick the SQL function according to the sample/population flag.
        self.function = 'STDDEV_SAMP' if sample else 'STDDEV_POP'
        super(StdDev, self).__init__(expression, output_field=FloatField(), **extra)

    def __repr__(self):
        expressions = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
        sample_flag = 'False' if self.function == 'STDDEV_POP' else 'True'
        return "{}({}, sample={})".format(
            self.__class__.__name__, expressions, sample_flag)

    def convert_value(self, value, expression, connection, context):
        # NULL propagates unchanged; any other backend value becomes a float.
        return value if value is None else float(value)
class Sum(Aggregate):
    """SQL SUM() aggregate."""
    function = 'SUM'
    name = 'Sum'
class Variance(Aggregate):
    """Variance aggregate: population by default, sample when sample=True."""
    name = 'Variance'

    def __init__(self, expression, sample=False, **extra):
        # Pick the SQL function according to the sample/population flag.
        self.function = 'VAR_SAMP' if sample else 'VAR_POP'
        super(Variance, self).__init__(expression, output_field=FloatField(), **extra)

    def __repr__(self):
        expressions = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
        sample_flag = 'False' if self.function == 'VAR_POP' else 'True'
        return "{}({}, sample={})".format(
            self.__class__.__name__, expressions, sample_flag)

    def convert_value(self, value, expression, connection, context):
        # NULL propagates unchanged; any other backend value becomes a float.
        return value if value is None else float(value)
| apache-2.0 |
brainix/social-butterfly | channels.py | 1 | 6608 | #-----------------------------------------------------------------------------#
# channels.py #
# #
# Copyright (c) 2010-2012, Code A La Mode, original authors. #
# #
# This file is part of Social Butterfly. #
# #
# Social Butterfly is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# Social Butterfly is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with Social Butterfly. If not, see: #
# <http://www.gnu.org/licenses/>. #
#-----------------------------------------------------------------------------#
"""Datastore model and public API for Google App Engine channel management."""
import datetime
import logging
import random
from google.appengine.api import channel
from google.appengine.ext import db
from google.appengine.ext import deferred
from google.appengine.runtime import DeadlineExceededError
from config import NUM_RETRIES
_log = logging.getLogger(__name__)
class Channel(db.Model):
    """Datastore model and public API for Google App Engine channel management.

    Google App Engine implements channels (similar to Comet or WebSockets) for
    real-time cloud to browser communication.  But App Engine only provides the
    communication primitives.  We need to persist additional data about the
    open channels, so that we know who to broadcast the messages to.
    """

    # Optional label used to scope broadcasts to a subset of channels.
    name = db.StringProperty()
    # Auto-updated timestamp; flush() uses it to expire channels over two hours old.
    datetime = db.DateTimeProperty(required=True, auto_now=True)

    @classmethod
    def create(cls, name=None):
        """Create a channel and return its token (None if no ID could be allocated)."""
        _log.info('creating channel')
        def txn():
            # Retry in case the randomly chosen client ID is already taken.
            for retry in range(NUM_RETRIES):
                client_id = 'client' + str(random.randint(0, 10 ** 8 - 1))
                chan = cls.get_by_key_name(client_id)
                if chan is None:
                    chan = cls(key_name=client_id, name=name)
                    chan.put()
                    return client_id
        client_id = db.run_in_transaction(txn)
        if client_id is None:
            _log.warning("couldn't create channel; couldn't allocate ID")
        else:
            token = channel.create_channel(client_id)
            # Schedule automatic destruction after the channel's two-hour lifetime.
            _countdown = 2 * 60 * 60
            deferred.defer(cls.destroy, client_id, _countdown=_countdown)
            _log.info('created channel %s, token %s' % (client_id, token))
            return token

    @classmethod
    def destroy(cls, client_id):
        """Destroy the specified channel."""
        _log.info('destroying channel %s' % client_id)
        chan = cls.get_by_key_name(client_id)
        if chan is None:
            body = "couldn't destroy channel %s; already destroyed" % client_id
            _log.info(body)
        else:
            db.delete(chan)
            _log.info('destroyed channel %s' % client_id)

    @classmethod
    def broadcast(cls, json, name=None):
        """Schedule broadcasting the specified JSON string to all channels."""
        _log.info('deferring broadcasting JSON to all connected channels')
        channels = cls.all()
        if name is not None:
            channels = channels.filter('name =', name)
        # Only check whether at least one matching channel exists.
        channels = channels.count(1)
        if channels:
            deferred.defer(cls._broadcast, json, name=name, cursor=None)
            _log.info('deferred broadcasting JSON to all connected channels')
        else:
            body = 'not deferring broadcasting JSON (no connected channels)'
            _log.info(body)

    @classmethod
    def _broadcast(cls, json, name=None, cursor=None):
        """Broadcast the specified JSON string to all channels.

        Resumes from `cursor` when re-scheduled after a deadline.
        """
        _log.info('broadcasting JSON to all connected channels')
        keys = cls.all(keys_only=True)
        if name is not None:
            keys = keys.filter('name = ', name)
        if cursor is not None:
            keys = keys.with_cursor(cursor)
        num_channels = 0
        try:
            for key in keys:
                client_id = key.name()
                channel.send_message(client_id, json)
                # There's a chance that Google App Engine will throw the
                # DeadlineExceededError exception at this point in the flow of
                # execution.  In this case, the current channel will have
                # already received our JSON broadcast, but the cursor will not
                # have been updated.  So on the next go-around, the current
                # channel will receive our JSON broadcast again.  I'm just
                # documenting this possibility, but it shouldn't be a big deal.
                cursor = keys.cursor()
                num_channels += 1
        except DeadlineExceededError:
            _log.info('broadcasted JSON to %s channels' % num_channels)
            _log.warning("deadline; deferring broadcast to remaining channels")
            deferred.defer(cls._broadcast, json, name=name, cursor=cursor)
        else:
            _log.info('broadcasted JSON to %s channels' % num_channels)
            _log.info('broadcasted JSON to all connected channels')

    @classmethod
    def flush(cls):
        """Destroy all channels created over two hours ago."""
        _log.info('destroying all channels over two hours old')
        now = datetime.datetime.now()
        timeout = datetime.timedelta(hours=2)
        expiry = now - timeout
        keys = cls.all(keys_only=True).filter('datetime <=', expiry)
        db.delete(keys)
        _log.info('destroyed all channels over two hours old')
| gpl-3.0 |
frankyrumple/ope | libs/gluon/packages/dal/pydal/adapters/ingres.py | 23 | 6571 | # -*- coding: utf-8 -*-
from .._globals import IDENTITY
from ..drivers import pyodbc
from .base import BaseAdapter
# NOTE invalid database object name (ANSI-SQL wants
# this form of name to be a delimited identifier)
INGRES_SEQNAME='ii***lineitemsequence'
class IngresAdapter(BaseAdapter):
    """pyDAL adapter for the Ingres RDBMS, connected through pyodbc.

    Identity values are generated from a shared named sequence
    (INGRES_SEQNAME) instead of per-table auto-increment columns; see
    ``create_sequence_and_triggers`` / ``lastrowid``.
    """

    drivers = ('pyodbc',)

    # Mapping of pyDAL abstract field types to Ingres SQL column types.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'int not null unique with default next value for %s' % INGRES_SEQNAME,
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'bigint not null unique with default next value for %s' % INGRES_SEQNAME,
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        }

    def LEFT_JOIN(self):
        # Ingres requires the explicit OUTER keyword.
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'RANDOM()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Render a SELECT with Ingres pagination (FIRST n ... OFFSET m)."""
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            if fetch_amt:
                sql_s += ' FIRST %d ' % (fetch_amt, )
            if lmin:
                # Requires Ingres 9.2+
                sql_o += ' OFFSET %d' % (lmin, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the connection URI and prepare a pyodbc connector.

        The URI payload is either a full ODBC connection string (detected
        by the presence of '=') or a bare local database name, in which
        case OS authentication against the local vnode is assumed.
        """
        self.db = db
        self.dbengine = "ingres"
        self._driver = pyodbc
        self.uri = uri
        if do_connect: self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        connstr = uri.split(':', 1)[1]
        # Simple URI processing: strip whitespace and leading slashes.
        connstr = connstr.lstrip()
        while connstr.startswith('/'):
            connstr = connstr[1:]
        if '=' in connstr:
            # Assume we have a regular ODBC connection string and just use it
            ruri = connstr
        else:
            # Assume only (local) dbname is passed in with OS auth
            database_name = connstr
            default_driver_name = 'Ingres'
            vnode = '(local)'
            servertype = 'ingres'
            ruri = 'Driver={%s};Server=%s;Database=%s' % (default_driver_name, vnode, database_name)

        def connector(cnxn=ruri, driver_args=driver_args):
            return self.driver.connect(cnxn, **driver_args)

        self.connector = connector
        # TODO if version is >= 10, set types['id'] to Identity column, see http://community.actian.com/wiki/Using_Ingres_Identity_Columns
        if do_connect: self.reconnect()

    def create_sequence_and_triggers(self, query, table, **args):
        """Run the CREATE TABLE and set up id-sequence / btree storage.

        For tables with a custom primary key only the storage structure is
        modified; otherwise a per-table sequence backs the 'id' column.
        """
        # post create table auto inc code (if needed)
        # modify table to btree for performance....
        # Older Ingres releases could use rule/trigger like Oracle above.
        if hasattr(table, '_primarykey'):
            modify_tbl_sql = 'modify %s to btree unique on %s' % \
                (table._tablename,
                 ', '.join(["'%s'" % x for x in table.primarykey]))
            self.execute(modify_tbl_sql)
        else:
            tmp_seqname = '%s_iisq' % table._tablename
            query = query.replace(INGRES_SEQNAME, tmp_seqname)
            self.execute('create sequence %s' % tmp_seqname)
            self.execute(query)
            self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))

    def lastrowid(self, table):
        """Return the id most recently allocated from the table's sequence."""
        tmp_seqname = '%s_iisq' % table
        self.execute('select current value for %s' % tmp_seqname)
        # int() is correct on both Python 2 and 3 (py2 auto-promotes to
        # long); the bare long() builtin used here before is a NameError
        # on Python 3.
        return int(self.cursor.fetchone()[0])
class IngresUnicodeAdapter(IngresAdapter):
    """Ingres adapter variant that stores text in Unicode column types.

    Identical to :class:`IngresAdapter` except that string/text/list
    fields map to NVARCHAR / NCLOB instead of VARCHAR / CLOB.
    """
    drivers = ('pyodbc',)
    # Overrides only the type map; all SQL generation is inherited.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NCLOB',
        'json': 'NCLOB',
        'password': 'NVARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'INTEGER4 not null unique with default next value for %s'% INGRES_SEQNAME,
        'reference': 'INTEGER4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'NCLOB',
        'list:string': 'NCLOB',
        'list:reference': 'NCLOB',
        'big-id': 'BIGINT not null unique with default next value for %s'% INGRES_SEQNAME,
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        }
| mit |
supertylerc/trigger | trigger/utils/xmltodict.py | 13 | 9350 | #!/usr/bin/env python
"Makes working with XML feel like you are working with JSON"
from xml.parsers import expat
from xml.sax.saxutils import XMLGenerator
from xml.sax.xmlreader import AttributesImpl
try: # pragma no cover
from cStringIO import StringIO
except ImportError: # pragma no cover
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try: # pragma no cover
from collections import OrderedDict
except ImportError: # pragma no cover
OrderedDict = dict
try: # pragma no cover
_basestring = basestring
except NameError: # pragma no cover
_basestring = str
try: # pragma no cover
_unicode = unicode
except NameError: # pragma no cover
_unicode = str
__author__ = 'Martin Blech'
__version__ = '0.4.6'
__license__ = 'MIT'
class ParsingInterrupted(Exception):
    """Raised to abort streaming parsing when an item callback
    returns a false-ish value."""
class _DictSAXHandler(object):
    """SAX-style handler that folds expat events into nested dicts.

    Maintains a ``path`` of (name, attrs) pairs from the root to the
    current element and a ``stack`` of partially-built (item, data)
    frames.  ``item`` accumulates child elements/attributes; ``data``
    accumulates character data.  When ``item_depth`` > 0 the handler
    streams: ``item_callback`` is called for each item at that depth.
    """
    def __init__(self,
                 item_depth=0,
                 item_callback=lambda *args: True,
                 xml_attribs=True,
                 attr_prefix='@',
                 cdata_key='#text',
                 force_cdata=False,
                 cdata_separator='',
                 postprocessor=None,
                 dict_constructor=OrderedDict,
                 strip_whitespace=True):
        # path: (tag, attrs-or-None) pairs from root to current element.
        self.path = []
        # stack: saved (item, data) frames of the enclosing elements.
        self.stack = []
        self.data = None
        self.item = None
        self.item_depth = item_depth
        self.xml_attribs = xml_attribs
        self.item_callback = item_callback
        self.attr_prefix = attr_prefix
        self.cdata_key = cdata_key
        self.force_cdata = force_cdata
        self.cdata_separator = cdata_separator
        self.postprocessor = postprocessor
        self.dict_constructor = dict_constructor
        self.strip_whitespace = strip_whitespace

    def startElement(self, name, attrs):
        # expat delivers ordered attributes as a flat [k1, v1, k2, v2, ...]
        # list; pair them back up.
        attrs = self.dict_constructor(zip(attrs[0::2], attrs[1::2]))
        self.path.append((name, attrs or None))
        if len(self.path) > self.item_depth:
            # Entering a tracked element: save the parent frame.
            self.stack.append((self.item, self.data))
            if self.xml_attribs:
                # Attributes become regular keys, prefixed to avoid
                # colliding with child element names.
                attrs = self.dict_constructor(
                    (self.attr_prefix+key, value)
                    for (key, value) in attrs.items())
            else:
                attrs = None
            self.item = attrs or None
            self.data = None

    def endElement(self, name):
        if len(self.path) == self.item_depth:
            # Streaming mode: hand the finished item to the callback.
            item = self.item
            if item is None:
                item = self.data
            should_continue = self.item_callback(self.path, item)
            if not should_continue:
                raise ParsingInterrupted()
        if len(self.stack):
            # Fold the finished element back into its parent frame.
            item, data = self.item, self.data
            self.item, self.data = self.stack.pop()
            if self.strip_whitespace and data is not None:
                data = data.strip() or None
            if data and self.force_cdata and item is None:
                item = self.dict_constructor()
            if item is not None:
                if data:
                    self.push_data(item, self.cdata_key, data)
                self.item = self.push_data(self.item, name, item)
            else:
                self.item = self.push_data(self.item, name, data)
        else:
            # Closed the root element: nothing left to build.
            self.item = self.data = None
        self.path.pop()

    def characters(self, data):
        # expat may deliver text in several chunks; join them.
        if not self.data:
            self.data = data
        else:
            self.data += self.cdata_separator + data

    def push_data(self, item, key, data):
        """Insert (key, data) into item, collecting repeats into a list."""
        if self.postprocessor is not None:
            result = self.postprocessor(self.path, key, data)
            if result is None:
                # Postprocessor vetoed this pair.
                return item
            key, data = result
        if item is None:
            item = self.dict_constructor()
        try:
            value = item[key]
            if isinstance(value, list):
                value.append(data)
            else:
                # Second occurrence of the key: promote to a list.
                item[key] = [value, data]
        except KeyError:
            item[key] = data
        return item
def parse(xml_input, encoding='utf-8', *args, **kwargs):
    """Parse XML from a string or file-like object into a dictionary.

    Element attributes are stored under keys prefixed with ``@`` (unless
    ``xml_attribs=False``, in which case they are ignored) and text
    content under ``#text``; repeated child elements collapse into lists.

    With ``item_depth`` of 0 (the default) the dictionary for the root
    element is returned.  With a positive ``item_depth`` the parser
    streams instead: ``item_callback(path, item)`` is invoked for every
    item found at that depth and ``None`` is returned at the end; a
    false-ish callback return aborts parsing with
    :class:`ParsingInterrupted`.

    An optional ``postprocessor(path, key, value)`` hook may rewrite each
    (key, value) pair, or drop it by returning ``None``.

    Remaining positional/keyword arguments are forwarded to the internal
    SAX handler.
    """
    sax_handler = _DictSAXHandler(*args, **kwargs)
    expat_parser = expat.ParserCreate()
    expat_parser.ordered_attributes = True
    expat_parser.StartElementHandler = sax_handler.startElement
    expat_parser.EndElementHandler = sax_handler.endElement
    expat_parser.CharacterDataHandler = sax_handler.characters
    try:
        # First assume a file-like object and let expat pull from it.
        expat_parser.ParseFile(xml_input)
    except (TypeError, AttributeError):
        # Not file-like: treat the input as a string, encoding unicode
        # text before handing it to expat.
        if isinstance(xml_input, _unicode):
            xml_input = xml_input.encode(encoding)
        expat_parser.Parse(xml_input, True)
    return sax_handler.item
def _emit(key, value, content_handler,
          attr_prefix='@',
          cdata_key='#text',
          root=True,
          preprocessor=None):
    """Recursively emit SAX events for one (key, value) pair.

    Inverse of the parsing convention: keys starting with `attr_prefix`
    become attributes, `cdata_key` becomes character data, and lists
    produce one element per entry.  `preprocessor(key, value)` may
    rewrite a pair or drop it by returning None.
    """
    if preprocessor is not None:
        result = preprocessor(key, value)
        if result is None:
            return
        key, value = result
    # Normalize to a list so single values and repeats share one path.
    if not isinstance(value, (list, tuple)):
        value = [value]
    if root and len(value) > 1:
        raise ValueError('document with multiple roots')
    for v in value:
        if v is None:
            v = OrderedDict()
        elif not isinstance(v, dict):
            # Scalars are stringified ...
            v = _unicode(v)
        if isinstance(v, _basestring):
            # ... and wrapped as pure character data.
            v = OrderedDict(((cdata_key, v),))
        cdata = None
        attrs = OrderedDict()
        children = []
        # Split the mapping into text, attributes and child elements.
        for ik, iv in v.items():
            if ik == cdata_key:
                cdata = iv
                continue
            if ik.startswith(attr_prefix):
                attrs[ik[len(attr_prefix):]] = iv
                continue
            children.append((ik, iv))
        content_handler.startElement(key, AttributesImpl(attrs))
        for child_key, child_value in children:
            _emit(child_key, child_value, content_handler,
                  attr_prefix, cdata_key, False, preprocessor)
        if cdata is not None:
            content_handler.characters(cdata)
        content_handler.endElement(key)
def unparse(item, output=None, encoding='utf-8', **kwargs):
    """Emit an XML document for the dictionary `item` (inverse of `parse`).

    `item` must contain exactly one root key.  If `output` (a file-like
    object) is given, the document is written to it and None is returned;
    otherwise the serialized document is returned as a string, decoded
    with `encoding` when the underlying buffer yields bytes.

    Extra keyword arguments are forwarded to the internal emitter
    (e.g. `attr_prefix`, `cdata_key`, `preprocessor`).
    """
    # Exactly one (root key, value) pair is expected here.
    ((key, value),) = item.items()
    must_return = False
    if output is None:  # identity test; was the non-idiomatic `== None`
        output = StringIO()
        must_return = True
    content_handler = XMLGenerator(output, encoding)
    content_handler.startDocument()
    _emit(key, value, content_handler, **kwargs)
    content_handler.endDocument()
    if must_return:
        value = output.getvalue()
        try: # pragma no cover
            value = value.decode(encoding)
        except AttributeError: # pragma no cover
            pass
        return value
if __name__ == '__main__': # pragma: no cover
    import sys
    import marshal
    # CLI mode: stream items at the given depth from stdin XML and dump
    # each (path, item) pair to stdout with marshal.
    # NOTE(review): marshal.dump needs a binary stream; writing to
    # sys.stdout like this only works on Python 2 -- confirm before reuse.
    (item_depth,) = sys.argv[1:]
    item_depth = int(item_depth)
    def handle_item(path, item):
        # Emit one marshalled record per streamed item; returning True
        # tells the parser to keep going.
        marshal.dump((path, item), sys.stdout)
        return True
    try:
        root = parse(sys.stdin,
                item_depth=item_depth,
                item_callback=handle_item,
                dict_constructor=dict)
        if item_depth == 0:
            # Non-streaming mode: emit the whole document as one record.
            handle_item([], root)
    except KeyboardInterrupt:
        pass
| bsd-3-clause |
Jumpscale/core9 | JumpScale9/tools/develop/CodeDirs.py | 1 | 5548 | from js9 import j
JSBASE = j.application.jsbase_get_class()
class CodeDirs(JSBASE):
    """Registry of code repositories found under ``j.dirs.CODEDIR``.

    The on-disk layout is ``<CODEDIR>/<type>/<account>/<repo>``.  The
    hierarchy is mirrored into a tree (dotted paths, e.g.
    ``github.myorg.myrepo``) which is persisted in the JumpScale state
    file under ``develop/codedirs``.
    """

    def __init__(self):
        JSBASE.__init__(self)
        self.path = j.dirs.CODEDIR
        self.load()

    def load(self):
        """(Re)build the repo tree from the state file and the filesystem.

        Entries persisted in state but no longer present on disk are
        removed; newly discovered directories are added.
        """
        data = j.core.state.stateGetFromDict("develop", "codedirs", "")
        self.tree = j.data.treemanager.get(data=data)
        # Mark everything deleted up front; entries still on disk get
        # re-set below, and the leftovers are purged at the end.
        self.tree.setDeleteState()
        types = j.sal.fs.listDirsInDir(j.dirs.CODEDIR, False, True)
        for ttype in types:
            self.tree.set(ttype, cat="type")
            if ttype[0] == "." or ttype[0] == "_":
                continue
            accounts = j.sal.fs.listDirsInDir("%s/%s" % (j.dirs.CODEDIR, ttype), False, True)
            for account in accounts:
                if account[0] == "." or account[0] == "_":
                    continue
                path = "%s.%s" % (ttype, account)
                self.tree.set(path, cat="account")
                repos = j.sal.fs.listDirsInDir("%s/%s/%s" % (j.dirs.CODEDIR, ttype, account), False, True)
                for repo in repos:
                    if not repo.startswith(".") and not account.startswith("."):
                        path = "%s.%s.%s" % (ttype, account, repo)
                        self.tree.set(path, cat="repo", item=CodeDir(self, ttype, account, repo))
        self.tree.removeDeletedItems()

    def getActiveCodeDirs(self):
        """Return CodeDir objects for all repos marked selected in the tree."""
        res = []
        for item in self.tree.find(cat="repo"):
            if item.selected:
                ttype, account, name = item.path.split(".")
                res.append(CodeDir(self, ttype=ttype, account=account, name=name))
        return res

    def get(self, type, account, reponame):
        """Return a CodeDir for an explicitly addressed repo."""
        return CodeDir(self, type, account, reponame)

    def codeDirGet(self, reponame, account=None, die=True):
        """Find the single repo named `reponame` (optionally within `account`).

        Raises j.exceptions.Input when not found (unless die is False, in
        which case None is returned) or when the name is ambiguous.

        Note: the previous implementation was non-functional debug code
        (``self.self.tree``, undefined names, an embedded IPython shell).
        """
        res = []
        for item in self.tree.find(cat="repo"):
            ttype, itemaccount, name = item.path.split(".")
            if account is not None and itemaccount != account:
                continue
            if name == reponame:
                res.append(CodeDir(self, ttype=ttype, account=itemaccount, name=name))
        if len(res) == 0:
            if die is False:
                return None
            raise j.exceptions.Input("did not find codedir: %s:%s" % (account, reponame))
        if len(res) > 1:
            raise j.exceptions.Input("found more than 1 codedir: %s:%s" % (account, reponame))
        return res[0]

    def save(self):
        """Persist the current tree into the JumpScale state file."""
        j.core.state.stateSetInDict("develop", "codedirs", self.tree.dumps())

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return ("%s" % self.tree)
class CodeDir(JSBASE):
    """A single repository checkout at <CODEDIR>/<type>/<account>/<name>."""
    def __init__(self, codedirs, ttype, account, name):
        # codedirs: the parent CodeDirs registry; its .path is the code root.
        JSBASE.__init__(self)
        self.path = j.sal.fs.joinPaths(codedirs.path, ttype, account, name)
        self.account = account
        self.type = ttype
        self.name = name
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        # Left-pad the account column so listings line up.
        return ("%-22s : %s" % (self.account, self.name))
| apache-2.0 |
Fusion-Rom/android_external_chromium_org | native_client_sdk/src/tools/lib/quote.py | 106 | 2116 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
def quote(input_str, specials, escape='\\'):
  """Returns a quoted version of |input_str|, where every character in the
  iterable |specials| (usually a set or a string) and the escape
  character |escape| is replaced by the original character preceded by
  the escape character.

  The previous implementation built a regular expression directly from
  |specials|, so any special that happened to be a regex metacharacter
  (e.g. '.', '*', '(') corrupted the pattern; this version escapes
  character-by-character and has no such restriction.
  """
  assert len(escape) == 1
  # Escape the escape character itself as well as every special.
  to_escape = set(specials)
  to_escape.add(escape)
  out = []
  for ch in input_str:
    if ch in to_escape:
      out.append(escape)
    out.append(ch)
  return ''.join(out)
def unquote(input_str, specials, escape='\\'):
  """Splits the input string |input_str| where special characters in
  the input |specials| are, if not quoted by |escape|, used as
  delimiters to split the string. The returned value is a list of
  strings of alternating non-specials and specials used to break the
  string. The list will always begin with a possibly empty string of
  non-specials, but may end with either specials or non-specials."""
  assert len(escape) == 1
  out = []
  cur_out = []
  # cur_special: whether cur_out currently accumulates delimiter chars.
  cur_special = False
  # lit_next: the previous char was an unquoted escape, so the next
  # char is taken literally.
  lit_next = False
  for c in input_str:
    if cur_special:
      lit_next = (c == escape)
      if c not in specials or lit_next:
        # Delimiter run ends: flush it and start a non-special run.
        cur_special = False
        out.append(''.join(cur_out))
        if not lit_next:
          cur_out = [c]
        else:
          # The escape itself is consumed, not emitted.
          cur_out = []
      else:
        cur_out.append(c)
    else:
      if lit_next:
        # Previous char was the escape: take this char verbatim.
        cur_out.append(c)
        lit_next = False
      else:
        lit_next = c == escape
        if c not in specials:
          if not lit_next:
            cur_out.append(c)
        else:
          # Unquoted delimiter: flush the non-special run and start
          # collecting delimiter characters.
          out.append(''.join(cur_out))
          cur_out = [c]
          cur_special = True
  # Flush whatever run was in progress (possibly empty).
  out.append(''.join(cur_out))
  return out
| bsd-3-clause |
PLXDev/PLX | share/qt/make_spinner.py | 4415 | 1035 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
# Input/output locations and animation parameters.
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(SRC)
# PIL rotates counter-clockwise; mirror the source (and negate the angle
# below) to get a clockwise spin.
if CLOCKWISE:
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
    # Temporary per-frame PNG path, e.g. /tmp/tmp-007.png.
    return path.join(TMPDIR, TMPNAME % frame)
frame_files = []
# NOTE: Python 2 script (xrange, old PIL constants).
for frame in xrange(NUMFRAMES):
    # Sample rotation at the midpoint of each frame's angular step.
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)
# Assemble the frames into the final .mng with imagemagick (6.7+).
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
| mit |
marioaugustorama/yowsup | yowsup/layers/protocol_groups/protocolentities/iq_groups_participants_promote.py | 61 | 1130 | from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from .iq_groups_participants import ParticipantsGroupsIqProtocolEntity
class PromoteParticipantsIqProtocolEntity(ParticipantsGroupsIqProtocolEntity):
    '''
    IQ request promoting group participants to admins.  Wire format:

    <iq type="set" id="{{id}}" xmlns="w:g2", to="{{group_jid}}">
        <promote>
            <participant jid="{{jid}}"></participant>
            <participant jid="{{jid}}"></participant>
        </promote>
    </iq>
    '''
    def __init__(self, group_jid, participantList, _id = None):
        # Delegate to the generic participants IQ with the "promote" tag.
        super(PromoteParticipantsIqProtocolEntity, self).__init__(group_jid, participantList, "promote", _id = _id)
    @staticmethod
    def fromProtocolTreeNode(node):
        """Build a PromoteParticipantsIqProtocolEntity from a parsed <iq> node."""
        entity = super(PromoteParticipantsIqProtocolEntity, PromoteParticipantsIqProtocolEntity).fromProtocolTreeNode(node)
        # Re-brand the generic entity produced by the parent parser.
        entity.__class__ = PromoteParticipantsIqProtocolEntity
        participantList = []
        for participantNode in node.getChild("promote").getAllChildren():
            participantList.append(participantNode["jid"])
        entity.setProps(node.getAttributeValue("to"), participantList)
        return entity
| gpl-3.0 |
pchauncey/ansible | lib/ansible/module_utils/ovirt.py | 16 | 25727 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import collections
import inspect
import os
import time
from abc import ABCMeta, abstractmethod
from datetime import datetime
from distutils.version import LooseVersion
try:
from enum import Enum # enum is a ovirtsdk4 requirement
import ovirtsdk4 as sdk
import ovirtsdk4.version as sdk_version
HAS_SDK = LooseVersion(sdk_version.VERSION) >= LooseVersion('4.0.0')
except ImportError:
HAS_SDK = False
# IEC binary unit suffix (lower-cased) -> multiplier in bytes.
BYTES_MAP = {
    'kib': 2**10,
    'mib': 2**20,
    'gib': 2**30,
    'tib': 2**40,
    'pib': 2**50,
}
def check_sdk(module):
    """Fail the Ansible module when ovirtsdk4 >= 4.0.0 is not importable."""
    if not HAS_SDK:
        module.fail_json(
            msg='ovirtsdk4 version 4.0.0 or higher is required for this module'
        )
def get_dict_of_struct(struct, connection=None, fetch_nested=False, attributes=None):
    """
    Convert SDK Struct type into dictionary.

    :param struct: SDK struct instance (None yields an empty dict).
    :param connection: SDK connection, used to resolve nested links.
    :param fetch_nested: if True, follow href links of nested lists.
    :param attributes: attribute names to copy for fetched nested objects.
    :return: plain dict representation of the struct.
    """
    res = {}

    def remove_underscore(val):
        # Strip all leading underscores from the SDK's private attribute
        # names.  (The old recursive version discarded its own result, so
        # only the first underscore was ever removed.)
        while val.startswith('_'):
            val = val[1:]
        return val

    def convert_value(value):
        nested = False

        if isinstance(value, sdk.Struct):
            return get_dict_of_struct(value)
        elif isinstance(value, Enum) or isinstance(value, datetime):
            return str(value)
        elif isinstance(value, list) or isinstance(value, sdk.List):
            if isinstance(value, sdk.List) and fetch_nested and value.href:
                try:
                    value = connection.follow_link(value)
                    nested = True
                except sdk.Error:
                    value = []

            ret = []
            for i in value:
                if isinstance(i, sdk.Struct):
                    if not nested:
                        ret.append(get_dict_of_struct(i))
                    else:
                        nested_obj = dict(
                            (attr, convert_value(getattr(i, attr)))
                            for attr in attributes if getattr(i, attr, None)
                        )
                        # Plain id value; a trailing comma previously made
                        # this a one-element tuple.
                        nested_obj['id'] = getattr(i, 'id', None)
                        ret.append(nested_obj)
                elif isinstance(i, Enum):
                    ret.append(str(i))
                else:
                    ret.append(i)
            return ret
        else:
            return value

    if struct is not None:
        for key, value in struct.__dict__.items():
            if value is None:
                continue

            key = remove_underscore(key)
            res[key] = convert_value(value)

    return res
def engine_version(connection):
    """
    Return string representation of oVirt engine version.
    """
    product_version = connection.system_service().get().product_info.version
    return '%s.%s' % (product_version.major, product_version.minor)
def create_connection(auth):
    """
    Create a connection to Python SDK, from task `auth` parameter.

    If user doesn't have SSO token the `auth` dictionary has following parameters mandatory:
     url, username, password

    If user has SSO token the `auth` dictionary has following parameters mandatory:
     url, token

    The `ca_file` parameter is mandatory in case user want to use secure connection,
    in case user want to use insecure connection, it's mandatory to send insecure=True.

    NOTE(review): requires ovirtsdk4 to be importable; callers should have
    run check_sdk() first, otherwise the `sdk` name is unbound here.

    :param auth: dictionary which contains needed values for connection creation
    :return: Python SDK connection
    """
    return sdk.Connection(
        url=auth.get('url'),
        username=auth.get('username'),
        password=auth.get('password'),
        ca_file=auth.get('ca_file', None),
        insecure=auth.get('insecure', False),
        token=auth.get('token', None),
        kerberos=auth.get('kerberos', None),
        headers=auth.get('headers', None),
    )
def convert_to_bytes(param):
    """
    Convert an IEC size string (e.g. '10 GiB') to a number of bytes.

    A bare integer string is interpreted as KiB; None is passed through
    unchanged.

    :param param: value to be converted
    """
    if param is None:
        return None

    # Strip all whitespace so '10 GiB' and '10GiB' are equivalent.
    compact = ''.join(param.split())

    if compact[-3].lower() in ('k', 'm', 'g', 't', 'p'):
        multiplier = BYTES_MAP.get(compact[-3:].lower(), 1)
        return int(compact[:-3]) * multiplier
    if compact.isdigit():
        return int(compact) * 2**10
    raise ValueError(
        "Unsupported value(IEC supported): '{value}'".format(value=compact)
    )
def follow_link(connection, link):
    """
    Resolve `link` via the SDK connection and return the target entity.

    :param connection: connection to the Python SDK
    :param link: link of the entity (falsy values yield None)
    :return: entity which link points to, or None
    """
    return connection.follow_link(link) if link else None
def get_link_name(connection, link):
    """
    Resolve `link` via the SDK connection and return the target's name.

    :param connection: connection to the Python SDK
    :param link: link of the entity (falsy values yield None)
    :return: name of the entity which link points to, or None
    """
    if not link:
        return None
    return connection.follow_link(link).name
def equal(param1, param2, ignore_case=False):
    """
    Compare two parameters, treating an unspecified (None) first value
    as equal.  This way no comparison happens for task parameters the
    user did not set.

    :param param1: user inputted parameter
    :param param2: value of entity parameter
    :return: True if parameters are equal or first parameter is None, otherwise False
    """
    if param1 is None:
        return True
    if ignore_case:
        return param1.lower() == param2.lower()
    return param1 == param2
def search_by_attributes(service, **kwargs):
    """
    Search for the entity by attributes. Nested entities don't support search
    via REST, so in case using search for nested entity we return all entities
    and filter them by specified attributes.

    :param service: service of the entity
    :param kwargs: attribute name/value pairs to match
    :return: first matching entity, or None
    """
    # inspect.getargspec was removed in Python 3.11; prefer
    # getfullargspec when available and fall back for Python 2.
    argspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    # Check if 'list' method support search(look for search parameter):
    if 'search' in argspec(service.list)[0]:
        res = service.list(
            search=' and '.join('{}={}'.format(k, v) for k, v in kwargs.items())
        )
    else:
        # Nested service: fetch everything and filter client-side.
        res = [
            e for e in service.list() if len([
                k for k, v in kwargs.items() if getattr(e, k, None) == v
            ]) == len(kwargs)
        ]

    res = res or [None]
    return res[0]
def search_by_name(service, name, **kwargs):
    """
    Search for the entity by its name. Nested entities don't support search
    via REST, so in case using search for nested entity we return all entities
    and filter them by name.

    :param service: service of the entity
    :param name: name of the entity
    :param kwargs: additional attribute name/value pairs to match
    :return: Entity object returned by Python SDK, or None
    """
    # inspect.getargspec was removed in Python 3.11; prefer
    # getfullargspec when available and fall back for Python 2.
    argspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    # Check if 'list' method support search(look for search parameter):
    if 'search' in argspec(service.list)[0]:
        res = service.list(
            search="name={name}".format(name=name)
        )
    else:
        res = [e for e in service.list() if e.name == name]

    if kwargs:
        # Narrow the *name* matches by the extra attributes.  (The old
        # code re-filtered the full entity list here, silently dropping
        # the name condition.)
        res = [
            e for e in res if len([
                k for k, v in kwargs.items() if getattr(e, k, None) == v
            ]) == len(kwargs)
        ]

    res = res or [None]
    return res[0]
def get_entity(service):
    """
    Fetch the entity behind `service`, swallowing SDK errors.

    A 404 from the engine (e.g. while an entity is being removed) is
    treated as "no entity" rather than a failure.
    """
    try:
        return service.get()
    except sdk.Error:
        # e.g. 404 while removing the entity -- report it as absent.
        return None
def get_id_by_name(service, name, raise_error=True, ignore_case=False):
    """
    Search an entity by its name and return its id.

    Raises when nothing matches, unless `raise_error` is False, in which
    case None is returned instead.
    """
    found = search_by_name(service, name)
    if found is None:
        if raise_error:
            raise Exception("Entity '%s' was not found." % name)
        return None
    return found.id
def wait(
    service,
    condition,
    fail_condition=lambda e: False,
    timeout=180,
    wait=True,
    poll_interval=3,
):
    """
    Poll until the entity fulfills the expected condition.

    :param service: service of the entity
    :param condition: condition to be fulfilled
    :param fail_condition: if this condition is true, raise Exception
    :param timeout: max time to wait in seconds
    :param wait: if True wait for condition, if False don't wait
    :param poll_interval: seconds between condition checks
    """
    if not wait:
        return

    deadline = time.time() + timeout
    while time.time() < deadline:
        entity = get_entity(service)
        if condition(entity):
            return
        if fail_condition(entity):
            raise Exception("Error while waiting on result state of the entity.")
        # Neither condition holds yet; sleep before the next poll.
        time.sleep(float(poll_interval))

    raise Exception("Timeout exceed while waiting on result state of the entity.")
def __get_auth_dict():
    """Build the argument-spec entry for 'auth'.

    When the OVIRT_* environment variables provide a usable login
    (URL plus either a token or username/password), they become the
    default and 'auth' is optional; otherwise it is required.
    """
    url = os.environ.get('OVIRT_URL')
    username = os.environ.get('OVIRT_USERNAME')
    password = os.environ.get('OVIRT_PASSWORD')
    token = os.environ.get('OVIRT_TOKEN')
    ca_file = os.environ.get('OVIRT_CAFILE')

    if url and (token or (username and password)):
        env_defaults = {
            'url': url,
            'username': username,
            'password': password,
            # Without a CA bundle we can only connect insecurely.
            'insecure': ca_file is None,
            'token': token,
            'ca_file': ca_file,
        }
        return dict(default=env_defaults, type='dict')
    return dict(required=True, type='dict')
def ovirt_facts_full_argument_spec(**kwargs):
    """
    Extend parameters of facts module with parameters which are common to all
    oVirt facts modules.

    :param kwargs: kwargs to be extended
    :return: extended dictionary with common parameters
    """
    common = {
        'auth': __get_auth_dict(),
        'fetch_nested': dict(default=False, type='bool'),
        'nested_attributes': dict(type='list', default=list()),
    }
    common.update(kwargs)
    return common
def ovirt_full_argument_spec(**kwargs):
    """
    Extend parameters of module with parameters which are common to all
    oVirt modules.

    :param kwargs: kwargs to be extended
    :return: extended dictionary with common parameters
    """
    common = {
        'auth': __get_auth_dict(),
        'timeout': dict(default=180, type='int'),
        'wait': dict(default=True, type='bool'),
        'poll_interval': dict(default=3, type='int'),
        'fetch_nested': dict(default=False, type='bool'),
        'nested_attributes': dict(type='list', default=list()),
    }
    common.update(kwargs)
    return common
def check_params(module):
    """
    Most modules must have either `name` or `id` specified; fail the
    module run otherwise.
    """
    params = module.params
    if params.get('name') is None and params.get('id') is None:
        module.fail_json(msg='"name" or "id" is required')
def engine_supported(connection, version):
    """Return True when the connected engine is at least `version` (e.g. '4.1')."""
    return LooseVersion(engine_version(connection)) >= LooseVersion(version)
def check_support(version, connection, module, params):
    """
    Check if parameters used by user are supported by oVirt Python SDK
    and oVirt engine.

    Returns True when the user set none of `params`, or when both the
    installed SDK and the remote engine are at least `version`.
    """
    api_version = LooseVersion(engine_version(connection))
    version = LooseVersion(version)
    for param in params:
        if module.params.get(param) is not None:
            # The user set a gated parameter: enforce the version check.
            return LooseVersion(sdk_version.VERSION) >= version and api_version >= version

    return True
class BaseModule(object):
    """
    This is base class for oVirt modules. oVirt modules should inherit this
    class and override method to customize specific needs of the module.
    The only abstract method of this class is `build_entity`, which must
    to be implemented in child class.
    """
    # NOTE(review): ``__metaclass__`` is the Python 2 metaclass hook; Python 3
    # ignores it, so abstract-method enforcement is lost there.  Kept
    # unchanged for backward compatibility with Python 2 callers.
    __metaclass__ = ABCMeta

    def __init__(self, connection, module, service, changed=False):
        """
        :param connection: oVirt SDK connection to the engine.
        :param module: AnsibleModule instance executing the task.
        :param service: oVirt SDK service wrapping the entity collection.
        :param changed: initial 'changed' state to report.
        """
        self._connection = connection
        self._module = module
        self._service = service
        self._changed = changed
        # Populated by create() when Ansible runs with --diff:
        self._diff = {'after': dict(), 'before': dict()}

    @property
    def changed(self):
        return self._changed

    @changed.setter
    def changed(self, changed):
        # Sticky flag: once the module reported a change it can never be
        # reset back to False.
        if not self._changed:
            self._changed = changed

    @abstractmethod
    def build_entity(self):
        """
        This method should return oVirt Python SDK type, which we want to
        create or update, initialized by values passed by Ansible module.

        For example if we want to create VM, we will return following:
          types.Vm(name=self._module.params['vm_name'])

        :return: Specific instance of sdk.Struct.
        """
        pass

    def param(self, name, default=None):
        """
        Return a module parameter specified by it's name.
        """
        return self._module.params.get(name, default)

    def update_check(self, entity):
        """
        This method handle checks whether the entity values are same as values
        passed to ansible module. By default we don't compare any values.

        :param entity: Entity we want to compare with Ansible module values.
        :return: True if values are same, so we don't need to update the entity.
        """
        return True

    def pre_create(self, entity):
        """
        This method is called right before entity is created.

        :param entity: Entity to be created or updated.
        """
        pass

    def post_create(self, entity):
        """
        This method is called right after entity is created.

        :param entity: Entity which was created.
        """
        pass

    def post_update(self, entity):
        """
        This method is called right after entity is updated.

        :param entity: Entity which was updated.
        """
        pass

    def diff_update(self, after, update):
        """
        Recursively merge ``update`` into ``after``: nested mappings are
        merged key by key, any other value simply overwrites.

        :param after: dictionary which is updated in place.
        :param update: dictionary with the new values.
        :return: the merged ``after`` dictionary.
        """
        # ``collections.Mapping`` was removed in Python 3.10; import the ABC
        # from its proper home, falling back for Python 2 compatibility.
        try:
            from collections.abc import Mapping
        except ImportError:  # Python 2
            from collections import Mapping
        for k, v in update.items():
            if isinstance(v, Mapping):
                after[k] = self.diff_update(after.get(k, dict()), v)
            else:
                after[k] = update[k]
        return after

    def create(
        self,
        entity=None,
        result_state=None,
        fail_condition=lambda e: False,
        search_params=None,
        update_params=None,
        **kwargs
    ):
        """
        Method which is called when state of the entity is 'present'. If user
        don't provide `entity` parameter the entity is searched using
        `search_params` parameter. If entity is found it's updated, whether
        the entity should be updated is checked by `update_check` method.
        The corresponding updated entity is build by `build_entity` method.

        Function executed after entity is created can optionally be specified
        in `post_create` parameter. Function executed after entity is updated
        can optionally be specified in `post_update` parameter.

        :param entity: Entity we want to update, if exists.
        :param result_state: State which should entity has in order to finish task.
        :param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised.
        :param search_params: Dictionary of parameters to be used for search.
        :param update_params: The params which should be passed to update method.
        :param kwargs: Additional parameters passed when creating entity.
        :return: Dictionary with values returned by Ansible module.
        """
        if entity is None:
            entity = self.search_entity(search_params)

        self.pre_create(entity)

        if entity:
            # Entity exists, so update it:
            entity_service = self._service.service(entity.id)
            if not self.update_check(entity):
                new_entity = self.build_entity()
                if not self._module.check_mode:
                    update_params = update_params or {}
                    # Return value of update() is intentionally unused;
                    # callers read the entity back via get_dict_of_struct.
                    entity_service.update(
                        new_entity,
                        **update_params
                    )
                    self.post_update(entity)

                # Update diffs only if user specified --diff parameter,
                # so we don't needlessly overload the API:
                if self._module._diff:
                    before = get_dict_of_struct(
                        entity,
                        self._connection,
                        fetch_nested=True,
                        attributes=['name'],
                    )
                    after = before.copy()
                    self.diff_update(after, get_dict_of_struct(new_entity))
                    self._diff['before'] = before
                    self._diff['after'] = after

                self.changed = True
        else:
            # Entity doesn't exist, so create it:
            if not self._module.check_mode:
                entity = self._service.add(
                    self.build_entity(),
                    **kwargs
                )
                self.post_create(entity)
            self.changed = True

        # Wait for the entity to be created and to be in the defined state:
        # NOTE(review): in check mode, when the entity did not exist,
        # ``entity`` is still None here and ``entity.id`` raises -- confirm
        # callers never reach this path in check mode.
        entity_service = self._service.service(entity.id)

        def state_condition(entity):
            return entity

        if result_state:
            # Narrow the wait condition to the requested terminal status:
            def state_condition(entity):
                return entity and entity.status == result_state

        wait(
            service=entity_service,
            condition=state_condition,
            fail_condition=fail_condition,
            wait=self._module.params['wait'],
            timeout=self._module.params['timeout'],
            poll_interval=self._module.params['poll_interval'],
        )

        return {
            'changed': self.changed,
            'id': entity.id,
            type(entity).__name__.lower(): get_dict_of_struct(
                struct=entity,
                connection=self._connection,
                fetch_nested=self._module.params.get('fetch_nested'),
                attributes=self._module.params.get('nested_attributes'),
            ),
            'diff': self._diff,
        }

    def pre_remove(self, entity):
        """
        This method is called right before entity is removed.

        :param entity: Entity which we want to remove.
        """
        pass

    def entity_name(self, entity):
        """Human readable "<type> '<name>'" label used in messages."""
        return "{e_type} '{e_name}'".format(
            e_type=type(entity).__name__.lower(),
            e_name=getattr(entity, 'name', None),
        )

    def remove(self, entity=None, search_params=None, **kwargs):
        """
        Method which is called when state of the entity is 'absent'. If user
        don't provide `entity` parameter the entity is searched using
        `search_params` parameter. If entity is found it's removed.

        Function executed before remove is executed can optionally be specified
        in `pre_remove` parameter.

        :param entity: Entity we want to remove.
        :param search_params: Dictionary of parameters to be used for search.
        :param kwargs: Additional parameters passed when removing entity.
        :return: Dictionary with values returned by Ansible module.
        """
        if entity is None:
            entity = self.search_entity(search_params)

        if entity is None:
            # Nothing to do; report current (unchanged) state.
            return {
                'changed': self.changed,
                'msg': "Entity wasn't found."
            }

        self.pre_remove(entity)

        entity_service = self._service.service(entity.id)
        if not self._module.check_mode:
            entity_service.remove(**kwargs)
            # Wait until the entity disappears from the engine:
            wait(
                service=entity_service,
                condition=lambda entity: not entity,
                wait=self._module.params['wait'],
                timeout=self._module.params['timeout'],
                poll_interval=self._module.params['poll_interval'],
            )
        self.changed = True

        return {
            'changed': self.changed,
            'id': entity.id,
            type(entity).__name__.lower(): get_dict_of_struct(
                struct=entity,
                connection=self._connection,
                fetch_nested=self._module.params.get('fetch_nested'),
                attributes=self._module.params.get('nested_attributes'),
            ),
        }

    def action(
        self,
        action,
        entity=None,
        action_condition=lambda e: e,
        wait_condition=lambda e: e,
        fail_condition=lambda e: False,
        pre_action=lambda e: e,
        post_action=lambda e: None,
        search_params=None,
        **kwargs
    ):
        """
        This method is executed when we want to change the state of some oVirt
        entity. The action to be executed on oVirt service is specified by
        `action` parameter. Whether the action should be executed can be
        specified by passing `action_condition` parameter. State which the
        entity should be in after execution of the action can be specified
        by `wait_condition` parameter.

        Function executed before an action on entity can optionally be specified
        in `pre_action` parameter. Function executed after an action on entity can
        optionally be specified in `post_action` parameter.

        :param action: Action which should be executed by service on entity.
        :param entity: Entity we want to run action on.
        :param action_condition: Function which is executed when checking if action should be executed.
        :param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised.
        :param wait_condition: Function which is executed when waiting on result state.
        :param pre_action: Function which is executed before running the action.
        :param post_action: Function which is executed after running the action.
        :param search_params: Dictionary of parameters to be used for search.
        :param kwargs: Additional parameters passed to action.
        :return: Dictionary with values returned by Ansible module.
        """
        if entity is None:
            entity = self.search_entity(search_params)

        entity = pre_action(entity)

        if entity is None:
            self._module.fail_json(
                msg="Entity not found, can't run action '{}'.".format(
                    action
                )
            )

        # Re-fetch the entity to evaluate the condition on fresh state:
        entity_service = self._service.service(entity.id)
        entity = entity_service.get()
        if action_condition(entity):
            if not self._module.check_mode:
                getattr(entity_service, action)(**kwargs)
            self.changed = True
            post_action(entity)

        wait(
            service=self._service.service(entity.id),
            condition=wait_condition,
            fail_condition=fail_condition,
            wait=self._module.params['wait'],
            timeout=self._module.params['timeout'],
            poll_interval=self._module.params['poll_interval'],
        )
        return {
            'changed': self.changed,
            'id': entity.id,
            type(entity).__name__.lower(): get_dict_of_struct(
                struct=entity,
                connection=self._connection,
                fetch_nested=self._module.params.get('fetch_nested'),
                attributes=self._module.params.get('nested_attributes'),
            ),
            'diff': self._diff,
        }

    def wait_for_import(self):
        """
        Poll for the imported entity until it shows up, the timeout expires,
        or waiting is disabled.  Returns the entity or None.
        """
        if self._module.params['wait']:
            start = time.time()
            timeout = self._module.params['timeout']
            poll_interval = self._module.params['poll_interval']
            while time.time() < start + timeout:
                entity = self.search_entity()
                if entity:
                    return entity
                time.sleep(poll_interval)

    def search_entity(self, search_params=None):
        """
        Always first try to search by `ID`, if ID isn't specified,
        check if user constructed special search in `search_params`,
        if not search by `name`.
        """
        entity = None

        if 'id' in self._module.params and self._module.params['id'] is not None:
            entity = get_entity(self._service.service(self._module.params['id']))
        elif search_params is not None:
            entity = search_by_attributes(self._service, **search_params)
        elif self._module.params.get('name') is not None:
            entity = search_by_attributes(self._service, name=self._module.params['name'])

        return entity
| gpl-3.0 |
sogelink/ansible | lib/ansible/utils/module_docs_fragments/nxos.py | 87 | 4041 | #
# (c) 2015, Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    """Shared ``nxos`` connection option documentation.

    Ansible merges this fragment into the DOCUMENTATION of every nxos
    module via ``extends_documentation_fragment``.  The YAML string below
    is consumed verbatim by the doc tooling, so its content must stay
    byte-for-byte intact.
    """

    # Standard files documentation fragment
    DOCUMENTATION = """
options:
  host:
    description:
      - Specifies the DNS host name or address for connecting to the remote
        device over the specified transport. The value of host is used as
        the destination address for the transport.
    required: true
  port:
    description:
      - Specifies the port to use when building the connection to the remote
        device. This value applies to either I(cli) or I(nxapi). The port
        value will default to the appropriate transport common port if
        none is provided in the task. (cli=22, http=80, https=443).
    required: false
    default: 0 (use common port)
  username:
    description:
      - Configures the username to use to authenticate the connection to
        the remote device. This value is used to authenticate
        either the CLI login or the nxapi authentication depending on which
        transport is used. If the value is not specified in the task, the
        value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
    required: false
  password:
    description:
      - Specifies the password to use to authenticate the connection to
        the remote device. This is a common argument used for either I(cli)
        or I(nxapi) transports. If the value is not specified in the task, the
        value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
    required: false
    default: null
  timeout:
    description:
      - Specifies the timeout in seconds for communicating with the network device
        for either connecting or sending commands. If the timeout is
        exceeded before the operation is completed, the module will error.
        NX-API can be slow to return on long-running commands (sh mac, sh bgp, etc).
    require: false
    default: 10
    version_added: 2.3
  ssh_keyfile:
    description:
      - Specifies the SSH key to use to authenticate the connection to
        the remote device. This argument is only used for the I(cli)
        transport. If the value is not specified in the task, the
        value of environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
    required: false
  transport:
    description:
      - Configures the transport connection to use when connecting to the
        remote device. The transport argument supports connectivity to the
        device over cli (ssh) or nxapi.
    required: true
    default: cli
  use_ssl:
    description:
      - Configures the I(transport) to use SSL if set to true only when the
        C(transport=nxapi), otherwise this value is ignored.
    required: false
    default: no
    choices: ['yes', 'no']
  validate_certs:
    description:
      - If C(no), SSL certificates will not be validated. This should only be used
        on personally controlled sites using self-signed certificates. If the transport
        argument is not nxapi, this value is ignored.
    choices: ['yes', 'no']
  provider:
    description:
      - Convenience method that allows all I(nxos) arguments to be passed as
        a dict object. All constraints (required, choices, etc) must be
        met either by individual arguments or values in this dict.
    required: false
    default: null
"""
| gpl-3.0 |
lmregus/Portfolio | python/design_patterns/env/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.py | 102 | 14122 | """Module for supporting the lxml.etree library. The idea here is to use as much
of the native library as possible, without using fragile hacks like custom element
names that break between releases. The downside of this is that we cannot represent
all possible trees; specifically the following are known to cause problems:
Text or comments as siblings of the root element
Doctypes with no name
When any of these things occur, we emit a DataLossWarning
"""
from __future__ import absolute_import, division, unicode_literals
# pylint:disable=protected-access
import warnings
import re
import sys
from . import base
from ..constants import DataLossWarning
from .. import constants
from . import etree as etree_builders
from .. import _ihatexml
import lxml.etree as etree
# Module defaults: serialize complete documents (including the doctype).
fullTree = True
tag_regexp = re.compile("{([^}]*)}(.*)")  # splits Clark notation "{ns}localname"
comment_type = etree.Comment("asd").tag  # sentinel lxml uses as the .tag of comment nodes
class DocumentType(object):
    """Plain holder for a DOCTYPE's name and public/system identifiers."""

    def __init__(self, name, publicId, systemId):
        # Store the three DOCTYPE components verbatim.
        self.name, self.publicId, self.systemId = name, publicId, systemId
class Document(object):
    """Document node wrapping an lxml ElementTree plus our child wrappers."""

    def __init__(self):
        self._childNodes = []
        self._elementTree = None

    def appendChild(self, element):
        # lxml cannot hang siblings off an ElementTree directly, so attach
        # them as following siblings of the root element.
        root = self._elementTree.getroot()
        root.addnext(element._element)

    def _getChildNodes(self):
        return self._childNodes

    childNodes = property(_getChildNodes)
def testSerializer(element):
    """Render *element* in the html5lib test-suite tree format.

    Accepts an lxml ElementTree (full document), an lxml element, or a
    fragment (list-like of elements / strings) and returns the
    pipe-and-indent text representation used by the treebuilder tests.
    """
    rv = []
    infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True)
    def serializeElement(element, indent=0):
        if not hasattr(element, "tag"):
            if hasattr(element, "getroot"):
                # Full tree case
                rv.append("#document")
                if element.docinfo.internalDTD:
                    if not (element.docinfo.public_id or
                            element.docinfo.system_url):
                        dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
                    else:
                        dtd_str = """<!DOCTYPE %s "%s" "%s">""" % (
                            element.docinfo.root_name,
                            element.docinfo.public_id,
                            element.docinfo.system_url)
                    rv.append("|%s%s" % (' ' * (indent + 2), dtd_str))
                # Comments may precede the root element; rewind to the
                # first sibling, then walk forward over all of them.
                next_element = element.getroot()
                while next_element.getprevious() is not None:
                    next_element = next_element.getprevious()
                while next_element is not None:
                    serializeElement(next_element, indent + 2)
                    next_element = next_element.getnext()
            elif isinstance(element, str) or isinstance(element, bytes):
                # Text in a fragment
                assert isinstance(element, str) or sys.version_info[0] == 2
                rv.append("|%s\"%s\"" % (' ' * indent, element))
            else:
                # Fragment case
                rv.append("#document-fragment")
                for next_element in element:
                    serializeElement(next_element, indent + 2)
        elif element.tag == comment_type:
            rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
            if hasattr(element, "tail") and element.tail:
                rv.append("|%s\"%s\"" % (' ' * indent, element.tail))
        else:
            assert isinstance(element, etree._Element)
            # Tags are stored in Clark notation "{ns}name"; show the
            # human-readable prefix instead of the raw namespace URI.
            nsmatch = etree_builders.tag_regexp.match(element.tag)
            if nsmatch is not None:
                ns = nsmatch.group(1)
                tag = nsmatch.group(2)
                prefix = constants.prefixes[ns]
                rv.append("|%s<%s %s>" % (' ' * indent, prefix,
                                          infosetFilter.fromXmlName(tag)))
            else:
                rv.append("|%s<%s>" % (' ' * indent,
                                       infosetFilter.fromXmlName(element.tag)))
            if hasattr(element, "attrib"):
                # Attributes are emitted sorted for stable test output.
                attributes = []
                for name, value in element.attrib.items():
                    nsmatch = tag_regexp.match(name)
                    if nsmatch is not None:
                        ns, name = nsmatch.groups()
                        name = infosetFilter.fromXmlName(name)
                        prefix = constants.prefixes[ns]
                        attr_string = "%s %s" % (prefix, name)
                    else:
                        attr_string = infosetFilter.fromXmlName(name)
                    attributes.append((attr_string, value))
                for name, value in sorted(attributes):
                    rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
            if element.text:
                rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
            indent += 2
            for child in element:
                serializeElement(child, indent)
            # Tail text belongs to the parent's level, hence indent - 2.
            if hasattr(element, "tail") and element.tail:
                rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
    serializeElement(element, 0)
    return "\n".join(rv)
def tostring(element):
    """Serialize an element and its child nodes to a string"""
    pieces = []

    def walk(node):
        if not hasattr(node, "tag"):
            # lxml ElementTree (full document): emit the doctype if any,
            # then serialize from the root element.
            if node.docinfo.internalDTD:
                if node.docinfo.doctype:
                    dtd_str = node.docinfo.doctype
                else:
                    dtd_str = "<!DOCTYPE %s>" % node.docinfo.root_name
                pieces.append(dtd_str)
            walk(node.getroot())
        elif node.tag == comment_type:
            pieces.append("<!--%s-->" % (node.text,))
        else:
            # Ordinary element: open tag (with attributes), text,
            # children, close tag.
            if not node.attrib:
                pieces.append("<%s>" % (node.tag,))
            else:
                attrs = " ".join("%s=\"%s\"" % (name, value)
                                 for name, value in node.attrib.items())
                pieces.append("<%s %s>" % (node.tag, attrs))
            if node.text:
                pieces.append(node.text)
            for child in node:
                walk(child)
            pieces.append("</%s>" % (node.tag,))
        # Tail text follows the node regardless of its kind.
        if hasattr(node, "tail") and node.tail:
            pieces.append(node.tail)

    walk(element)
    return "".join(pieces)
class TreeBuilder(base.TreeBuilder):
    """html5lib tree builder producing lxml.etree trees.

    Because lxml cannot represent everything HTML allows (doctypes with no
    name, comments outside the root, non-XML names), several operations
    below emit DataLossWarning instead of failing.
    """
    documentClass = Document
    doctypeClass = DocumentType
    elementClass = None  # assigned per-instance in __init__ (closure over infosetFilter)
    commentClass = None  # assigned per-instance in __init__
    fragmentClass = Document
    implementation = etree
    def __init__(self, namespaceHTMLElements, fullTree=False):
        builder = etree_builders.getETreeModule(etree, fullTree=fullTree)
        infosetFilter = self.infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True)
        self.namespaceHTMLElements = namespaceHTMLElements
        # Dict subclass that mirrors every write into the underlying lxml
        # element's attrib, coercing names to infoset-safe XML names.
        class Attributes(dict):
            def __init__(self, element, value=None):
                if value is None:
                    value = {}
                self._element = element
                dict.__init__(self, value)  # pylint:disable=non-parent-init-called
                for key, value in self.items():
                    if isinstance(key, tuple):
                        # Namespaced attribute: key is (prefix, name, namespace).
                        name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
                    else:
                        name = infosetFilter.coerceAttribute(key)
                    self._element._element.attrib[name] = value
            def __setitem__(self, key, value):
                dict.__setitem__(self, key, value)
                if isinstance(key, tuple):
                    name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
                else:
                    name = infosetFilter.coerceAttribute(key)
                self._element._element.attrib[name] = value
        # Element/Comment wrappers that coerce names, text and comment data
        # through the infoset filter before handing them to lxml.
        class Element(builder.Element):
            def __init__(self, name, namespace):
                name = infosetFilter.coerceElement(name)
                builder.Element.__init__(self, name, namespace=namespace)
                self._attributes = Attributes(self)
            def _setName(self, name):
                self._name = infosetFilter.coerceElement(name)
                self._element.tag = self._getETreeTag(
                    self._name, self._namespace)
            def _getName(self):
                return infosetFilter.fromXmlName(self._name)
            name = property(_getName, _setName)
            def _getAttributes(self):
                return self._attributes
            def _setAttributes(self, attributes):
                self._attributes = Attributes(self, attributes)
            attributes = property(_getAttributes, _setAttributes)
            def insertText(self, data, insertBefore=None):
                data = infosetFilter.coerceCharacters(data)
                builder.Element.insertText(self, data, insertBefore)
            def appendChild(self, child):
                builder.Element.appendChild(self, child)
        class Comment(builder.Comment):
            def __init__(self, data):
                data = infosetFilter.coerceComment(data)
                builder.Comment.__init__(self, data)
            def _setData(self, data):
                data = infosetFilter.coerceComment(data)
                self._element.text = data
            def _getData(self):
                return self._element.text
            data = property(_getData, _setData)
        self.elementClass = Element
        self.commentClass = Comment
        # self.fragmentClass = builder.DocumentFragment
        base.TreeBuilder.__init__(self, namespaceHTMLElements)
    def reset(self):
        # Comments seen before the root element are buffered until
        # insertRoot() can attach them (see insertCommentInitial).
        base.TreeBuilder.reset(self)
        self.insertComment = self.insertCommentInitial
        self.initial_comments = []
        self.doctype = None
    def testSerializer(self, element):
        return testSerializer(element)
    def getDocument(self):
        # Module-level ``fullTree`` decides whether callers get the whole
        # ElementTree or just its root element.
        if fullTree:
            return self.document._elementTree
        else:
            return self.document._elementTree.getroot()
    def getFragment(self):
        # A fragment is returned as a plain list: leading text, the child
        # elements, then trailing text.
        fragment = []
        element = self.openElements[0]._element
        if element.text:
            fragment.append(element.text)
        fragment.extend(list(element))
        if element.tail:
            fragment.append(element.tail)
        return fragment
    def insertDoctype(self, token):
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]
        if not name:
            # lxml requires a named doctype; drop it with a warning.
            warnings.warn("lxml cannot represent empty doctype", DataLossWarning)
            self.doctype = None
        else:
            coercedName = self.infosetFilter.coerceElement(name)
            if coercedName != name:
                warnings.warn("lxml cannot represent non-xml doctype", DataLossWarning)
            doctype = self.doctypeClass(coercedName, publicId, systemId)
            self.doctype = doctype
    def insertCommentInitial(self, data, parent=None):
        # Called only before the root exists; buffer for insertRoot().
        assert parent is None or parent is self.document
        assert self.document._elementTree is None
        self.initial_comments.append(data)
    def insertCommentMain(self, data, parent=None):
        if (parent == self.document and
            self.document._elementTree.getroot()[-1].tag == comment_type):
                warnings.warn("lxml cannot represent adjacent comments beyond the root elements", DataLossWarning)
        super(TreeBuilder, self).insertComment(data, parent)
    def insertRoot(self, token):
        # Because of the way libxml2 works, it doesn't seem to be possible to
        # alter information like the doctype after the tree has been parsed.
        # Therefore we need to use the built-in parser to create our initial
        # tree, after which we can add elements like normal
        docStr = ""
        if self.doctype:
            assert self.doctype.name
            docStr += "<!DOCTYPE %s" % self.doctype.name
            if (self.doctype.publicId is not None or
                self.doctype.systemId is not None):
                docStr += (' PUBLIC "%s" ' %
                           (self.infosetFilter.coercePubid(self.doctype.publicId or "")))
                if self.doctype.systemId:
                    sysid = self.doctype.systemId
                    if sysid.find("'") >= 0 and sysid.find('"') >= 0:
                        warnings.warn("DOCTYPE system cannot contain single and double quotes", DataLossWarning)
                        sysid = sysid.replace("'", 'U00027')
                    if sysid.find("'") >= 0:
                        docStr += '"%s"' % sysid
                    else:
                        docStr += "'%s'" % sysid
                else:
                    docStr += "''"
            docStr += ">"
            if self.doctype.name != token["name"]:
                warnings.warn("lxml cannot represent doctype with a different name to the root element", DataLossWarning)
        # Placeholder root, renamed to the real tag below.
        docStr += "<THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>"
        root = etree.fromstring(docStr)
        # Append the initial comments:
        for comment_token in self.initial_comments:
            comment = self.commentClass(comment_token["data"])
            root.addprevious(comment._element)
        # Create the root document and add the ElementTree to it
        self.document = self.documentClass()
        self.document._elementTree = root.getroottree()
        # Give the root element the right name
        name = token["name"]
        namespace = token.get("namespace", self.defaultNamespace)
        if namespace is None:
            etree_tag = name
        else:
            etree_tag = "{%s}%s" % (namespace, name)
        root.tag = etree_tag
        # Add the root element to the internal child/open data structures
        root_element = self.elementClass(name, namespace)
        root_element._element = root
        self.document._childNodes.append(root_element)
        self.openElements.append(root_element)
        # Reset to the default insert comment function
        self.insertComment = self.insertCommentMain
| mit |
naturali/tensorflow | tensorflow/python/summary/impl/event_file_loader_test.py | 18 | 3171 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for event_file_loader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.summary.impl import event_file_loader
class EventFileLoaderTest(test_util.TensorFlowTestCase):
  """Tests for EventFileLoader's incremental reading of event records."""

  # A record containing a simple event.
  RECORD = (b'\x18\x00\x00\x00\x00\x00\x00\x00\xa3\x7fK"\t\x00\x00\xc0%\xddu'
            b'\xd5A\x1a\rbrain.Event:1\xec\xf32\x8d')

  def _NewTestFile(self):
    """Reserve a fresh file name inside the test's temp directory."""
    return tempfile.NamedTemporaryFile(dir=self.get_temp_dir()).name

  def _AppendBytes(self, filename, data):
    """Append raw bytes to the given file."""
    with open(filename, 'ab') as out:
      out.write(data)

  def _MakeLoader(self, filename):
    """Construct an EventFileLoader for a file in the temp directory."""
    return event_file_loader.EventFileLoader(
        os.path.join(self.get_temp_dir(), filename))

  def testEmptyEventFile(self):
    filename = self._NewTestFile()
    self._AppendBytes(filename, b'')
    loader = self._MakeLoader(filename)
    self.assertEqual(0, len(list(loader.Load())))

  def testSingleWrite(self):
    filename = self._NewTestFile()
    self._AppendBytes(filename, EventFileLoaderTest.RECORD)
    loader = self._MakeLoader(filename)
    events = list(loader.Load())
    self.assertEqual(1, len(events))
    self.assertEqual(1440183447.0, events[0].wall_time)
    # The loader remembers its position; a second Load() yields nothing new.
    self.assertEqual(0, len(list(loader.Load())))

  def testMultipleWrites(self):
    filename = self._NewTestFile()
    self._AppendBytes(filename, EventFileLoaderTest.RECORD)
    loader = self._MakeLoader(filename)
    self.assertEqual(1, len(list(loader.Load())))
    # Data appended after the first Load() is picked up by the next one.
    self._AppendBytes(filename, EventFileLoaderTest.RECORD)
    self.assertEqual(1, len(list(loader.Load())))

  def testMultipleLoads(self):
    filename = self._NewTestFile()
    self._AppendBytes(filename, EventFileLoaderTest.RECORD)
    loader = self._MakeLoader(filename)
    # Load() is lazy; unconsumed calls must not lose the record.
    loader.Load()
    loader.Load()
    self.assertEqual(1, len(list(loader.Load())))

  def testMultipleWritesAtOnce(self):
    filename = self._NewTestFile()
    self._AppendBytes(filename, EventFileLoaderTest.RECORD)
    self._AppendBytes(filename, EventFileLoaderTest.RECORD)
    loader = self._MakeLoader(filename)
    self.assertEqual(2, len(list(loader.Load())))
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  googletest.main()
| apache-2.0 |
RO-ny9/python-for-android | python-modules/twisted/twisted/protocols/dict.py | 61 | 10737 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Dict client protocol implementation.
@author: Pavel Pergamenshchik
"""
from twisted.protocols import basic
from twisted.internet import defer, protocol
from twisted.python import log
from StringIO import StringIO
def parseParam(line):
    """Chew one dqstring or atom from beginning of line and return (param, remaining line)"""
    if line == '':
        return (None, '')
    # mode 1: bare atom terminated by a space or end of line;
    # mode 2: double-quoted string terminated by an unescaped quote.
    mode = 2 if line[0] == '"' else 1
    token = ""
    stream = StringIO(line)
    if mode == 2:
        stream.read(1)  # consume the opening quote
    while True:
        ch = stream.read(1)
        if ch == '"':
            if mode == 2:
                stream.read(1)  # consume the separating space
                return (token, stream.read())
        elif ch == '\\':
            # The escaped character is taken literally, whatever it is.
            ch = stream.read(1)
            if ch == '':
                return (None, line)  # unexpected end of string
        elif ch == '':
            if mode == 1:
                return (token, stream.read())
            else:
                return (None, line)  # unterminated quoted string
        elif ch == ' ':
            if mode == 1:
                return (token, stream.read())
        token += ch
def makeAtom(line):
    """Munch a string into an 'atom'.

    Removes every control character (0-32, including space) plus double
    quote, single quote and backslash.  Note the characters are *removed*,
    not escaped.
    """
    # FIXME: proper quoting
    # ``list(range(...))`` keeps this working on Python 2 and 3; the
    # original ``range(33) + [...]`` raises TypeError on Python 3, and
    # a set gives O(1) membership tests.
    forbidden = set(map(chr, list(range(33)) + [34, 39, 92]))
    return "".join(c for c in line if c not in forbidden)
def makeWord(s):
    """Backslash-quote a string into a dict protocol 'word'.

    Every control character (0-32, including space) plus double quote,
    single quote and backslash is prefixed with a backslash.
    """
    # ``list(range(...))`` keeps this working on Python 2 and 3; the
    # original ``range(33) + [...]`` raises TypeError on Python 3.
    mustquote = set(list(range(33)) + [34, 39, 92])
    result = []
    for c in s:
        if ord(c) in mustquote:
            result.append("\\")
        result.append(c)
    return "".join(result)
def parseText(line):
    """Decode one line of a textual response.

    A lone '.' terminates the text (returns None); a leading '..' is the
    escape for a line starting with '.'.
    """
    if line == '.':
        return None
    if line.startswith('..'):
        line = line[1:]
    return line
class Definition:
    """A word definition"""

    def __init__(self, name, db, dbdesc, text):
        # ``text`` is a list of strings, none terminated by a newline.
        self.name, self.db = name, db
        self.dbdesc, self.text = dbdesc, text
class DictClient(basic.LineReceiver):
"""dict (RFC2229) client"""
data = None # multiline data
MAX_LENGTH = 1024
state = None
mode = None
result = None
factory = None
def __init__(self):
self.data = None
self.result = None
def connectionMade(self):
self.state = "conn"
self.mode = "command"
def sendLine(self, line):
"""Throw up if the line is longer than 1022 characters"""
if len(line) > self.MAX_LENGTH - 2:
raise ValueError("DictClient tried to send a too long line")
basic.LineReceiver.sendLine(self, line)
def lineReceived(self, line):
try:
line = line.decode("UTF-8")
except UnicodeError: # garbage received, skip
return
if self.mode == "text": # we are receiving textual data
code = "text"
else:
if len(line) < 4:
log.msg("DictClient got invalid line from server -- %s" % line)
self.protocolError("Invalid line from server")
self.transport.LoseConnection()
return
code = int(line[:3])
line = line[4:]
method = getattr(self, 'dictCode_%s_%s' % (code, self.state), self.dictCode_default)
method(line)
def dictCode_default(self, line):
"""Unkown message"""
log.msg("DictClient got unexpected message from server -- %s" % line)
self.protocolError("Unexpected server message")
self.transport.loseConnection()
def dictCode_221_ready(self, line):
"""We are about to get kicked off, do nothing"""
pass
def dictCode_220_conn(self, line):
"""Greeting message"""
self.state = "ready"
self.dictConnected()
def dictCode_530_conn(self):
self.protocolError("Access denied")
self.transport.loseConnection()
def dictCode_420_conn(self):
self.protocolError("Server temporarily unavailable")
self.transport.loseConnection()
def dictCode_421_conn(self):
self.protocolError("Server shutting down at operator request")
self.transport.loseConnection()
def sendDefine(self, database, word):
"""Send a dict DEFINE command"""
assert self.state == "ready", "DictClient.sendDefine called when not in ready state"
self.result = None # these two are just in case. In "ready" state, result and data
self.data = None # should be None
self.state = "define"
command = "DEFINE %s %s" % (makeAtom(database.encode("UTF-8")), makeWord(word.encode("UTF-8")))
self.sendLine(command)
def sendMatch(self, database, strategy, word):
"""Send a dict MATCH command"""
assert self.state == "ready", "DictClient.sendMatch called when not in ready state"
self.result = None
self.data = None
self.state = "match"
command = "MATCH %s %s %s" % (makeAtom(database), makeAtom(strategy), makeAtom(word))
self.sendLine(command.encode("UTF-8"))
def dictCode_550_define(self, line):
"""Invalid database"""
self.mode = "ready"
self.defineFailed("Invalid database")
def dictCode_550_match(self, line):
"""Invalid database"""
self.mode = "ready"
self.matchFailed("Invalid database")
def dictCode_551_match(self, line):
"""Invalid strategy"""
self.mode = "ready"
self.matchFailed("Invalid strategy")
def dictCode_552_define(self, line):
"""No match"""
self.mode = "ready"
self.defineFailed("No match")
def dictCode_552_match(self, line):
"""No match"""
self.mode = "ready"
self.matchFailed("No match")
def dictCode_150_define(self, line):
"""n definitions retrieved"""
self.result = []
def dictCode_151_define(self, line):
"""Definition text follows"""
self.mode = "text"
(word, line) = parseParam(line)
(db, line) = parseParam(line)
(dbdesc, line) = parseParam(line)
if not (word and db and dbdesc):
self.protocolError("Invalid server response")
self.transport.loseConnection()
else:
self.result.append(Definition(word, db, dbdesc, []))
self.data = []
def dictCode_152_match(self, line):
"""n matches found, text follows"""
self.mode = "text"
self.result = []
self.data = []
def dictCode_text_define(self, line):
"""A line of definition text received"""
res = parseText(line)
if res == None:
self.mode = "command"
self.result[-1].text = self.data
self.data = None
else:
self.data.append(line)
def dictCode_text_match(self, line):
"""One line of match text received"""
def l(s):
p1, t = parseParam(s)
p2, t = parseParam(t)
return (p1, p2)
res = parseText(line)
if res == None:
self.mode = "command"
self.result = map(l, self.data)
self.data = None
else:
self.data.append(line)
def dictCode_250_define(self, line):
"""ok"""
t = self.result
self.result = None
self.state = "ready"
self.defineDone(t)
def dictCode_250_match(self, line):
"""ok"""
t = self.result
self.result = None
self.state = "ready"
self.matchDone(t)
    def protocolError(self, reason):
        """Override to catch unexpected dict protocol conditions."""
        pass
    def dictConnected(self):
        """Override to be notified when the server is ready to accept commands."""
        pass
    def defineFailed(self, reason):
        """Override to catch reasonable failure responses to DEFINE."""
        pass
    def defineDone(self, result):
        """Override to catch a successful DEFINE."""
        pass
    def matchFailed(self, reason):
        """Override to catch reasonable failure responses to MATCH."""
        pass
    def matchDone(self, result):
        """Override to catch a successful MATCH."""
        pass
class InvalidResponse(Exception):
    """Raised when the dict server sends a malformed or unexpected reply."""
class DictLookup(DictClient):
    """Utility class for a single dict transaction. To be used with DictLookupFactory"""

    def protocolError(self, reason):
        # Only fire the deferred once; ignore protocol noise after completion.
        if not self.factory.done:
            self.factory.d.errback(InvalidResponse(reason))
            self.factory.clientDone()

    def dictConnected(self):
        # Kick off the query as soon as the server greets us.  Replaces the
        # long-deprecated apply() builtin with argument unpacking, which is
        # semantically identical and also valid on Python 3.
        if self.factory.queryType == "define":
            self.sendDefine(*self.factory.param)
        elif self.factory.queryType == "match":
            self.sendMatch(*self.factory.param)

    def defineFailed(self, reason):
        # A "reasonable" failure (bad database, no match) yields an empty list.
        self.factory.d.callback([])
        self.factory.clientDone()
        self.transport.loseConnection()

    def defineDone(self, result):
        self.factory.d.callback(result)
        self.factory.clientDone()
        self.transport.loseConnection()

    def matchFailed(self, reason):
        # A "reasonable" failure yields an empty list, mirroring defineFailed.
        self.factory.d.callback([])
        self.factory.clientDone()
        self.transport.loseConnection()

    def matchDone(self, result):
        self.factory.d.callback(result)
        self.factory.clientDone()
        self.transport.loseConnection()
class DictLookupFactory(protocol.ClientFactory):
    """Utility factory for a single dict transaction"""
    protocol = DictLookup
    # Flag set by clientDone() once the deferred has fired.
    done = None

    def __init__(self, queryType, param, d):
        # queryType: "define" or "match" (see DictLookup.dictConnected).
        # param: argument tuple for the corresponding send* method.
        # d: deferred fired with the lookup result or an error.
        self.queryType = queryType
        self.param = param
        self.d = d
        self.done = 0

    def clientDone(self):
        """Called by client when done."""
        self.done = 1
        # Drop the deferred so it cannot be fired twice.
        del self.d

    def clientConnectionFailed(self, connector, error):
        # Connection never came up: propagate the failure to the caller.
        self.d.errback(error)

    def clientConnectionLost(self, connector, error):
        # Only an unexpected disconnect (before clientDone) is an error.
        if not self.done:
            self.d.errback(error)

    def buildProtocol(self, addr):
        p = self.protocol()
        p.factory = self
        return p
def define(host, port, database, word):
    """Look up a word using a dict server"""
    deferred = defer.Deferred()
    # Import here to avoid installing a reactor at module-import time.
    from twisted.internet import reactor
    lookup = DictLookupFactory("define", (database, word), deferred)
    reactor.connectTCP(host, port, lookup)
    return deferred
def match(host, port, database, strategy, word):
    """Match a word using a dict server"""
    deferred = defer.Deferred()
    # Import here to avoid installing a reactor at module-import time.
    from twisted.internet import reactor
    lookup = DictLookupFactory("match", (database, strategy, word), deferred)
    reactor.connectTCP(host, port, lookup)
    return deferred
| apache-2.0 |
ChromiumWebApps/chromium | build/android/pylib/cmd_helper.py | 26 | 3711 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A wrapper for subprocess to make calling shell commands easier."""
import logging
import pipes
import signal
import subprocess
import tempfile
from utils import timeout_retry
def Popen(args, stdout=None, stderr=None, shell=None, cwd=None, env=None):
  """Launches a subprocess with default SIGPIPE handling restored.

  Python installs a SIG_IGN handler for SIGPIPE; the preexec hook puts the
  default disposition back so child pipelines terminate normally.
  """
  def _restore_sigpipe():
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
  return subprocess.Popen(
      args=args, cwd=cwd, stdout=stdout, stderr=stderr, shell=shell,
      close_fds=True, env=env, preexec_fn=_restore_sigpipe)
def Call(args, stdout=None, stderr=None, shell=None, cwd=None, env=None):
  """Runs a command to completion and returns its exit code."""
  child = Popen(args, stdout=stdout, stderr=stderr, shell=shell, cwd=cwd,
                env=env)
  # Drain stdout/stderr (if piped) to avoid deadlock, then reap the child.
  child.communicate()
  return child.wait()
def RunCmd(args, cwd=None):
  """Opens a subprocess to execute a program and returns its return value.

  Args:
    args: A string or a sequence of program arguments. The program to execute is
      the string or the first item in the args sequence.
    cwd: If not None, the subprocess's current directory will be changed to
      |cwd| before it's executed.

  Returns:
    Return code from the command execution.
  """
  # Lazy %-formatting keeps the log line identical to str(args) + ' ' + cwd.
  logging.info('%s %s', args, cwd or '')
  return Call(args, cwd=cwd)
def GetCmdOutput(args, cwd=None, shell=False):
  """Open a subprocess to execute a program and returns its output.

  Args:
    args: A string or a sequence of program arguments. The program to execute is
      the string or the first item in the args sequence.
    cwd: If not None, the subprocess's current directory will be changed to
      |cwd| before it's executed.
    shell: Whether to execute args as a shell command.

  Returns:
    Captures and returns the command's stdout.
    Prints the command's stderr to logger (which defaults to stdout).
  """
  # Discard the exit code; callers that need it use GetCmdStatusAndOutput.
  return GetCmdStatusAndOutput(args, cwd, shell)[1]
def GetCmdStatusAndOutput(args, cwd=None, shell=False):
  """Executes a subprocess and returns its exit code and output.

  Args:
    args: A string or a sequence of program arguments. The program to execute is
      the string or the first item in the args sequence.
    cwd: If not None, the subprocess's current directory will be changed to
      |cwd| before it's executed.
    shell: Whether to execute args as a shell command.

  Returns:
    The 2-tuple (exit code, output).
  """
  # |shell| and the type of |args| must agree: a raw command string is only
  # meaningful to a shell, while an argv sequence must not be shell-parsed.
  # NOTE(review): basestring and TemporaryFile(bufsize=...) below are
  # Python 2 only; this module targets Python 2.
  if isinstance(args, basestring):
    args_repr = args
    if not shell:
      raise Exception('string args must be run with shell=True')
  elif shell:
    raise Exception('array args must be run with shell=False')
  else:
    # Quote each argument so the logged command can be copy-pasted to a shell.
    args_repr = ' '.join(map(pipes.quote, args))
  # Log the command in "[host]:cwd> cmd" form for easier debugging.
  s = '[host]'
  if cwd:
    s += ':' + cwd
  s += '> ' + args_repr
  logging.info(s)
  # Capture stdout/stderr via unbuffered temp files rather than pipes,
  # avoiding pipe-buffer deadlocks for large outputs.
  tmpout = tempfile.TemporaryFile(bufsize=0)
  tmperr = tempfile.TemporaryFile(bufsize=0)
  exit_code = Call(args, cwd=cwd, stdout=tmpout, stderr=tmperr, shell=shell)
  tmperr.seek(0)
  stderr = tmperr.read()
  tmperr.close()
  if stderr:
    # Anything the command wrote to stderr is surfaced at critical level.
    logging.critical(stderr)
  tmpout.seek(0)
  stdout = tmpout.read()
  tmpout.close()
  if len(stdout) > 4096:
    # Keep the debug log readable: only the first 4 KiB is logged.
    logging.debug('Truncated output:')
  logging.debug(stdout[:4096])
  return (exit_code, stdout)
def GetCmdStatusAndOutputWithTimeoutAndRetries(args, timeout, retries):
  """Executes a subprocess with a timeout and retries.

  Args:
    args: List of arguments to the program, the program to execute is the first
      element.
    timeout: the timeout in seconds.
    retries: the number of retries.

  Returns:
    The 2-tuple (exit code, output).
  """
  # Delegates to timeout_retry.Run, which re-invokes GetCmdStatusAndOutput
  # with [args] until it completes within |timeout| or retries are exhausted.
  return timeout_retry.Run(GetCmdStatusAndOutput, timeout, retries, [args])
| bsd-3-clause |
eykd/fuzzy-octo-bear | tests/test_map_loader.py | 1 | 1227 | from unittest import TestCase
from ensure import ensure
from path import path
from fuzzy.map import load_game_map
from fuzzy.rooms import Room
from fuzzy.exits import Exit
PATH = path(__file__).abspath().dirname()
class MapLoaderTests(TestCase):
    """Integration tests for load_game_map() against the rooms.yaml fixture."""

    def setUp(self):
        # The fixture file lives alongside this test module.
        self.filename = PATH / 'rooms.yaml'

    def test_it_should_construct_a_map_from_the_yaml_file(self):
        start_room = load_game_map(self.filename)
        ensure(start_room).is_a(Room)
        # The starting room links out to two other rooms.
        ensure(start_room.exits).has_length(2)
        ensure(start_room.exits).is_a(list).of(Exit)
        ensure(start_room.exits[0].target).is_a(Room)
        ensure(start_room.exits[0].target).is_not(start_room)
        room_3 = start_room.exits[1].target
        ensure(room_3.exits).has_length(4)
        ensure(room_3.exits).is_a(list).of(Exit)
        room_6 = room_3.exits[2].target
        ensure(room_6).is_a(Room)
        ensure(room_6.exits).has_length(2)
        ensure(room_6.description).equals("A nondescript room")
        room_7 = room_3.exits[3].target
        ensure(room_7).is_a(Room)
        ensure(room_7.exits).has_length(2)
        ensure(room_7.description).equals("A nondescript room")
        # Rooms 6 and 7 share a description but must be distinct objects.
        ensure(room_6).is_not(room_7)
| gpl-2.0 |
M0nica/python-foundations-hw | 04/04-homework_graded.py | 1 | 46139 | ########
#Graded = 25/25
# Homework 3
#
# MAKE SURE YOU ARE RUNNING THIS WITH PYTHON 3!
# Python 2 will give a "Non-ASCII character" error
#
# Either use workon/mkvirtualenv to create an
# environment or use the python3 command
#
########
########
#
# Here is a programmer!
#
########
# Sample record for one programmer: display name, number of fish owned,
# and the list of languages they know.
programmer = {
    'name': 'Christine',
    'fish': 100,
    'languages': ['C++', 'Ruby', 'Java', 'Python' ]
}
# 1. What kind of data structure (a.k.a. type) is programmer? Print it.
print(type(programmer))

# 2. What keys does programmer have? Print it.
print(programmer.keys())

# 3. Print the programmer's name.
print(programmer['name'])

# 4. If the programmer has more than 30 fish, print "The programmer owns a lot of fish." If the programmer has 0 fish, say "the programmer has no fish." If the programmer has between 1 and 30 fish, print "the programmer has a few fish."
if programmer['fish'] > 30:
    print("The programmer owns a lot of fish.")
# BUG FIX: "progammer" was misspelled below, which would raise a NameError
# for any fish count between 1 and 30 (the branch was never reached with
# fish == 100, so the typo was latent).
elif programmer['fish'] > 0:
    print("the programmer has a few fish.")
else:
    print("the programmer has no fish.")

# 5. Print the sentence, "{programmer's name} knows {number of languages} languages")
print(programmer['name'], "knows", len(programmer['languages']), "languages")

# 6. Use a loop to print each language the programmer knows
for language in programmer['languages']:
    print(language)
########
#
# Here is a bunch of workers!
#
########
# Company record: a name plus two employee lists — coders (each with a
# languages list) and managers (name only).
company = {
    'name': 'ACME Product Production Program',
    'coders': [
        { 'name': 'Lady Macbeth', 'languages': ['C++', 'Ruby', 'Java', 'Python' ] },
        { 'name': 'Lothario', 'languages': ['C++'] },
        { 'name': 'Ophelia', 'languages': [ 'Ruby', 'Erlang', 'Python' ] },
        { 'name': 'Mercutio', 'languages': ['ASM', 'Python' ] }
    ],
    'managers': [
        { 'name': 'Alpha' },
        { 'name': 'Beta' },
        { 'name': 'Gamma' },
        { 'name': 'Delta' }
    ]
}
# 7. What type is the company variable? What are its keys?
print(type(company))
print(company.keys())

# 8. What data structure (a.k.a. type) is the 'coders' part of company?
print(type(company['coders']))

# 9. How many coders does the company have?
print(len(company['coders']))

# 10. Print the name of each manager.
for mgr in company['managers']:
    print(mgr['name'])

# 11. Print the number of languages each coder knows.
for dev in company['coders']:
    print(len(dev['languages']))
########
#
# Search results from Spotify for an artist named "Kendrick"
# https://api.spotify.com/v1/search?query=kendrick&limit=20&type=artist
#
########
artist_search_result = {'artists': {'offset': 0, 'next': 'https://api.spotify.com/v1/search?query=kendrick&offset=20&limit=20&type=artist', 'limit': 20, 'href': 'https://api.spotify.com/v1/search?query=kendrick&offset=0&limit=20&type=artist', 'previous': None, 'items': [{'images': [{'height': 1000, 'url': 'https://i.scdn.co/image/b1947120c60a8f2886c98faf52a61895821c7cf0', 'width': 1000}, {'height': 640, 'url': 'https://i.scdn.co/image/c50721f32900d561d44f38006208ab69717fe1f9', 'width': 640}, {'height': 200, 'url': 'https://i.scdn.co/image/762628b9c2bf991e6f9325522dab32c0cf7c06a2', 'width': 200}, {'height': 64, 'url': 'https://i.scdn.co/image/876101e8b1a981d5d6f9257f0f6ddd15087bdfd5', 'width': 64}], 'genres': ['alternative hip hop'], 'href': 'https://api.spotify.com/v1/artists/2YZyLoL8N0Wb9xBt1NhZWg', 'external_urls': {'spotify': 'https://open.spotify.com/artist/2YZyLoL8N0Wb9xBt1NhZWg'}, 'popularity': 84, 'type': 'artist', 'followers': {'href': None, 'total': 2454724}, 'name': 'Kendrick Lamar', 'uri': 'spotify:artist:2YZyLoL8N0Wb9xBt1NhZWg', 'id': '2YZyLoL8N0Wb9xBt1NhZWg'}, {'images': [{'height': 640, 'url': 'https://i.scdn.co/image/b6e825eb7039bb792a65b484b3d56064fb629ec8', 'width': 640}, {'height': 300, 'url': 'https://i.scdn.co/image/1229558513a6881b2635c4b2954f8bd709415ae5', 'width': 300}, {'height': 64, 'url': 'https://i.scdn.co/image/1301ae674a679c1865b2ffc0702be296d86224fc', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/6xfqnpe2HnLVUaYXs2F8YS', 'external_urls': {'spotify': 'https://open.spotify.com/artist/6xfqnpe2HnLVUaYXs2F8YS'}, 'popularity': 57, 'type': 'artist', 'followers': {'href': None, 'total': 84080}, 'name': 'Anna Kendrick', 'uri': 'spotify:artist:6xfqnpe2HnLVUaYXs2F8YS', 'id': '6xfqnpe2HnLVUaYXs2F8YS'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/1iApxRdcW8Uok4htrDrvdY', 'external_urls': {'spotify': 'https://open.spotify.com/artist/1iApxRdcW8Uok4htrDrvdY'}, 'popularity': 45, 'type': 'artist', 
'followers': {'href': None, 'total': 1764}, 'name': 'Tech N9ne feat. Kendrick Lamar, ¡Mayday!, Kendall Morgan', 'uri': 'spotify:artist:1iApxRdcW8Uok4htrDrvdY', 'id': '1iApxRdcW8Uok4htrDrvdY'}, {'images': [{'height': 640, 'url': 'https://i.scdn.co/image/3130aee8b99f3fd47e32c704f146eeafc2ad01fc', 'width': 640}, {'height': 300, 'url': 'https://i.scdn.co/image/4547ade74391dcd3b3ca38afe820e5f44a5bddc7', 'width': 300}, {'height': 64, 'url': 'https://i.scdn.co/image/5ae90745618ea45fe0e0e832feebecaab3dc2d14', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/1cjrBtunBfOLbXQ0OK1yEY', 'external_urls': {'spotify': 'https://open.spotify.com/artist/1cjrBtunBfOLbXQ0OK1yEY'}, 'popularity': 41, 'type': 'artist', 'followers': {'href': None, 'total': 7}, 'name': 'Edgar Kendricks', 'uri': 'spotify:artist:1cjrBtunBfOLbXQ0OK1yEY', 'id': '1cjrBtunBfOLbXQ0OK1yEY'}, {'images': [{'height': 1280, 'url': 'https://i.scdn.co/image/664f1a004773bd74a4ff5104818e4f383ef95a5e', 'width': 676}, {'height': 1212, 'url': 'https://i.scdn.co/image/d5eff2f40af987b8794a43b6df78a47f41e4dc8f', 'width': 640}, {'height': 379, 'url': 'https://i.scdn.co/image/04eacbd2e9a333aff1deb625512fef76cd60c754', 'width': 200}, {'height': 121, 'url': 'https://i.scdn.co/image/745fd75c3bb492e40f93835e233f3e80d4ab513a', 'width': 64}], 'genres': ['motown'], 'href': 'https://api.spotify.com/v1/artists/2Uuon75BhnuuxdKLYn4wHn', 'external_urls': {'spotify': 'https://open.spotify.com/artist/2Uuon75BhnuuxdKLYn4wHn'}, 'popularity': 39, 'type': 'artist', 'followers': {'href': None, 'total': 5310}, 'name': 'Eddie Kendricks', 'uri': 'spotify:artist:2Uuon75BhnuuxdKLYn4wHn', 'id': '2Uuon75BhnuuxdKLYn4wHn'}, {'images': [{'height': 640, 'url': 'https://i.scdn.co/image/14bfe97f0b355da905a49255991be8d72c96d49c', 'width': 640}, {'height': 300, 'url': 'https://i.scdn.co/image/6c8c92a391746de3ac3f630180c74c7e363d0c97', 'width': 300}, {'height': 64, 'url': 'https://i.scdn.co/image/d17dc6566044566cee5ad0b529df6320a0dcb065', 
'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/2cKOuZYoNGwJ91GSVhUV9g', 'external_urls': {'spotify': 'https://open.spotify.com/artist/2cKOuZYoNGwJ91GSVhUV9g'}, 'popularity': 29, 'type': 'artist', 'followers': {'href': None, 'total': 81}, 'name': 'Kendrick', 'uri': 'spotify:artist:2cKOuZYoNGwJ91GSVhUV9g', 'id': '2cKOuZYoNGwJ91GSVhUV9g'}, {'images': [{'height': 635, 'url': 'https://i.scdn.co/image/70292a01a38948fa70e00b175e8d60ee33a40bc3', 'width': 950}, {'height': 428, 'url': 'https://i.scdn.co/image/b9537b37f129c1be5e8f3ba4efe3cac5b25f7636', 'width': 640}, {'height': 134, 'url': 'https://i.scdn.co/image/e73e70c68dc9d40799fa1a7865c7c2b56a56ae32', 'width': 200}, {'height': 43, 'url': 'https://i.scdn.co/image/3dccb1ab6811e5d59dd71ef664621e7f0aacd0b2', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/3xidVCWg60r8Wdm6g9VCux', 'external_urls': {'spotify': 'https://open.spotify.com/artist/3xidVCWg60r8Wdm6g9VCux'}, 'popularity': 36, 'type': 'artist', 'followers': {'href': None, 'total': 1031}, 'name': 'Kendrick Scott', 'uri': 'spotify:artist:3xidVCWg60r8Wdm6g9VCux', 'id': '3xidVCWg60r8Wdm6g9VCux'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/7Bin9s9lePTNx57vB5rHW8', 'external_urls': {'spotify': 'https://open.spotify.com/artist/7Bin9s9lePTNx57vB5rHW8'}, 'popularity': 24, 'type': 'artist', 'followers': {'href': None, 'total': 1}, 'name': 'Kendrick Small', 'uri': 'spotify:artist:7Bin9s9lePTNx57vB5rHW8', 'id': '7Bin9s9lePTNx57vB5rHW8'}, {'images': [{'height': 290, 'url': 'https://i.scdn.co/image/af863b35263ff14eb78218f371bef8a0e76f1de5', 'width': 1000}, {'height': 186, 'url': 'https://i.scdn.co/image/7f86627d478319b749db28e5029e8ef08f330759', 'width': 640}, {'height': 58, 'url': 'https://i.scdn.co/image/7f751ccee89a1f1b84d1a0cd1d37437c41bde338', 'width': 200}, {'height': 19, 'url': 'https://i.scdn.co/image/510ff1b14b3ae7688012ed56a4d201e2a6333e8e', 'width': 64}], 'genres': [], 'href': 
'https://api.spotify.com/v1/artists/1srLlKy0yVmQorLl9PhXbS', 'external_urls': {'spotify': 'https://open.spotify.com/artist/1srLlKy0yVmQorLl9PhXbS'}, 'popularity': 30, 'type': 'artist', 'followers': {'href': None, 'total': 3222}, 'name': 'Graham Kendrick', 'uri': 'spotify:artist:1srLlKy0yVmQorLl9PhXbS', 'id': '1srLlKy0yVmQorLl9PhXbS'}, {'images': [{'height': 640, 'url': 'https://i.scdn.co/image/801f01bc6446f7b97d656ee3a86702c642633c4f', 'width': 640}, {'height': 300, 'url': 'https://i.scdn.co/image/ec3937428234fca86329588823b68b0e81aa2251', 'width': 300}, {'height': 64, 'url': 'https://i.scdn.co/image/567b432948b028ed45d637d972c0058f2bf1bb91', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/5DOCFpRL15EQCkZDU3RcP8', 'external_urls': {'spotify': 'https://open.spotify.com/artist/5DOCFpRL15EQCkZDU3RcP8'}, 'popularity': 23, 'type': 'artist', 'followers': {'href': None, 'total': 69}, 'name': 'Temps & Eddie Kendricks', 'uri': 'spotify:artist:5DOCFpRL15EQCkZDU3RcP8', 'id': '5DOCFpRL15EQCkZDU3RcP8'}, {'images': [{'height': 667, 'url': 'https://i.scdn.co/image/555a8e287d0b50921f43773779ccc99f4eb14bd8', 'width': 1000}, {'height': 427, 'url': 'https://i.scdn.co/image/3cf168aefd1633f40f7021e44d2106d4a3c34f8c', 'width': 640}, {'height': 133, 'url': 'https://i.scdn.co/image/9d51baf1e5d9bd05e7e20117f6f0bbac4ede8ad2', 'width': 200}, {'height': 43, 'url': 'https://i.scdn.co/image/030909e0a8dc938af89c7a030e0b204fbd46f11d', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/0IyuDlCVbMa3TAoVaDKEeL', 'external_urls': {'spotify': 'https://open.spotify.com/artist/0IyuDlCVbMa3TAoVaDKEeL'}, 'popularity': 19, 'type': 'artist', 'followers': {'href': None, 'total': 1471}, 'name': 'Kendrick Scott Oracle', 'uri': 'spotify:artist:0IyuDlCVbMa3TAoVaDKEeL', 'id': '0IyuDlCVbMa3TAoVaDKEeL'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/6FnUiliI9F1f2V9THnXxpu', 'external_urls': {'spotify': 
'https://open.spotify.com/artist/6FnUiliI9F1f2V9THnXxpu'}, 'popularity': 10, 'type': 'artist', 'followers': {'href': None, 'total': 93}, 'name': 'Solange feat. Kendrick Lamar', 'uri': 'spotify:artist:6FnUiliI9F1f2V9THnXxpu', 'id': '6FnUiliI9F1f2V9THnXxpu'}, {'images': [{'height': 640, 'url': 'https://i.scdn.co/image/0724aac29dcf4876b54f01a2813365b92343ed5a', 'width': 640}, {'height': 300, 'url': 'https://i.scdn.co/image/c6e8964165d08bc9cb2fc05d68439930db61c890', 'width': 300}, {'height': 64, 'url': 'https://i.scdn.co/image/d26bc9756a0c7679f095c9e0e8e13dc9b39febde', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/7by6up72jjsUGwmmMitGr1', 'external_urls': {'spotify': 'https://open.spotify.com/artist/7by6up72jjsUGwmmMitGr1'}, 'popularity': 9, 'type': 'artist', 'followers': {'href': None, 'total': 32}, 'name': 'The Kendricks', 'uri': 'spotify:artist:7by6up72jjsUGwmmMitGr1', 'id': '7by6up72jjsUGwmmMitGr1'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/1WscexgNxCyVt7Bx5pmsUg', 'external_urls': {'spotify': 'https://open.spotify.com/artist/1WscexgNxCyVt7Bx5pmsUg'}, 'popularity': 11, 'type': 'artist', 'followers': {'href': None, 'total': 39}, 'name': 'Richard Kendrick', 'uri': 'spotify:artist:1WscexgNxCyVt7Bx5pmsUg', 'id': '1WscexgNxCyVt7Bx5pmsUg'}, {'images': [{'height': 640, 'url': 'https://i.scdn.co/image/12a0f2aa81ccde7f63fb02417f44c8de99df1087', 'width': 640}, {'height': 300, 'url': 'https://i.scdn.co/image/5a75162ef05c05e950b42d863ca7a811386a97b0', 'width': 300}, {'height': 64, 'url': 'https://i.scdn.co/image/64f475b3531667c58f1ef02f1774ec7697b1ac81', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/4UnJ85AumWoUuGnOpLEnl7', 'external_urls': {'spotify': 'https://open.spotify.com/artist/4UnJ85AumWoUuGnOpLEnl7'}, 'popularity': 10, 'type': 'artist', 'followers': {'href': None, 'total': 58}, 'name': 'Darnell Kendricks', 'uri': 'spotify:artist:4UnJ85AumWoUuGnOpLEnl7', 'id': 
'4UnJ85AumWoUuGnOpLEnl7'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/7kj1cdDalpyc3rURdJx8b9', 'external_urls': {'spotify': 'https://open.spotify.com/artist/7kj1cdDalpyc3rURdJx8b9'}, 'popularity': 7, 'type': 'artist', 'followers': {'href': None, 'total': 54}, 'name': 'Eddie Kendrick', 'uri': 'spotify:artist:7kj1cdDalpyc3rURdJx8b9', 'id': '7kj1cdDalpyc3rURdJx8b9'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/2cUIfFEsQgmPlYt75T2Lvy', 'external_urls': {'spotify': 'https://open.spotify.com/artist/2cUIfFEsQgmPlYt75T2Lvy'}, 'popularity': 9, 'type': 'artist', 'followers': {'href': None, 'total': 14}, 'name': 'Alex Kendrick', 'uri': 'spotify:artist:2cUIfFEsQgmPlYt75T2Lvy', 'id': '2cUIfFEsQgmPlYt75T2Lvy'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/3M8jTGG0AItu4XYotTlC6M', 'external_urls': {'spotify': 'https://open.spotify.com/artist/3M8jTGG0AItu4XYotTlC6M'}, 'popularity': 5, 'type': 'artist', 'followers': {'href': None, 'total': 451}, 'name': 'Kendrick Lamar & Jay Rock', 'uri': 'spotify:artist:3M8jTGG0AItu4XYotTlC6M', 'id': '3M8jTGG0AItu4XYotTlC6M'}, {'images': [{'height': 600, 'url': 'https://i.scdn.co/image/b388ac8f9a6fef30800af948440f81020bec6ea6', 'width': 411}, {'height': 292, 'url': 'https://i.scdn.co/image/9ede141bfd29ffac1984c61331191e39b7d92e12', 'width': 200}, {'height': 93, 'url': 'https://i.scdn.co/image/eff9add3f6d333717bd051d75721e518596c2719', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/46uaPpO9BqSSlLvSXFwyJs', 'external_urls': {'spotify': 'https://open.spotify.com/artist/46uaPpO9BqSSlLvSXFwyJs'}, 'popularity': 5, 'type': 'artist', 'followers': {'href': None, 'total': 66}, 'name': 'Charlotte Kendrick', 'uri': 'spotify:artist:46uaPpO9BqSSlLvSXFwyJs', 'id': '46uaPpO9BqSSlLvSXFwyJs'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/63rMSwRscrbMp9u0MhfDzK', 'external_urls': {'spotify': 
'https://open.spotify.com/artist/63rMSwRscrbMp9u0MhfDzK'}, 'popularity': 5, 'type': 'artist', 'followers': {'href': None, 'total': 1}, 'name': 'Will Kendrick', 'uri': 'spotify:artist:63rMSwRscrbMp9u0MhfDzK', 'id': '63rMSwRscrbMp9u0MhfDzK'}], 'total': 160}}
# 12. What is the data type of the search result? Print it.
print(type(artist_search_result))

# 13. What are all of the keys that the search result has?
print(artist_search_result.keys())

# 14. Take a look at 'artists' - what keys does it have?
print(artist_search_result['artists'].keys())

# 15. Using len() with something-or-other would show me how many results I CURRENTLY have, but I want to know the TOTAL number of results Spotify has for my search result. From looking at the names of the keys under 'artists', how many total results are there?
print("Total number of results is", artist_search_result['artists']['total'])

# 16. How popular is Kendrick Lamar vs. Anna Kendrick? Use a for loop to list the names and popularity of every artist.
for item in artist_search_result['artists']['items']:
    print(item['name'], "has a popularity of", item['popularity'], ".")
########
#
# Search results from Spotify for a playlist including the term "90s"
# https://api.spotify.com/v1/search?query=90s&limit=20&type=playlist
#
########
playlist_search_result = {'playlists': {'offset': 0, 'next': 'https://api.spotify.com/v1/search?query=90s&offset=20&limit=20&type=playlist', 'limit': 20, 'href': 'https://api.spotify.com/v1/search?query=90s&offset=0&limit=20&type=playlist', 'previous': None, 'items': [{'public': None, 'snapshot_id': 'X2zFSzFviruyjSFdaByIuiT9se/LKmkQFWbqY+NzH+TQ1Sj4rH0Q/0WxQlKrNvKw', 'id': '5TcHWbnN6SIhvPY1MXMDrb', 'tracks': {'href': 'https://api.spotify.com/v1/users/sam85uk/playlists/5TcHWbnN6SIhvPY1MXMDrb/tracks', 'total': 937}, 'external_urls': {'spotify': 'http://open.spotify.com/user/sam85uk/playlist/5TcHWbnN6SIhvPY1MXMDrb'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/sam85uk'}, 'type': 'user', 'uri': 'spotify:user:sam85uk', 'href': 'https://api.spotify.com/v1/users/sam85uk', 'id': 'sam85uk'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/08d18e9d8c7d49cbb1eb609998fff55741cf54f61ebd9af955a76278cbfc1959ed78ab4a604123186abcdf6587a26b505a0816d7b3ff9fba4be070344bc3fa03ea66c557fa889ee9684ddc8b2ed94a5e', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/08d18e9d8c7d49cbb1eb609998fff55741cf54f61ebd9af955a76278cbfc1959ed78ab4a604123186abcdf6587a26b505a0816d7b3ff9fba4be070344bc3fa03ea66c557fa889ee9684ddc8b2ed94a5e', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/08d18e9d8c7d49cbb1eb609998fff55741cf54f61ebd9af955a76278cbfc1959ed78ab4a604123186abcdf6587a26b505a0816d7b3ff9fba4be070344bc3fa03ea66c557fa889ee9684ddc8b2ed94a5e', 'width': 60}], 'uri': 'spotify:user:sam85uk:playlist:5TcHWbnN6SIhvPY1MXMDrb', 'name': '90s', 'href': 'https://api.spotify.com/v1/users/sam85uk/playlists/5TcHWbnN6SIhvPY1MXMDrb'}, {'public': None, 'snapshot_id': 'vs/L+N+xSmk4giRCCNAsNK4hljW++tS/RHSq0HJbYqWCXo65jGmGpT9ssHu5GgEh', 'id': '6OugflVBHYjm6HIjCObsz4', 'tracks': {'href': 'https://api.spotify.com/v1/users/1220462882/playlists/6OugflVBHYjm6HIjCObsz4/tracks', 'total': 924}, 'external_urls': 
{'spotify': 'http://open.spotify.com/user/1220462882/playlist/6OugflVBHYjm6HIjCObsz4'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/1220462882'}, 'type': 'user', 'uri': 'spotify:user:1220462882', 'href': 'https://api.spotify.com/v1/users/1220462882', 'id': '1220462882'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/81987cf55db3b188d996f89b41a724b9d82311c37fb148a7b28456487198e98997eb7a7462ca8c8c214a89248160db7677ea145da83b29bf01c189fb85c86129145f848525b858bc3557161db5654ded', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/81987cf55db3b188d996f89b41a724b9d82311c37fb148a7b28456487198e98997eb7a7462ca8c8c214a89248160db7677ea145da83b29bf01c189fb85c86129145f848525b858bc3557161db5654ded', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/81987cf55db3b188d996f89b41a724b9d82311c37fb148a7b28456487198e98997eb7a7462ca8c8c214a89248160db7677ea145da83b29bf01c189fb85c86129145f848525b858bc3557161db5654ded', 'width': 60}], 'uri': 'spotify:user:1220462882:playlist:6OugflVBHYjm6HIjCObsz4', 'name': '90s country', 'href': 'https://api.spotify.com/v1/users/1220462882/playlists/6OugflVBHYjm6HIjCObsz4'}, {'public': None, 'snapshot_id': 'ZMcQcP9SEc7jKFwoEO97LcQzLn0iBMcC2NjnFKEcbpKLXkbc7f6n1yjdXitIg32A', 'id': '5e1bpazQUEHijFhcJobkAp', 'tracks': {'href': 'https://api.spotify.com/v1/users/luccyyy/playlists/5e1bpazQUEHijFhcJobkAp/tracks', 'total': 164}, 'external_urls': {'spotify': 'http://open.spotify.com/user/luccyyy/playlist/5e1bpazQUEHijFhcJobkAp'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/luccyyy'}, 'type': 'user', 'uri': 'spotify:user:luccyyy', 'href': 'https://api.spotify.com/v1/users/luccyyy', 'id': 'luccyyy'}, 'collaborative': False, 'images': [{'height': 640, 'url': 
'https://mosaic.scdn.co/640/7cc30bbf3935c30f795c99b19c1f7ad8f0427cff5ed7a60280fe4adabb6404581d36c8eac93ea0f90d69e33e5e05c63fe4a9d3146771ec7e6f052dceec4f7a63269a7997b385763abc036f483ac46212', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/7cc30bbf3935c30f795c99b19c1f7ad8f0427cff5ed7a60280fe4adabb6404581d36c8eac93ea0f90d69e33e5e05c63fe4a9d3146771ec7e6f052dceec4f7a63269a7997b385763abc036f483ac46212', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/7cc30bbf3935c30f795c99b19c1f7ad8f0427cff5ed7a60280fe4adabb6404581d36c8eac93ea0f90d69e33e5e05c63fe4a9d3146771ec7e6f052dceec4f7a63269a7997b385763abc036f483ac46212', 'width': 60}], 'uri': 'spotify:user:luccyyy:playlist:5e1bpazQUEHijFhcJobkAp', 'name': "'90s Rock", 'href': 'https://api.spotify.com/v1/users/luccyyy/playlists/5e1bpazQUEHijFhcJobkAp'}, {'public': None, 'snapshot_id': '5JWrVBSsfjaBGPt2OGlZ8ZJgkI9xB39TC5ISrrK/yYuFvgzOIji3eeHWfCenGpbI', 'id': '5v6cPhjJxSgNn6Aluh0lqV', 'tracks': {'href': 'https://api.spotify.com/v1/users/crockstarltd/playlists/5v6cPhjJxSgNn6Aluh0lqV/tracks', 'total': 137}, 'external_urls': {'spotify': 'http://open.spotify.com/user/crockstarltd/playlist/5v6cPhjJxSgNn6Aluh0lqV'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/crockstarltd'}, 'type': 'user', 'uri': 'spotify:user:crockstarltd', 'href': 'https://api.spotify.com/v1/users/crockstarltd', 'id': 'crockstarltd'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/d8f6b63b7df6e7cccf92044d706a377e9803d9d73d35d425a1f8b25cada6937470e7e258d7835dacae29a6ee0fc15e797f94e7f30588cdcfaf7febb0e15a74a0c972959dbb9d597acf98868cb95b0664', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/d8f6b63b7df6e7cccf92044d706a377e9803d9d73d35d425a1f8b25cada6937470e7e258d7835dacae29a6ee0fc15e797f94e7f30588cdcfaf7febb0e15a74a0c972959dbb9d597acf98868cb95b0664', 'width': 300}, {'height': 60, 'url': 
'https://mosaic.scdn.co/60/d8f6b63b7df6e7cccf92044d706a377e9803d9d73d35d425a1f8b25cada6937470e7e258d7835dacae29a6ee0fc15e797f94e7f30588cdcfaf7febb0e15a74a0c972959dbb9d597acf98868cb95b0664', 'width': 60}], 'uri': 'spotify:user:crockstarltd:playlist:5v6cPhjJxSgNn6Aluh0lqV', 'name': '90s alternative', 'href': 'https://api.spotify.com/v1/users/crockstarltd/playlists/5v6cPhjJxSgNn6Aluh0lqV'}, {'public': None, 'snapshot_id': 'NNzGf/01IMRYYmlNZhEehdkG8FONeb5xdyiUoVUVUsk5hovm++HAO4+UYckAlz7F', 'id': '3wneNouatgyKcqjPJtBP2L', 'tracks': {'href': 'https://api.spotify.com/v1/users/macaulaymeudt92309/playlists/3wneNouatgyKcqjPJtBP2L/tracks', 'total': 815}, 'external_urls': {'spotify': 'http://open.spotify.com/user/macaulaymeudt92309/playlist/3wneNouatgyKcqjPJtBP2L'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/macaulaymeudt92309'}, 'type': 'user', 'uri': 'spotify:user:macaulaymeudt92309', 'href': 'https://api.spotify.com/v1/users/macaulaymeudt92309', 'id': 'macaulaymeudt92309'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/4a589f44ef1c9988ef3441b8b40fa1ea66b7b7300acfcd6f1b85b23a79fc4d678a9368f27c110d0238a9cb53d4ebe717af8921e8d86cbfacdb88b9ea0db70a83d7a6f026c20efd426fa3dea6f369d385', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/4a589f44ef1c9988ef3441b8b40fa1ea66b7b7300acfcd6f1b85b23a79fc4d678a9368f27c110d0238a9cb53d4ebe717af8921e8d86cbfacdb88b9ea0db70a83d7a6f026c20efd426fa3dea6f369d385', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/4a589f44ef1c9988ef3441b8b40fa1ea66b7b7300acfcd6f1b85b23a79fc4d678a9368f27c110d0238a9cb53d4ebe717af8921e8d86cbfacdb88b9ea0db70a83d7a6f026c20efd426fa3dea6f369d385', 'width': 60}], 'uri': 'spotify:user:macaulaymeudt92309:playlist:3wneNouatgyKcqjPJtBP2L', 'name': "'90s Hits ", 'href': 'https://api.spotify.com/v1/users/macaulaymeudt92309/playlists/3wneNouatgyKcqjPJtBP2L'}, {'public': None, 'snapshot_id': 
'iACKRBrA4eZtFBpk0oCrYEJvBNaSOr2utdXOi2mwuGXMmMv/utD0w/hyLhEkHxDI', 'id': '29jIRPHN6igNaKMwsMQudX', 'tracks': {'href': 'https://api.spotify.com/v1/users/12445745/playlists/29jIRPHN6igNaKMwsMQudX/tracks', 'total': 185}, 'external_urls': {'spotify': 'http://open.spotify.com/user/12445745/playlist/29jIRPHN6igNaKMwsMQudX'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/12445745'}, 'type': 'user', 'uri': 'spotify:user:12445745', 'href': 'https://api.spotify.com/v1/users/12445745', 'id': '12445745'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/0cd7ff9cc859ff202ed95892ed85747591aea1e54a33a0da119619c814576f290b8a0a56a4683df0976c2e58bf940880b7a083e3cd343068975e63b47433313c5921731df38907118b14b7101c15dec7', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/0cd7ff9cc859ff202ed95892ed85747591aea1e54a33a0da119619c814576f290b8a0a56a4683df0976c2e58bf940880b7a083e3cd343068975e63b47433313c5921731df38907118b14b7101c15dec7', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/0cd7ff9cc859ff202ed95892ed85747591aea1e54a33a0da119619c814576f290b8a0a56a4683df0976c2e58bf940880b7a083e3cd343068975e63b47433313c5921731df38907118b14b7101c15dec7', 'width': 60}], 'uri': 'spotify:user:12445745:playlist:29jIRPHN6igNaKMwsMQudX', 'name': '90s RnB', 'href': 'https://api.spotify.com/v1/users/12445745/playlists/29jIRPHN6igNaKMwsMQudX'}, {'public': None, 'snapshot_id': 'MiF9QytElWJwgY1nXZlaw8HJMF/mPV+UYdk35yEs93gqHPpFW8YAxZs90asJ3yBd', 'id': '3QmFUqc1Io68CVD2NL2iLM', 'tracks': {'href': 'https://api.spotify.com/v1/users/1297361113/playlists/3QmFUqc1Io68CVD2NL2iLM/tracks', 'total': 281}, 'external_urls': {'spotify': 'http://open.spotify.com/user/1297361113/playlist/3QmFUqc1Io68CVD2NL2iLM'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/1297361113'}, 'type': 'user', 'uri': 'spotify:user:1297361113', 'href': 'https://api.spotify.com/v1/users/1297361113', 
'id': '1297361113'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/28961f2d68c3477b4c86d2fee6e00178f32851eb676a05f833234b5273fd35b908a4cc57105fd744d6b045d91a974b3f77b5724a93c0f5c5188e616bdcab2d136873cfbdb5feac7539897d6774bf83d3', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/28961f2d68c3477b4c86d2fee6e00178f32851eb676a05f833234b5273fd35b908a4cc57105fd744d6b045d91a974b3f77b5724a93c0f5c5188e616bdcab2d136873cfbdb5feac7539897d6774bf83d3', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/28961f2d68c3477b4c86d2fee6e00178f32851eb676a05f833234b5273fd35b908a4cc57105fd744d6b045d91a974b3f77b5724a93c0f5c5188e616bdcab2d136873cfbdb5feac7539897d6774bf83d3', 'width': 60}], 'uri': 'spotify:user:1297361113:playlist:3QmFUqc1Io68CVD2NL2iLM', 'name': 'ROCK 90S INGLES', 'href': 'https://api.spotify.com/v1/users/1297361113/playlists/3QmFUqc1Io68CVD2NL2iLM'}, {'public': None, 'snapshot_id': 'n6rtZGQ+Y7BG/Y+w7awcPmbn2Tus4ahRSOzDF67JzqtTqYjSMV8x58WQUUySFRls', 'id': '5772HGqmp2E99GQo5tfmcJ', 'tracks': {'href': 'https://api.spotify.com/v1/users/19jconnell79/playlists/5772HGqmp2E99GQo5tfmcJ/tracks', 'total': 211}, 'external_urls': {'spotify': 'http://open.spotify.com/user/19jconnell79/playlist/5772HGqmp2E99GQo5tfmcJ'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/19jconnell79'}, 'type': 'user', 'uri': 'spotify:user:19jconnell79', 'href': 'https://api.spotify.com/v1/users/19jconnell79', 'id': '19jconnell79'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/21ffd457485a3a428e38d3b00b399cbd917a0535f36a53eb6401ff8f827518170143df42f4e7e82282cbb8cfabbd840774f1d865415d293227982b2730b5feda36f583b4772709babe690eee17fc99ec', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/21ffd457485a3a428e38d3b00b399cbd917a0535f36a53eb6401ff8f827518170143df42f4e7e82282cbb8cfabbd840774f1d865415d293227982b2730b5feda36f583b4772709babe690eee17fc99ec', 
'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/21ffd457485a3a428e38d3b00b399cbd917a0535f36a53eb6401ff8f827518170143df42f4e7e82282cbb8cfabbd840774f1d865415d293227982b2730b5feda36f583b4772709babe690eee17fc99ec', 'width': 60}], 'uri': 'spotify:user:19jconnell79:playlist:5772HGqmp2E99GQo5tfmcJ', 'name': '90s rap/hip hop', 'href': 'https://api.spotify.com/v1/users/19jconnell79/playlists/5772HGqmp2E99GQo5tfmcJ'}, {'public': None, 'snapshot_id': 'SXYTRsNxGol+X5UGoW5s8NxYHyPD9aRb4l+WyIIFDXvBqF0BHVnoghcUzvM4qELA', 'id': '6PtCrShmNnLSP1Vr4PAyVm', 'tracks': {'href': 'https://api.spotify.com/v1/users/spotifyaustralia/playlists/6PtCrShmNnLSP1Vr4PAyVm/tracks', 'total': 50}, 'external_urls': {'spotify': 'http://open.spotify.com/user/spotifyaustralia/playlist/6PtCrShmNnLSP1Vr4PAyVm'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/spotifyaustralia'}, 'type': 'user', 'uri': 'spotify:user:spotifyaustralia', 'href': 'https://api.spotify.com/v1/users/spotifyaustralia', 'id': 'spotifyaustralia'}, 'collaborative': False, 'images': [{'height': None, 'url': 'https://u.scdn.co/images/pl/default/2bb39ac6d272a86f9fb377d26249c942b6138a97', 'width': None}], 'uri': 'spotify:user:spotifyaustralia:playlist:6PtCrShmNnLSP1Vr4PAyVm', 'name': '#ThrowbackThursday', 'href': 'https://api.spotify.com/v1/users/spotifyaustralia/playlists/6PtCrShmNnLSP1Vr4PAyVm'}, {'public': None, 'snapshot_id': 'uUQscWhcfXEt0hICBieJ3bq5hST6l4P+cAEFMDNlZPhRmU7CxbXNg75BMxph1fxD', 'id': '5TvJd5fV2RMgv87mdd516L', 'tracks': {'href': 'https://api.spotify.com/v1/users/jordancstone/playlists/5TvJd5fV2RMgv87mdd516L/tracks', 'total': 310}, 'external_urls': {'spotify': 'http://open.spotify.com/user/jordancstone/playlist/5TvJd5fV2RMgv87mdd516L'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/jordancstone'}, 'type': 'user', 'uri': 'spotify:user:jordancstone', 'href': 'https://api.spotify.com/v1/users/jordancstone', 'id': 
'jordancstone'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/46363292538df1e91a597dd30584a4ca41cb6bd9bf3b966b83f90362e502ca3fa0a1a7919759ac9650eef0041184673da90f5a2818f302e810cb1dac75638ccc8f57667af2d620defa755f6ffdd795fb', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/46363292538df1e91a597dd30584a4ca41cb6bd9bf3b966b83f90362e502ca3fa0a1a7919759ac9650eef0041184673da90f5a2818f302e810cb1dac75638ccc8f57667af2d620defa755f6ffdd795fb', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/46363292538df1e91a597dd30584a4ca41cb6bd9bf3b966b83f90362e502ca3fa0a1a7919759ac9650eef0041184673da90f5a2818f302e810cb1dac75638ccc8f57667af2d620defa755f6ffdd795fb', 'width': 60}], 'uri': 'spotify:user:jordancstone:playlist:5TvJd5fV2RMgv87mdd516L', 'name': '90s ROCK (300 tracks)', 'href': 'https://api.spotify.com/v1/users/jordancstone/playlists/5TvJd5fV2RMgv87mdd516L'}, {'public': None, 'snapshot_id': 'CUhnTrVMGslFOFW2i9ncLzbo39Qu/vNIAuRdXEea1NP3SPMlu9PHd2PnrYX3BYHR', 'id': '35iftafjBbC2wWKIgelOf6', 'tracks': {'href': 'https://api.spotify.com/v1/users/spotify_uk_/playlists/35iftafjBbC2wWKIgelOf6/tracks', 'total': 70}, 'external_urls': {'spotify': 'http://open.spotify.com/user/spotify_uk_/playlist/35iftafjBbC2wWKIgelOf6'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/spotify_uk_'}, 'type': 'user', 'uri': 'spotify:user:spotify_uk_', 'href': 'https://api.spotify.com/v1/users/spotify_uk_', 'id': 'spotify_uk_'}, 'collaborative': False, 'images': [{'height': None, 'url': 'https://u.scdn.co/images/pl/default/f6148a7d6511c45c3099dfbaa9a0c7af4c1de77b', 'width': None}], 'uri': 'spotify:user:spotify_uk_:playlist:35iftafjBbC2wWKIgelOf6', 'name': '90s R&B Anthems', 'href': 'https://api.spotify.com/v1/users/spotify_uk_/playlists/35iftafjBbC2wWKIgelOf6'}, {'public': None, 'snapshot_id': 'EIuQGXw7ea1XtV0NeUT/RhLnudL0cW2ORvZl9fBh+pJHIgR1gsm8eH0N4Oy/QeLP', 'id': '5yGuoOwRQF3o8NVRRlvCj7', 
'tracks': {'href': 'https://api.spotify.com/v1/users/truckasaurus1/playlists/5yGuoOwRQF3o8NVRRlvCj7/tracks', 'total': 98}, 'external_urls': {'spotify': 'http://open.spotify.com/user/truckasaurus1/playlist/5yGuoOwRQF3o8NVRRlvCj7'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/truckasaurus1'}, 'type': 'user', 'uri': 'spotify:user:truckasaurus1', 'href': 'https://api.spotify.com/v1/users/truckasaurus1', 'id': 'truckasaurus1'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/9c37cfe8125864774ccb36f076cba3fcfcaa2db30477a78fbbb6623f43dffcd56eb12d7a5562bec1d62bff10f0ef84fd3cf34a73172e52de6816871daab64767f8c06174e7842db40be0a9b8ab1f519e', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/9c37cfe8125864774ccb36f076cba3fcfcaa2db30477a78fbbb6623f43dffcd56eb12d7a5562bec1d62bff10f0ef84fd3cf34a73172e52de6816871daab64767f8c06174e7842db40be0a9b8ab1f519e', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/9c37cfe8125864774ccb36f076cba3fcfcaa2db30477a78fbbb6623f43dffcd56eb12d7a5562bec1d62bff10f0ef84fd3cf34a73172e52de6816871daab64767f8c06174e7842db40be0a9b8ab1f519e', 'width': 60}], 'uri': 'spotify:user:truckasaurus1:playlist:5yGuoOwRQF3o8NVRRlvCj7', 'name': '90s Hiphop / Gangsta Rap', 'href': 'https://api.spotify.com/v1/users/truckasaurus1/playlists/5yGuoOwRQF3o8NVRRlvCj7'}, {'public': None, 'snapshot_id': 'lSl+DalM6hI5UJfkejtCwkcTyKjlQbDOhTrvI9niH7m+zX+hR5jswQegrji0+DjU', 'id': '6jpj4xWiZe6891Ba3TADOO', 'tracks': {'href': 'https://api.spotify.com/v1/users/jec904/playlists/6jpj4xWiZe6891Ba3TADOO/tracks', 'total': 219}, 'external_urls': {'spotify': 'http://open.spotify.com/user/jec904/playlist/6jpj4xWiZe6891Ba3TADOO'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/jec904'}, 'type': 'user', 'uri': 'spotify:user:jec904', 'href': 'https://api.spotify.com/v1/users/jec904', 'id': 'jec904'}, 'collaborative': False, 'images': [{'height': 
640, 'url': 'https://mosaic.scdn.co/640/5fc4b1d3fa12a473b72599a2d950ad5696b206329589ba01e4797b8d98859e79e7f70147f83e86d93b3bba0931af7fdff4a5a197ae9ccd50cc2b4dfacc178aa97cc7e64e9672a8eef88b631484d02c84', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/5fc4b1d3fa12a473b72599a2d950ad5696b206329589ba01e4797b8d98859e79e7f70147f83e86d93b3bba0931af7fdff4a5a197ae9ccd50cc2b4dfacc178aa97cc7e64e9672a8eef88b631484d02c84', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/5fc4b1d3fa12a473b72599a2d950ad5696b206329589ba01e4797b8d98859e79e7f70147f83e86d93b3bba0931af7fdff4a5a197ae9ccd50cc2b4dfacc178aa97cc7e64e9672a8eef88b631484d02c84', 'width': 60}], 'uri': 'spotify:user:jec904:playlist:6jpj4xWiZe6891Ba3TADOO', 'name': '90s Country', 'href': 'https://api.spotify.com/v1/users/jec904/playlists/6jpj4xWiZe6891Ba3TADOO'}, {'public': None, 'snapshot_id': 'NAzpFjabibSDSRIlNQM4LtPGVLyP14W+PtFemfOZ3WIDa1t+tkBeZIE3ubESfD8z', 'id': '0QEvLnkCV2hQx8AVs9VSCq', 'tracks': {'href': 'https://api.spotify.com/v1/users/12145536328/playlists/0QEvLnkCV2hQx8AVs9VSCq/tracks', 'total': 70}, 'external_urls': {'spotify': 'http://open.spotify.com/user/12145536328/playlist/0QEvLnkCV2hQx8AVs9VSCq'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/12145536328'}, 'type': 'user', 'uri': 'spotify:user:12145536328', 'href': 'https://api.spotify.com/v1/users/12145536328', 'id': '12145536328'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/29f1d179c8ab0184f21bf015002de2214754e9d24cfe43a6693027f8b2188806e62c1831fd2cd68b4087e6a8c34321039140c1e13d69614bb1c8f20091142eab23c3273fa73e490f2d04464c69423f75', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/29f1d179c8ab0184f21bf015002de2214754e9d24cfe43a6693027f8b2188806e62c1831fd2cd68b4087e6a8c34321039140c1e13d69614bb1c8f20091142eab23c3273fa73e490f2d04464c69423f75', 'width': 300}, {'height': 60, 'url': 
'https://mosaic.scdn.co/60/29f1d179c8ab0184f21bf015002de2214754e9d24cfe43a6693027f8b2188806e62c1831fd2cd68b4087e6a8c34321039140c1e13d69614bb1c8f20091142eab23c3273fa73e490f2d04464c69423f75', 'width': 60}], 'uri': 'spotify:user:12145536328:playlist:0QEvLnkCV2hQx8AVs9VSCq', 'name': 'Boybands 90s', 'href': 'https://api.spotify.com/v1/users/12145536328/playlists/0QEvLnkCV2hQx8AVs9VSCq'}, {'public': None, 'snapshot_id': 'xFuRVW3gHlQ2duHjA8Y+n+5dEsl5aewbp0tveVWMXXpWImllg5+tXPpuoNSh+c2Q', 'id': '3XctgNePp5NFKiaIYB8z2F', 'tracks': {'href': 'https://api.spotify.com/v1/users/gunnerwaddell/playlists/3XctgNePp5NFKiaIYB8z2F/tracks', 'total': 119}, 'external_urls': {'spotify': 'http://open.spotify.com/user/gunnerwaddell/playlist/3XctgNePp5NFKiaIYB8z2F'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/gunnerwaddell'}, 'type': 'user', 'uri': 'spotify:user:gunnerwaddell', 'href': 'https://api.spotify.com/v1/users/gunnerwaddell', 'id': 'gunnerwaddell'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/f0264d0abd5d237a49f797cfe1752a69d28a9d1e0b0e75514b3dca50bd0175290bdf10c77adaaeb620f6ec66d8b8bad42954ddaf3798af90b9b8b434a45505776adb49c1f554ea3d272513621bd6284f', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/f0264d0abd5d237a49f797cfe1752a69d28a9d1e0b0e75514b3dca50bd0175290bdf10c77adaaeb620f6ec66d8b8bad42954ddaf3798af90b9b8b434a45505776adb49c1f554ea3d272513621bd6284f', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/f0264d0abd5d237a49f797cfe1752a69d28a9d1e0b0e75514b3dca50bd0175290bdf10c77adaaeb620f6ec66d8b8bad42954ddaf3798af90b9b8b434a45505776adb49c1f554ea3d272513621bd6284f', 'width': 60}], 'uri': 'spotify:user:gunnerwaddell:playlist:3XctgNePp5NFKiaIYB8z2F', 'name': '90s early 2000 country', 'href': 'https://api.spotify.com/v1/users/gunnerwaddell/playlists/3XctgNePp5NFKiaIYB8z2F'}, {'public': None, 'snapshot_id': 
'aSYk2sFcfUQitS99gpCYw+eohvnlA+o8kNcfMHDb7eB4+bvyV1hLAW7M+lMNSaYv', 'id': '7zK2WjuX5otv9au92VXsKc', 'tracks': {'href': 'https://api.spotify.com/v1/users/1220108378/playlists/7zK2WjuX5otv9au92VXsKc/tracks', 'total': 662}, 'external_urls': {'spotify': 'http://open.spotify.com/user/1220108378/playlist/7zK2WjuX5otv9au92VXsKc'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/1220108378'}, 'type': 'user', 'uri': 'spotify:user:1220108378', 'href': 'https://api.spotify.com/v1/users/1220108378', 'id': '1220108378'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/53e089d368f4ffe69ac66ef7acd03cf273db9f234bc738a988570b3ad00ff8f756632b8ad90fc58813f894c1300cbdda027ff948ee963640c5126d2968eeccd1b4822115c647069e0de419b86545fbb1', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/53e089d368f4ffe69ac66ef7acd03cf273db9f234bc738a988570b3ad00ff8f756632b8ad90fc58813f894c1300cbdda027ff948ee963640c5126d2968eeccd1b4822115c647069e0de419b86545fbb1', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/53e089d368f4ffe69ac66ef7acd03cf273db9f234bc738a988570b3ad00ff8f756632b8ad90fc58813f894c1300cbdda027ff948ee963640c5126d2968eeccd1b4822115c647069e0de419b86545fbb1', 'width': 60}], 'uri': 'spotify:user:1220108378:playlist:7zK2WjuX5otv9au92VXsKc', 'name': 'Ultimate 90s Playlist', 'href': 'https://api.spotify.com/v1/users/1220108378/playlists/7zK2WjuX5otv9au92VXsKc'}, {'public': None, 'snapshot_id': 'bSeyYo2va/u8/fZWTX9PJ7pfdEzUhv605BWH56JACU1w3PNSztzJFfQji4fJ7pVR', 'id': '6k8WK2AGWnwD37I9kh2zvq', 'tracks': {'href': 'https://api.spotify.com/v1/users/stephaniegeorgopulos/playlists/6k8WK2AGWnwD37I9kh2zvq/tracks', 'total': 209}, 'external_urls': {'spotify': 'http://open.spotify.com/user/stephaniegeorgopulos/playlist/6k8WK2AGWnwD37I9kh2zvq'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/stephaniegeorgopulos'}, 'type': 'user', 'uri': 
'spotify:user:stephaniegeorgopulos', 'href': 'https://api.spotify.com/v1/users/stephaniegeorgopulos', 'id': 'stephaniegeorgopulos'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/9032d34a555405e96cf941b46f2f2a4b7aa75a33804bbef29d9f13121c0b58777c7efff77c2011229a63bb59f7044e1fa98e63093726844d26d20dd2fb49306a2f7b5d603361e1e45750711413d52484', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/9032d34a555405e96cf941b46f2f2a4b7aa75a33804bbef29d9f13121c0b58777c7efff77c2011229a63bb59f7044e1fa98e63093726844d26d20dd2fb49306a2f7b5d603361e1e45750711413d52484', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/9032d34a555405e96cf941b46f2f2a4b7aa75a33804bbef29d9f13121c0b58777c7efff77c2011229a63bb59f7044e1fa98e63093726844d26d20dd2fb49306a2f7b5d603361e1e45750711413d52484', 'width': 60}], 'uri': 'spotify:user:stephaniegeorgopulos:playlist:6k8WK2AGWnwD37I9kh2zvq', 'name': '90s R&B', 'href': 'https://api.spotify.com/v1/users/stephaniegeorgopulos/playlists/6k8WK2AGWnwD37I9kh2zvq'}, {'public': None, 'snapshot_id': '+vMVeKIqmjtjzAoZGwbi0v8AZLbPFrqUgmAqBYblyJQyi8p3l43TNdqqPSwNKPPd', 'id': '5DygagPVN6KmHre6MJNAJ4', 'tracks': {'href': 'https://api.spotify.com/v1/users/bigju85/playlists/5DygagPVN6KmHre6MJNAJ4/tracks', 'total': 822}, 'external_urls': {'spotify': 'http://open.spotify.com/user/bigju85/playlist/5DygagPVN6KmHre6MJNAJ4'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/bigju85'}, 'type': 'user', 'uri': 'spotify:user:bigju85', 'href': 'https://api.spotify.com/v1/users/bigju85', 'id': 'bigju85'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/467e12151d2e11c6813fa7804ee8d96ebd1d7ac5708eab87ad4662c94a67d17547766902d49c2d927681dea378e04e46d2f22cc9b722700436f685eca15e922378d2dca9c7d683665e472643b860a41f', 'width': 640}, {'height': 300, 'url': 
'https://mosaic.scdn.co/300/467e12151d2e11c6813fa7804ee8d96ebd1d7ac5708eab87ad4662c94a67d17547766902d49c2d927681dea378e04e46d2f22cc9b722700436f685eca15e922378d2dca9c7d683665e472643b860a41f', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/467e12151d2e11c6813fa7804ee8d96ebd1d7ac5708eab87ad4662c94a67d17547766902d49c2d927681dea378e04e46d2f22cc9b722700436f685eca15e922378d2dca9c7d683665e472643b860a41f', 'width': 60}], 'uri': 'spotify:user:bigju85:playlist:5DygagPVN6KmHre6MJNAJ4', 'name': '90s Rap', 'href': 'https://api.spotify.com/v1/users/bigju85/playlists/5DygagPVN6KmHre6MJNAJ4'}, {'public': None, 'snapshot_id': 'b5YyIRMEU/0WE+OYt2CGO7uoRjSRccLWKzcJzc04gE35qSICKr+KDxO1mZE+v+/H', 'id': '6ZblzG0k7kzs0Ex7EdFWY6', 'tracks': {'href': 'https://api.spotify.com/v1/users/1232709737/playlists/6ZblzG0k7kzs0Ex7EdFWY6/tracks', 'total': 136}, 'external_urls': {'spotify': 'http://open.spotify.com/user/1232709737/playlist/6ZblzG0k7kzs0Ex7EdFWY6'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/1232709737'}, 'type': 'user', 'uri': 'spotify:user:1232709737', 'href': 'https://api.spotify.com/v1/users/1232709737', 'id': '1232709737'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/4bc3fa03ea66c557fa889ee9684ddc8b2ed94a5e3a1ec3bdc44d89ed93bf99d12138139469731b143a5443882440686191df60791974c312a14eeeb1443f7f3365178fb5a4050a516ab3221388daaf64', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/4bc3fa03ea66c557fa889ee9684ddc8b2ed94a5e3a1ec3bdc44d89ed93bf99d12138139469731b143a5443882440686191df60791974c312a14eeeb1443f7f3365178fb5a4050a516ab3221388daaf64', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/4bc3fa03ea66c557fa889ee9684ddc8b2ed94a5e3a1ec3bdc44d89ed93bf99d12138139469731b143a5443882440686191df60791974c312a14eeeb1443f7f3365178fb5a4050a516ab3221388daaf64', 'width': 60}], 'uri': 'spotify:user:1232709737:playlist:6ZblzG0k7kzs0Ex7EdFWY6', 'name': '90s PARTY', 
'href': 'https://api.spotify.com/v1/users/1232709737/playlists/6ZblzG0k7kzs0Ex7EdFWY6'}, {'public': None, 'snapshot_id': 'sqjT9Ws60MZTay5r3UzHelKm9qD/mgeZ9kYFyFw9Og4nX4mVvgz3WhA+Red3MfTF', 'id': '5t30PswiZDYfZAIrTwGr2V', 'tracks': {'href': 'https://api.spotify.com/v1/users/12143691853/playlists/5t30PswiZDYfZAIrTwGr2V/tracks', 'total': 88}, 'external_urls': {'spotify': 'http://open.spotify.com/user/12143691853/playlist/5t30PswiZDYfZAIrTwGr2V'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/12143691853'}, 'type': 'user', 'uri': 'spotify:user:12143691853', 'href': 'https://api.spotify.com/v1/users/12143691853', 'id': '12143691853'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/eb646ba4b6c6db5fcc180d091512bdcf497ed3f063e47b9e1420e2effd4d4537c464c27e6e598f2ed1fec99e6c187e2f67bc28d5367d5db9664b1835f7c286b445154de89205dcfd05e3db6647555d87', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/eb646ba4b6c6db5fcc180d091512bdcf497ed3f063e47b9e1420e2effd4d4537c464c27e6e598f2ed1fec99e6c187e2f67bc28d5367d5db9664b1835f7c286b445154de89205dcfd05e3db6647555d87', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/eb646ba4b6c6db5fcc180d091512bdcf497ed3f063e47b9e1420e2effd4d4537c464c27e6e598f2ed1fec99e6c187e2f67bc28d5367d5db9664b1835f7c286b445154de89205dcfd05e3db6647555d87', 'width': 60}], 'uri': 'spotify:user:12143691853:playlist:5t30PswiZDYfZAIrTwGr2V', 'name': 'lovesongs 80s to 90s', 'href': 'https://api.spotify.com/v1/users/12143691853/playlists/5t30PswiZDYfZAIrTwGr2V'}], 'total': 2538}}
# 17. What is the data type of the search result? Print it.
print(type(playlist_search_result))
# 18. What are all of the keys that the search result has?
print(playlist_search_result.keys())
# 19. Take a look at 'playlists' - what keys does it have?
print(playlist_search_result['playlists'].keys())
# 20. Save the list of playlists into a variable called 'playlists'
# Fix: the question asks for the LIST of playlists, which lives under
# 'items'; the previous code saved the whole paging dict instead.
playlists = playlist_search_result['playlists']['items']
# 21. Print the title of every playlist
for playlist in playlists:
    print(playlist['name'])
# 22. Loop through every playlist, printing its keys
# Fix: the print must be inside a loop; previously it ran once after the
# loop above and only showed the keys of the LAST playlist.
for playlist in playlists:
    print(playlist.keys())
# 23. What is the data type of a playlist's 'tracks'?
print(type(playlists[0]['tracks']))
# 24. Print the name and number of tracks for every playlist
# Fix: same bug as 22 — these statements were outside any loop, so only
# the last playlist was reported.
for playlist in playlists:
    print("The playlist", playlist['name'], "has", playlist['tracks']['total'], "tracks.")
# 25. We like curation! Loop through the playlists again, but only display those with fewer than 200 tracks.
print("The following playlists have fewer than 200 tracks.")
for playlist in playlists:
    if playlist['tracks']['total'] < 200:
        print(playlist['name'])
| mit |
huaweiswitch/neutron | neutron/tests/unit/mlnx/test_defaults.py | 25 | 1508 | # Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
#NOTE this import loads tests required options
from neutron.plugins.mlnx.common import config # noqa
from neutron.tests import base
class ConfigurationTest(base.BaseTestCase):
    """Check that the Mellanox plugin/agent options have the expected defaults."""

    def test_defaults(self):
        # Bind the option groups once for readability.
        agent_opts = cfg.CONF.AGENT
        mlnx_opts = cfg.CONF.MLNX
        eswitch_opts = cfg.CONF.ESWITCH

        self.assertEqual(2, agent_opts.polling_interval)
        self.assertEqual('vlan', mlnx_opts.tenant_network_type)
        self.assertEqual(1, len(mlnx_opts.network_vlan_ranges))
        self.assertEqual('eth', mlnx_opts.physical_network_type)
        self.assertFalse(mlnx_opts.physical_network_type_mappings)
        self.assertEqual(0, len(eswitch_opts.physical_interface_mappings))
        self.assertEqual('tcp://127.0.0.1:60001', eswitch_opts.daemon_endpoint)
| apache-2.0 |
cg31/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/series_test.py | 29 | 2390 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the Series class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
class TransformedSeriesTest(tf.test.TestCase):
  """Unit tests for `TransformedSeries`."""

  def _make_series(self, transform, output_name):
    """Build a `TransformedSeries` over a single mock input series."""
    return learn.TransformedSeries(
        [mocks.MockSeries("foobar", [])], transform, output_name)

  def test_repr(self):
    series = self._make_series(
        mocks.MockTwoOutputTransform("thb", "nth", "snt"), "qux")
    # Transform parameters are rendered sorted by name.
    expected = ("MockTransform({'param_one': 'thb', 'param_three': 'snt', "
                "'param_two': 'nth'})"
                "(foobar)[qux]")
    self.assertEqual(expected, repr(series))

  def test_build_no_output(self):
    def create_no_output_series():
      return self._make_series(
          mocks.MockZeroOutputTransform("thb", "nth"), None)
    # A transform with zero outputs cannot name an output column.
    self.assertRaises(ValueError, create_no_output_series)

  def test_build_single_output(self):
    series = self._make_series(
        mocks.MockOneOutputTransform("thb", "nth"), "out1")
    self.assertEqual(mocks.MockTensor("Mock Tensor 1", tf.int32),
                     series.build())

  def test_build_multiple_output(self):
    series = self._make_series(
        mocks.MockTwoOutputTransform("thb", "nth", "snt"), "out2")
    self.assertEqual(mocks.MockTensor("Mock Tensor 2", tf.int32),
                     series.build())
# Run this module's tests when executed directly.
if __name__ == "__main__":
  tf.test.main()
| apache-2.0 |
blerer/cassandra-dtest | token_generator_test.py | 4 | 6631 | import os
import subprocess
import time
import pytest
import parse
import logging
from cassandra.util import sortedset
from ccmlib import common
from dtest import Tester
from tools.data import rows_to_list
since = pytest.mark.since
logger = logging.getLogger(__name__)
@since('2.2', max_version='3.0.0')
class TestTokenGenerator(Tester):
    """
    Basic tools/bin/token-generator test.

    Token-generator was removed in CASSANDRA-5261

    @jira_ticket CASSANDRA-5261
    @jira_ticket CASSANDRA-9300
    """

    def call_token_generator(self, install_dir, randomPart, nodes):
        """
        Invoke tools/bin/token-generator and parse its output.

        ``nodes`` is a list of per-datacenter node counts.  Returns a list
        with one inner list of tokens per datacenter.
        """
        executable = os.path.join(install_dir, 'tools', 'bin', 'token-generator')
        if common.is_win():
            executable += ".bat"

        args = [executable]
        if randomPart is not None:
            if randomPart:
                args.append("--random")
            else:
                args.append("--murmur3")
        for n in nodes:
            args.append(str(n))

        logger.debug('Invoking {}'.format(args))
        token_gen_output = subprocess.check_output(args).decode()
        lines = token_gen_output.split("\n")
        dc_tokens = None
        generated_tokens = []
        for line in lines:
            if line.startswith("DC #"):
                if dc_tokens is not None:
                    # Finished the previous DC block; it must not be empty.
                    # (The previous form `assert x.__len__(), 0 > "msg"` never
                    # tested the intended "len > 0" condition.)
                    assert len(dc_tokens) > 0, \
                        "dc_tokens is empty from token-generator {}".format(args)
                    generated_tokens.append(dc_tokens)
                dc_tokens = []
            else:
                if line:
                    m = parse.search('Node #{node_num:d}:{:s}{node_token:d}', line)
                    assert m is not None, \
                        "Line \"{}\" does not match pattern from token-generator {}".format(line, args)
                    node_num = int(m.named['node_num'])
                    node_token = int(m.named['node_token'])
                    dc_tokens.append(node_token)
                    # Node numbers are printed sequentially, so the running
                    # count must equal the number of tokens collected so far.
                    assert node_num == len(dc_tokens), \
                        "invalid token count from token-generator {}".format(args)
        assert dc_tokens is not None, "No tokens from token-generator {}".format(args)
        assert len(dc_tokens) > 0, "No tokens from token-generator {}".format(args)
        generated_tokens.append(dc_tokens)

        return generated_tokens

    def prepare(self, randomPart=None, nodes=1):
        """
        Generate tokens, configure the matching partitioner and start a
        single-DC cluster.  Returns (generated_tokens, cql_session).
        """
        cluster = self.cluster

        install_dir = cluster.get_install_dir()
        generated_tokens = self.call_token_generator(install_dir, randomPart, [nodes])

        # token-generator defaults to Murmur3 tokens; only an explicit
        # --random run needs the RandomPartitioner.  (The original code had
        # an unreachable inner else-branch here.)
        if randomPart:
            cluster.set_partitioner("org.apache.cassandra.dht.RandomPartitioner")
        else:
            cluster.set_partitioner("org.apache.cassandra.dht.Murmur3Partitioner")

        # remove these from cluster options - otherwise node's config would be
        # overridden with cluster._config_options_
        del cluster._config_options['num_tokens']
        if self.dtest_config.use_vnodes:
            del cluster._config_options['initial_token']

        assert not cluster.nodelist(), "nodelist() already initialized"
        cluster.populate(nodes, use_vnodes=False, tokens=generated_tokens[0]).start(wait_for_binary_proto=True)
        time.sleep(0.2)

        node = cluster.nodelist()[0]
        session = self.patient_cql_connection(node)
        return generated_tokens, session

    def _token_gen_test(self, nodes, randomPart=None):
        """Start a cluster from generated tokens and verify the cluster's
        tokens match what token-generator produced."""
        generated_tokens, session = self.prepare(randomPart, nodes=nodes)
        dc_tokens = generated_tokens[0]

        tokens = []
        local_tokens = rows_to_list(session.execute("SELECT tokens FROM system.local"))[0]
        assert len(local_tokens) == 1, "too many tokens for peer"
        for tok in local_tokens:
            tokens += tok

        rows = rows_to_list(session.execute("SELECT tokens FROM system.peers"))
        assert len(rows) == nodes - 1
        for row in rows:
            peer_tokens = row[0]
            assert len(peer_tokens) == 1, "too many tokens for peer"
            for tok in peer_tokens:
                tokens.append(tok)

        assert len(tokens) == len(dc_tokens)
        for cluster_token in tokens:
            tok = int(cluster_token)
            # Membership check; the previous `assert x.index(tok), 0 >= "msg"`
            # form never tested the intended condition.
            assert tok in dc_tokens, "token in cluster does not match generated tokens"

    def token_gen_def_test(self, nodes=3):
        """ Validate token-generator with Murmur3Partitioner with default token-generator behavior """
        self._token_gen_test(nodes)

    def token_gen_murmur3_test(self, nodes=3):
        """ Validate token-generator with Murmur3Partitioner with explicit murmur3 """
        self._token_gen_test(nodes, False)

    def token_gen_random_test(self, nodes=3):
        """ Validate token-generator with Murmur3Partitioner with explicit random """
        self._token_gen_test(nodes, True)

    # Per-datacenter node-count combinations exercised by the multi-DC tests.
    dc_nodes_combinations = [
        [3, 5],
        [3, 5, 5],
        [12, 5, 7],
        [50, 100, 250],
        [100, 100, 100],
        [250, 250, 250],
        [1000, 1000, 1000],
        [2500, 2500, 2500, 2500]
    ]

    def _multi_dc_tokens(self, random=None):
        """Generate tokens for several DC layouts and check range, uniqueness
        and counts."""
        t_min = 0
        t_max = 1 << 127
        if random is None or not random:
            # Murmur3 tokens are signed 64-bit values.
            t_min = -1 << 63
            t_max = 1 << 63
        for dc_nodes in self.dc_nodes_combinations:
            all_tokens = sortedset()
            node_count = 0
            generated_tokens = self.call_token_generator(self.cluster.get_install_dir(), random, dc_nodes)
            assert len(dc_nodes) == len(generated_tokens)
            for nodes, tokens in zip(dc_nodes, generated_tokens):
                node_count += nodes
                assert nodes == len(tokens)
                for tok in tokens:
                    assert t_min <= tok < t_max, "Generated token %r out of Murmur3Partitioner range %r..%r" % (tok, t_min, t_max - 1)
                    assert tok not in all_tokens, "Duplicate token %r for nodes-counts %r" % (tok, dc_nodes)
                    all_tokens.add(tok)
            assert len(all_tokens) == node_count, "Number of tokens %r and number of nodes %r does not match for %r" % (len(all_tokens), node_count, dc_nodes)

    def test_multi_dc_tokens_default(self):
        self._multi_dc_tokens()

    def test_multi_dc_tokens_murmur3(self):
        self._multi_dc_tokens(False)

    def test_multi_dc_tokens_random(self):
        self._multi_dc_tokens(True)
| apache-2.0 |
malmiron/incubator-airflow | tests/www_rbac/api/experimental/test_dag_runs_endpoint.py | 14 | 4885 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import unittest
from airflow import configuration
from airflow.api.common.experimental.trigger_dag import trigger_dag
from airflow.models import DagRun
from airflow.settings import Session
from airflow.www_rbac import app as application
class TestDagRunsEndpoint(unittest.TestCase):
    """Tests for the experimental ``dag_runs`` REST endpoint."""

    @classmethod
    def setUpClass(cls):
        super(TestDagRunsEndpoint, cls).setUpClass()
        # Start from a clean slate: no pre-existing DagRun rows.
        session = Session()
        session.query(DagRun).delete()
        session.commit()
        session.close()

    def setUp(self):
        super(TestDagRunsEndpoint, self).setUp()
        configuration.load_test_config()
        app, _ = application.create_app(testing=True)
        self.app = app.test_client()

    def tearDown(self):
        # Remove any DagRun rows the test created.
        session = Session()
        session.query(DagRun).delete()
        session.commit()
        session.close()
        super(TestDagRunsEndpoint, self).tearDown()

    def _get_dag_runs(self, dag_id, query=''):
        """GET the dag_runs endpoint and return (response, decoded JSON)."""
        url = '/api/experimental/dags/{}/dag_runs{}'.format(dag_id, query)
        response = self.app.get(url)
        return response, json.loads(response.data.decode('utf-8'))

    def test_get_dag_runs_success(self):
        dag_id = 'example_bash_operator'
        dag_run = trigger_dag(dag_id=dag_id, run_id='test_get_dag_runs_success')

        response, data = self._get_dag_runs(dag_id)
        self.assertEqual(200, response.status_code)
        self.assertIsInstance(data, list)
        self.assertEqual(len(data), 1)
        self.assertEqual(data[0]['dag_id'], dag_id)
        self.assertEqual(data[0]['id'], dag_run.id)

    def test_get_dag_runs_success_with_state_parameter(self):
        dag_id = 'example_bash_operator'
        dag_run = trigger_dag(dag_id=dag_id, run_id='test_get_dag_runs_success')

        response, data = self._get_dag_runs(dag_id, '?state=running')
        self.assertEqual(200, response.status_code)
        self.assertIsInstance(data, list)
        self.assertEqual(len(data), 1)
        self.assertEqual(data[0]['dag_id'], dag_id)
        self.assertEqual(data[0]['id'], dag_run.id)

    def test_get_dag_runs_success_with_capital_state_parameter(self):
        # State filtering should be case-insensitive.
        dag_id = 'example_bash_operator'
        dag_run = trigger_dag(dag_id=dag_id, run_id='test_get_dag_runs_success')

        response, data = self._get_dag_runs(dag_id, '?state=RUNNING')
        self.assertEqual(200, response.status_code)
        self.assertIsInstance(data, list)
        self.assertEqual(len(data), 1)
        self.assertEqual(data[0]['dag_id'], dag_id)
        self.assertEqual(data[0]['id'], dag_run.id)

    def test_get_dag_runs_success_with_state_no_result(self):
        # An unknown state filter matches nothing but is still a 200.
        dag_id = 'example_bash_operator'
        trigger_dag(dag_id=dag_id, run_id='test_get_dag_runs_success')

        response, data = self._get_dag_runs(dag_id, '?state=dummy')
        self.assertEqual(200, response.status_code)
        self.assertIsInstance(data, list)
        self.assertEqual(len(data), 0)

    def test_get_dag_runs_invalid_dag_id(self):
        response, data = self._get_dag_runs('DUMMY_DAG')
        self.assertEqual(400, response.status_code)
        self.assertNotIsInstance(data, list)

    def test_get_dag_runs_no_runs(self):
        response, data = self._get_dag_runs('example_bash_operator')
        self.assertEqual(200, response.status_code)
        self.assertIsInstance(data, list)
        self.assertEqual(len(data), 0)
| apache-2.0 |
bdestombe/flopy-1 | flopy/modpath/mpbas.py | 3 | 5551 | """
mpbas module. Contains the ModpathBas class. Note that the user can access
the ModpathBas class as `flopy.modflow.ModpathBas`.
Additional information for this MODFLOW/MODPATH package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/index.html?bas6.htm>`_.
"""
import numpy as np
from numpy import empty, array
from ..pakbase import Package
from ..utils import Util2d, Util3d
class ModpathBas(Package):
    """
    MODPATH Basic Package Class.

    Parameters
    ----------
    model : model object
        The model object (of type :class:`flopy.modpath.mp.Modpath`) to which
        this package will be added.
    hnoflo : float
        Head value assigned to inactive cells (default is -9999.).
    hdry : float
        Head value assigned to dry cells (default is -8888.).
    def_face_ct : int
        Number fo default iface codes to read (default is 0).
    bud_label : str or list of strs
        MODFLOW budget item to which a default iface is assigned.
    def_iface : int or list of ints
        Cell face (iface) on which to assign flows from MODFLOW budget file.
    laytyp : int or list of ints
        MODFLOW layer type (0 is convertible, 1 is confined).
    ibound : array of ints, optional
        The ibound array (the default is 1).
    prsity : array of ints, optional
        The porosity array (the default is 0.30).
    prsityCB : array of ints, optional
        The porosity array for confining beds (the default is 0.30).
    extension : str, optional
        File extension (default is 'mpbas').

    Attributes
    ----------
    heading : str
        Text string written to top of package input file.

    Methods
    -------

    See Also
    --------

    Notes
    -----

    Examples
    --------

    >>> import flopy
    >>> m = flopy.modpath.Modpath()
    >>> mpbas = flopy.modpath.ModpathBas(m)

    """
    def __init__(self, model, hnoflo=-9999., hdry=-8888.,
                 def_face_ct=0, bud_label=None, def_iface=None,
                 laytyp=0, ibound=1, prsity=0.30, prsityCB=0.30,
                 extension='mpbas', unitnumber=86):
        """
        Package constructor.
        """
        Package.__init__(self, model, extension, 'MPBAS', unitnumber)
        nrow, ncol, nlay, nper = self.parent.mf.nrow_ncol_nlay_nper
        self.parent.mf.get_name_file_entries()
        self.heading1 = '# MPBAS for Modpath, generated by Flopy.'
        self.heading2 = '#'
        self.hnoflo = hnoflo
        self.hdry = hdry
        self.def_face_ct = def_face_ct
        self.bud_label = bud_label
        self.def_iface = def_iface
        self.laytyp = laytyp
        self.ibound = Util3d(model, (nlay, nrow, ncol), np.int, ibound,
                             name='ibound', locat=self.unit_number[0])
        # Wrap porosity arrays in Util3d directly.  (The original assigned
        # the raw values to self.prsity/self.prsityCB first and immediately
        # overwrote them with Util3d instances - a redundant double write.)
        self.prsity = Util3d(model, (nlay, nrow, ncol), np.float32,
                             prsity, name='prsity',
                             locat=self.unit_number[0])
        self.prsityCB = Util3d(model, (nlay, nrow, ncol), np.float32,
                               prsityCB, name='prsityCB',
                               locat=self.unit_number[0])
        self.parent.add_package(self)

    def write_file(self):
        """
        Write the package file

        Returns
        -------
        None

        """
        nrow, ncol, nlay, nper = self.parent.mf.nrow_ncol_nlay_nper
        # NOTE(review): the 'DIS' package is fetched but never used below --
        # confirm whether this lookup can be dropped.
        ModflowDis = self.parent.mf.get_package('DIS')
        # Open file for writing
        f_bas = open(self.fn_path, 'w')
        f_bas.write('#{0:s}\n#{1:s}\n'.format(self.heading1, self.heading2))
        f_bas.write('{0:16.6f} {1:16.6f}\n'.format(self.hnoflo, self.hdry))
        f_bas.write('{0:4d}\n'.format(self.def_face_ct))
        if self.def_face_ct > 0:
            # One label/iface pair per default-iface entry.
            for i in range(self.def_face_ct):
                f_bas.write('{0:20s}\n'.format(self.bud_label[i]))
                f_bas.write('{0:2d}\n'.format(self.def_iface[i]))

        # Determine layer-type information from whichever flow package is
        # attached to the parent MODFLOW model (BCF6, LPF or UPW, in that
        # order of preference).  NOTE(review): if none is attached, `lc`
        # is undefined and the set_fmtin call below raises NameError.
        flow_package = self.parent.mf.get_package('BCF6')
        if flow_package is not None:
            lc = Util2d(self.parent, (nlay,), np.int,
                        flow_package.laycon.get_value(),
                        name='bas - laytype', locat=self.unit_number[0])
        else:
            flow_package = self.parent.mf.get_package('LPF')
            if flow_package is not None:
                lc = Util2d(self.parent, (nlay,), np.int,
                            flow_package.laytyp.get_value(),
                            name='bas - laytype', locat=self.unit_number[0])
            else:
                flow_package = self.parent.mf.get_package('UPW')
                if flow_package is not None:
                    lc = Util2d(self.parent, (nlay,), np.int,
                                flow_package.laytyp.get_value(),
                                name='bas - laytype',
                                locat=self.unit_number[0])
        # need to reset lc fmtin
        lc.set_fmtin('(40I2)')
        f_bas.write(lc.string)
        # from modpath bas--uses keyword array types
        f_bas.write(self.ibound.get_file_entry())
        # from MT3D bas--uses integer array types
        f_bas.write(self.prsity.get_file_entry())
        f_bas.write(self.prsityCB.get_file_entry())
        f_bas.close()
mwmuni/LIGGGHTS_GUI | networkx/generators/geometric.py | 30 | 11734 | # -*- coding: utf-8 -*-
"""
Generators for geometric graphs.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
'Dan Schult (dschult@colgate.edu)',
'Ben Edwards (BJEdwards@gmail.com)'])
__all__ = ['random_geometric_graph',
'waxman_graph',
'geographical_threshold_graph',
'navigable_small_world_graph']
from bisect import bisect_left
from functools import reduce
from itertools import product
import math, random, sys
import networkx as nx
#---------------------------------------------------------------------------
# Random Geometric Graphs
#---------------------------------------------------------------------------
def random_geometric_graph(n, radius, dim=2, pos=None):
    """Returns a random geometric graph in the unit cube.

    The random geometric graph model places ``n`` nodes uniformly at random in
    the unit cube. Two nodes are joined by an edge if the Euclidean distance
    between the nodes is at most ``radius``.

    Parameters
    ----------
    n : int
        Number of nodes
    radius: float
        Distance threshold value
    dim : int, optional
        Dimension of graph
    pos : dict, optional
        A dictionary keyed by node with node positions as values.

    Returns
    -------
    Graph

    Examples
    --------
    Create a random geometric graph on twenty nodes where nodes are joined by
    an edge if their distance is at most 0.1::

    >>> G = nx.random_geometric_graph(20, 0.1)

    Notes
    -----
    This algorithm currently only supports Euclidean distance.

    This uses an `O(n^2)` algorithm to build the graph. A faster algorithm
    is possible using k-d trees.

    The ``pos`` keyword argument can be used to specify node positions so you
    can create an arbitrary distribution and domain for positions.

    For example, to use a 2D Gaussian distribution of node positions with mean
    (0, 0) and standard deviation 2::

    >>> import random
    >>> n = 20
    >>> p = {i: (random.gauss(0, 2), random.gauss(0, 2)) for i in range(n)}
    >>> G = nx.random_geometric_graph(n, 0.2, pos=p)

    References
    ----------
    .. [1] Penrose, Mathew, Random Geometric Graphs,
       Oxford Studies in Probability, 5, 2003.
    """
    G = nx.Graph()
    G.name = "Random Geometric Graph"
    G.add_nodes_from(range(n))
    if pos is None:
        # Random positions in the unit cube.  The loop variable is named
        # `node` so it does not shadow the parameter `n` (it did before).
        for node in G:
            G.node[node]['pos'] = [random.random() for i in range(0, dim)]
    else:
        nx.set_node_attributes(G, 'pos', pos)
    # connect nodes within "radius" of each other
    # n^2 algorithm, could use a k-d tree implementation
    radius_sq = radius ** 2  # hoisted: compare squared distances, no sqrt
    nodes = G.nodes(data=True)
    while nodes:
        u, du = nodes.pop()
        pu = du['pos']
        for v, dv in nodes:
            pv = dv['pos']
            d = sum(((a - b) ** 2 for a, b in zip(pu, pv)))
            if d <= radius_sq:
                G.add_edge(u, v)
    return G
def geographical_threshold_graph(n, theta, alpha=2, dim=2, pos=None,
                                 weight=None):
    r"""Returns a geographical threshold graph.

    The geographical threshold graph model places ``n`` nodes uniformly at
    random in a rectangular domain. Each node `u` is assigned a weight `w_u`.
    Two nodes `u` and `v` are joined by an edge if

    .. math::

       w_u + w_v \ge \theta r^{\alpha}

    where `r` is the Euclidean distance between `u` and `v`, and `\theta`,
    `\alpha` are parameters.

    Parameters
    ----------
    n : int
        Number of nodes
    theta: float
        Threshold value
    alpha: float, optional
        Exponent of distance function
    dim : int, optional
        Dimension of graph
    pos : dict
        Node positions as a dictionary of tuples keyed by node.
    weight : dict
        Node weights as a dictionary of numbers keyed by node.

    Returns
    -------
    Graph

    Examples
    --------
    >>> G = nx.geographical_threshold_graph(20, 50)

    Notes
    -----
    If weights are not specified they are assigned to nodes by drawing randomly
    from the exponential distribution with rate parameter `\lambda=1`. To
    specify weights from a different distribution, use the ``weight`` keyword
    argument::

    >>> import random
    >>> n = 20
    >>> w = {i: random.expovariate(5.0) for i in range(n)}
    >>> G = nx.geographical_threshold_graph(20, 50, weight=w)

    If node positions are not specified they are randomly assigned from the
    uniform distribution.

    References
    ----------
    .. [1] Masuda, N., Miwa, H., Konno, N.:
       Geographical threshold graphs with small-world and scale-free
       properties.
       Physical Review E 71, 036108 (2005)
    .. [2] Milan Bradonjić, Aric Hagberg and Allon G. Percus,
       Giant component and connectivity in geographical threshold graphs,
       in Algorithms and Models for the Web-Graph (WAW 2007),
       Antony Bonato and Fan Chung (Eds), pp. 209--216, 2007
    """
    G = nx.Graph()
    # add n nodes (range() directly; the [v for v in range(n)] wrapper
    # in the original built a throwaway list)
    G.add_nodes_from(range(n))
    if weight is None:
        # choose weights from exponential distribution; `node` avoids
        # shadowing the parameter `n` as the original loop did
        for node in G:
            G.node[node]['weight'] = random.expovariate(1.0)
    else:
        nx.set_node_attributes(G, 'weight', weight)
    if pos is None:
        # random positions
        for node in G:
            G.node[node]['pos'] = [random.random() for i in range(0, dim)]
    else:
        nx.set_node_attributes(G, 'pos', pos)
    G.add_edges_from(geographical_threshold_edges(G, theta, alpha))
    return G
def geographical_threshold_edges(G, theta, alpha=2):
    """Yield the edges of a geographical threshold graph for a graph ``G``
    whose nodes carry ``'pos'`` and ``'weight'`` attributes.

    An edge (u, v) is produced whenever
    ``weight(u) + weight(v) >= theta * dist(u, v) ** alpha``.
    """
    remaining = G.nodes(data=True)
    while remaining:
        u, attrs_u = remaining.pop()
        weight_u = attrs_u['weight']
        pos_u = attrs_u['pos']
        # Each node is only compared against those still in `remaining`,
        # so every unordered pair is examined exactly once.
        for v, attrs_v in remaining:
            squared = sum((a - b) ** 2 for a, b in zip(pos_u, attrs_v['pos']))
            distance = math.sqrt(squared)
            if weight_u + attrs_v['weight'] >= theta * distance ** alpha:
                yield (u, v)
def waxman_graph(n, alpha=0.4, beta=0.1, L=None, domain=(0, 0, 1, 1)):
    r"""Return a Waxman random graph.

    The Waxman random graph model places ``n`` nodes uniformly at random in a
    rectangular domain. Each pair of nodes at Euclidean distance `d` is joined
    by an edge with probability

    .. math::

       p = \alpha \exp(-d / \beta L).

    This function implements both Waxman models, using the ``L`` keyword
    argument.

    * Waxman-1: if ``L`` is not specified, it is set to be the maximum distance
      between any pair of nodes.
    * Waxman-2: if ``L`` is specified, the distance between a pair of nodes is
      chosen uniformly at random from the interval `[0, L]`.

    Parameters
    ----------
    n : int
        Number of nodes
    alpha: float
        Model parameter
    beta: float
        Model parameter
    L : float, optional
        Maximum distance between nodes. If not specified, the actual distance
        is calculated.
    domain : four-tuple of numbers, optional
        Domain size, given as a tuple of the form `(x_min, y_min, x_max,
        y_max)`.

    Returns
    -------
    G: Graph

    References
    ----------
    .. [1] B. M. Waxman, Routing of multipoint connections.
       IEEE J. Select. Areas Commun. 6(9),(1988) 1617-1622.
    """
    # build graph of n nodes with random positions in the unit square
    G = nx.Graph()
    G.add_nodes_from(range(n))
    (xmin, ymin, xmax, ymax) = domain
    # `node` avoids shadowing the parameter `n` as the original loop did.
    for node in G:
        G.node[node]['pos'] = (xmin + ((xmax - xmin) * random.random()),
                               ymin + ((ymax - ymin) * random.random()))
    if L is None:
        # find maximum distance between two nodes (renamed from the
        # ambiguous single letter `l`); track squared distances and take
        # one sqrt at the end
        max_dist = 0
        pos = list(nx.get_node_attributes(G, 'pos').values())
        while pos:
            x1, y1 = pos.pop()
            for x2, y2 in pos:
                r2 = (x1 - x2) ** 2 + (y1 - y2) ** 2
                if r2 > max_dist:
                    max_dist = r2
        max_dist = math.sqrt(max_dist)
    else:
        # user specified maximum distance
        max_dist = L
    nodes = G.nodes()
    if L is None:
        # Waxman-1 model
        # try all pairs, connect randomly based on euclidean distance
        while nodes:
            u = nodes.pop()
            x1, y1 = G.node[u]['pos']
            for v in nodes:
                x2, y2 = G.node[v]['pos']
                r = math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
                if random.random() < alpha * math.exp(-r / (beta * max_dist)):
                    G.add_edge(u, v)
    else:
        # Waxman-2 model
        # try all pairs, connect randomly based on randomly chosen l
        while nodes:
            u = nodes.pop()
            for v in nodes:
                r = random.random() * max_dist
                if random.random() < alpha * math.exp(-r / (beta * max_dist)):
                    G.add_edge(u, v)
    return G
def navigable_small_world_graph(n, p=1, q=1, r=2, dim=2, seed=None):
    """Return a navigable small-world graph.

    A navigable small-world graph is a directed grid with additional long-range
    connections that are chosen randomly.

      [...] we begin with a set of nodes [...] that are identified with the set
      of lattice points in an `n \times n` square, `\{(i, j): i \in \{1, 2,
      \ldots, n\}, j \in \{1, 2, \ldots, n\}\}`, and we define the *lattice
      distance* between two nodes `(i, j)` and `(k, l)` to be the number of
      "lattice steps" separating them: `d((i, j), (k, l)) = |k - i| + |l - j|`.

      For a universal constant `p \geq 1`, the node `u` has a directed edge to
      every other node within lattice distance `p` --- these are its *local
      contacts*. For universal constants `q \ge 0` and `r \ge 0` we also
      construct directed edges from `u` to `q` other nodes (the *long-range
      contacts*) using independent random trials; the `i`th directed edge from
      `u` has endpoint `v` with probability proportional to `[d(u,v)]^{-r}`.

      -- [1]_

    Parameters
    ----------
    n : int
        The number of nodes.
    p : int
        The diameter of short range connections. Each node is joined with every
        other node within this lattice distance.
    q : int
        The number of long-range connections for each node.
    r : float
        Exponent for decaying probability of connections. The probability of
        connecting to a node at lattice distance `d` is `1/d^r`.
    dim : int
        Dimension of grid
    seed : int, optional
        Seed for random number generator (default=None).

    References
    ----------
    .. [1] J. Kleinberg. The small-world phenomenon: An algorithmic
       perspective. Proc. 32nd ACM Symposium on Theory of Computing, 2000.
    """
    if (p < 1):
        raise nx.NetworkXException("p must be >= 1")
    if (q < 0):
        raise nx.NetworkXException("q must be >= 0")
    if (r < 0):
        # Message fixed: the check is r < 0, so the requirement is r >= 0
        # (the original message said ">= 1").
        raise nx.NetworkXException("r must be >= 0")
    if seed is not None:
        random.seed(seed)
    G = nx.DiGraph()
    nodes = list(product(range(n), repeat=dim))
    for p1 in nodes:
        # cumulative (unnormalized) probabilities for long-range targets
        probs = [0]
        for p2 in nodes:
            if p1 == p2:
                continue
            d = sum((abs(b - a) for a, b in zip(p1, p2)))
            if d <= p:
                # local contact: deterministic edge
                G.add_edge(p1, p2)
            probs.append(d ** -r)
        cdf = list(nx.utils.accumulate(probs))
        for _ in range(q):
            # sample a long-range contact with probability ~ d^-r
            target = nodes[bisect_left(cdf, random.uniform(0, cdf[-1]))]
            G.add_edge(p1, target)
    return G
| gpl-3.0 |
sgraham/nope | v8/build/detect_v8_host_arch.py | 53 | 2703 | #!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Outputs host CPU architecture in format recognized by gyp."""
import platform
import re
import sys
def main():
  """Print the detected host architecture and return exit status 0."""
  # print(x) is valid in both Python 2 (parenthesized expression) and
  # Python 3; the bare `print x` statement was Python-2-only.
  print(DoMain([]))
  return 0
def DoMain(_):
  """Hook to be called from gyp without starting a separate python
  interpreter.  Returns the host CPU architecture in gyp's naming."""
  machine = platform.machine()

  # Normalize the kernel-reported machine type to gyp's arch names; an
  # unrecognized machine type is passed through unchanged.
  if re.match(r'i.86', machine) or machine == 'i86pc':
    arch = 'ia32'
  elif machine in ('x86_64', 'amd64'):
    arch = 'x64'
  elif machine.startswith('arm'):
    arch = 'arm'
  elif machine == 'aarch64':
    arch = 'arm64'
  elif machine == 'mips64':
    arch = 'mips64el'
  elif machine.startswith('mips'):
    arch = 'mipsel'
  else:
    arch = machine

  # platform.machine is based on the running kernel.  It's possible to use a
  # 64-bit kernel with a 32-bit userland, e.g. to give the linker slightly
  # more memory, so trust the python binary's bitness over the kernel's.
  if arch == 'x64' and platform.architecture()[0] == '32bit':
    arch = 'ia32'

  return arch
# Script entry point: print the host architecture, exit with main()'s status.
if __name__ == '__main__':
  sys.exit(main())
| bsd-3-clause |
wagtail/wagtail | wagtail/contrib/redirects/base_formats.py | 7 | 6511 | """
Copyright (c) Bojan Mihelac and individual contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# https://raw.githubusercontent.com/django-import-export/django-import-export/master/import_export/formats/base_formats.py
from importlib import import_module
import tablib
class Format:
    """Base class describing one import/export file format.

    Subclasses override the hooks below; the defaults describe an opaque
    binary format that can neither import nor export.
    """

    def get_title(self):
        """Title of the format; the class object itself by default."""
        return type(self)

    def create_dataset(self, in_stream):
        """Create a dataset from the given string (subclass responsibility)."""
        raise NotImplementedError()

    def export_data(self, dataset, **kwargs):
        """Render the dataset in this format (subclass responsibility)."""
        raise NotImplementedError()

    def is_binary(self):
        """Whether the format's on-disk representation is binary."""
        return True

    def get_read_mode(self):
        """File-open mode for reading this format."""
        return 'rb'

    def get_extension(self):
        """File extension used by this format, without a leading dot."""
        return ""

    def get_content_type(self):
        # For content types see
        # https://www.iana.org/assignments/media-types/media-types.xhtml
        return 'application/octet-stream'

    @classmethod
    def is_available(cls):
        """Whether the format's backing library is usable in this install."""
        return True

    def can_import(self):
        """Import support is opt-in for subclasses."""
        return False

    def can_export(self):
        """Export support is opt-in for subclasses."""
        return False
class TablibFormat(Format):
    """Format backed by a tablib format module named in ``TABLIB_MODULE``."""
    # Dotted path of the tablib format module, e.g. 'tablib.formats._csv'.
    TABLIB_MODULE = None
    CONTENT_TYPE = 'application/octet-stream'
    def get_format(self):
        """
        Import and returns tablib module.
        """
        try:
            # Available since tablib 1.0
            from tablib.formats import registry
        except ImportError:
            # Older tablib: the format is a plain importable module.
            return import_module(self.TABLIB_MODULE)
        else:
            # tablib >= 1.0: look the format up in the registry by its key,
            # derived from the module name ('_csv' -> 'csv').
            key = self.TABLIB_MODULE.split('.')[-1].replace('_', '')
            return registry.get_format(key)
    @classmethod
    def is_available(cls):
        # Probe: the format is available only if its backing module/format
        # object can actually be loaded.
        try:
            cls().get_format()
        except (tablib.core.UnsupportedFormat, ImportError):
            return False
        return True
    def get_title(self):
        return self.get_format().title
    def create_dataset(self, in_stream, **kwargs):
        """Parse ``in_stream`` into a tablib Dataset using this format."""
        return tablib.import_set(in_stream, format=self.get_title())
    def export_data(self, dataset, **kwargs):
        """Render ``dataset`` in this format; kwargs pass through to tablib."""
        return dataset.export(self.get_title(), **kwargs)
    def get_extension(self):
        # First registered extension is treated as canonical.
        return self.get_format().extensions[0]
    def get_content_type(self):
        return self.CONTENT_TYPE
    def can_import(self):
        # Capability is inferred from the tablib format's API surface.
        return hasattr(self.get_format(), 'import_set')
    def can_export(self):
        return hasattr(self.get_format(), 'export_set')
class TextFormat(TablibFormat):
    """Tablib-backed format whose on-disk representation is text, not binary."""
    def get_read_mode(self):
        return 'r'
    def is_binary(self):
        return False
class CSV(TextFormat):
    """Comma-separated values, handled by tablib's csv format."""
    TABLIB_MODULE = 'tablib.formats._csv'
    CONTENT_TYPE = 'text/csv'
    # The former create_dataset override here was a pure pass-through to
    # super() and has been removed; the inherited implementation is identical.
class JSON(TextFormat):
    """JSON documents, handled by tablib's json format."""
    TABLIB_MODULE = 'tablib.formats._json'
    CONTENT_TYPE = 'application/json'
class YAML(TextFormat):
    """YAML documents, handled by tablib's yaml format."""
    TABLIB_MODULE = 'tablib.formats._yaml'
    # See https://stackoverflow.com/questions/332129/yaml-mime-type
    # (YAML has no registered IANA media type; text/yaml is a common choice.)
    CONTENT_TYPE = 'text/yaml'
class TSV(TextFormat):
    """Tab-separated values, handled by tablib's tsv format."""
    TABLIB_MODULE = 'tablib.formats._tsv'
    CONTENT_TYPE = 'text/tab-separated-values'
    # The former create_dataset override here was a pure pass-through to
    # super() and has been removed; the inherited implementation is identical.
class ODS(TextFormat):
    # NOTE(review): ODS files are zip-based containers, yet this subclasses
    # TextFormat (text read mode) -- confirm that is intentional.
    TABLIB_MODULE = 'tablib.formats._ods'
    CONTENT_TYPE = 'application/vnd.oasis.opendocument.spreadsheet'
class HTML(TextFormat):
    # HTML table export via tablib's HTML format.
    TABLIB_MODULE = 'tablib.formats._html'
    CONTENT_TYPE = 'text/html'
class XLS(TablibFormat):
    # Legacy Excel (.xls), read with the third-party xlrd package.
    TABLIB_MODULE = 'tablib.formats._xls'
    CONTENT_TYPE = 'application/vnd.ms-excel'
    def create_dataset(self, in_stream):
        """
        Create a dataset from the first sheet of an XLS workbook.

        NOTE(review): unlike the base class this signature takes no
        ``**kwargs``; ``in_stream`` is passed as raw bytes to xlrd.
        """
        import xlrd
        xls_book = xlrd.open_workbook(file_contents=in_stream)
        dataset = tablib.Dataset()
        sheet = xls_book.sheets()[0]
        # First row supplies the headers; the remaining rows are data.
        dataset.headers = sheet.row_values(0)
        for i in range(1, sheet.nrows):
            dataset.append(sheet.row_values(i))
        return dataset
class XLSX(TablibFormat):
    # Modern Excel (.xlsx), read with the third-party openpyxl package.
    TABLIB_MODULE = 'tablib.formats._xlsx'
    CONTENT_TYPE = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    def create_dataset(self, in_stream):
        """
        Create a dataset from the active sheet of an XLSX workbook.

        NOTE(review): the read-only workbook is never closed here; consider
        whether a try/finally around the read is needed.
        """
        from io import BytesIO
        import openpyxl
        xlsx_book = openpyxl.load_workbook(BytesIO(in_stream), read_only=True)
        dataset = tablib.Dataset()
        sheet = xlsx_book.active
        # obtain generator
        rows = sheet.rows
        # First row supplies the headers; the remaining rows are data.
        dataset.headers = [cell.value for cell in next(rows)]
        for row in rows:
            row_values = [cell.value for cell in row]
            dataset.append(row_values)
        return dataset
#: These are the default formats for import and export. Whether they can be
#: used or not depends on their availability in the installed tablib library.
DEFAULT_FORMATS = [fmt for fmt in (
    CSV,
    XLS,
    XLSX,
    TSV,
    ODS,
    JSON,
    YAML,
    HTML,
) if fmt.is_available()]
| bsd-3-clause |
njwilson23/scipy | scipy/linalg/tests/test_solve_toeplitz.py | 94 | 3975 | """Test functions for linalg._solve_toeplitz module
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.linalg._solve_toeplitz import levinson
from scipy.linalg import solve, toeplitz, solve_toeplitz
from numpy.testing import (run_module_suite, assert_equal, assert_allclose,
assert_raises)
from numpy.testing.decorators import knownfailureif
def test_solve_equivalence():
    """solve_toeplitz() should agree with generic solve() on Toeplitz systems."""
    rng = np.random.RandomState(1234)
    for size in (1, 2, 3, 10):
        col = rng.randn(size)
        if rng.rand() < 0.5:
            col = col + 1j * rng.randn(size)
        row = rng.randn(size)
        if rng.rand() < 0.5:
            row = row + 1j * rng.randn(size)
        rhs = rng.randn(size)
        if rng.rand() < 0.5:
            rhs = rhs + 1j * rng.randn(size)
        # Both the first column and the first row are provided.
        assert_allclose(solve_toeplitz((col, row), rhs),
                        solve(toeplitz(col, r=row), rhs))
        # Only the first column is provided; the row is derived from it.
        assert_allclose(solve_toeplitz(col, b=rhs),
                        solve(toeplitz(col), rhs))
def test_multiple_rhs():
    """solve_toeplitz() should handle stacked right-hand sides of any rank."""
    rng = np.random.RandomState(1234)
    col = rng.randn(4)
    row = rng.randn(4)
    for shift in [0, 1j]:
        for shape in ((4,), (4, 3), (4, 3, 2)):
            rhs = rng.randn(*shape) + shift
            got = solve_toeplitz((col, row), b=rhs)
            expected = solve(toeplitz(col, r=row), rhs)
            assert_equal(got.shape, shape)
            assert_equal(expected.shape, shape)
            assert_allclose(got, expected)
def test_zero_diag_error():
    """A zero on the main diagonal makes Levinson-Durbin raise LinAlgError."""
    rng = np.random.RandomState(1234)
    size = 4
    col = rng.randn(size)
    row = rng.randn(size)
    rhs = rng.randn(size)
    col[0] = 0  # the diagonal entry of the Toeplitz matrix
    assert_raises(np.linalg.LinAlgError,
                  solve_toeplitz, (col, row), b=rhs)
def test_wikipedia_counterexample():
    """Levinson-Durbin also fails on this case from the article's talk page."""
    rng = np.random.RandomState(1234)
    first_col = [2, 2, 1]
    rhs = rng.randn(3)
    assert_raises(np.linalg.LinAlgError, solve_toeplitz, first_col, b=rhs)
def test_reflection_coeffs():
    # Check that the partial solutions produced by successively larger
    # systems match the reflection coefficients reported by levinson().
    random = np.random.RandomState(1234)
    y_d = random.randn(10)
    y_z = random.randn(10) + 1j
    reflection_coeffs_d = [1]
    reflection_coeffs_z = [1]
    for i in range(2, 10):
        # Last component of the partial solution of the order-(i-1) system.
        reflection_coeffs_d.append(solve_toeplitz(y_d[:(i-1)], b=y_d[1:i])[-1])
        reflection_coeffs_z.append(solve_toeplitz(y_z[:(i-1)], b=y_z[1:i])[-1])
    # Full first-column/first-row layout expected by the low-level levinson().
    y_d_concat = np.concatenate((y_d[-2:0:-1], y_d[:-1]))
    y_z_concat = np.concatenate((y_z[-2:0:-1].conj(), y_z[:-1]))
    _, ref_d = levinson(y_d_concat, b=y_d[1:])
    _, ref_z = levinson(y_z_concat, b=y_z[1:])
    assert_allclose(reflection_coeffs_d, ref_d[:-1])
    assert_allclose(reflection_coeffs_z, ref_z[:-1])
# NOTE(review): the reason string below has a typo ("iteraton"); it is a
# runtime argument so it is left untouched here.
@knownfailureif(True, 'Instability of Levinson iteraton')
def test_unstable():
    # this is a "Gaussian Toeplitz matrix", as mentioned in Example 2 of
    # I. Gohbert, T. Kailath and V. Olshevsky "Fast Gaussian Elimination with
    # Partial Pivoting for Matrices with Displacement Structure"
    # Mathematics of Computation, 64, 212 (1995), pp 1557-1576
    # which can be unstable for levinson recursion.
    # other fast toeplitz solvers such as GKO or Burg should be better.
    random = np.random.RandomState(1234)
    n = 100
    c = 0.9 ** (np.arange(n)**2)
    y = random.randn(n)
    solution1 = solve_toeplitz(c, b=y)
    solution2 = solve(toeplitz(c), y)
    # Expected to fail: Levinson recursion loses accuracy on this matrix.
    assert_allclose(solution1, solution2)
# Allow running this test module directly.
if __name__ == '__main__':
    run_module_suite()
| bsd-3-clause |
adamsd5/yavalath | memorycontrol.py | 1 | 4959 | """This holds a routine for restricting the current process memory on Windows."""
import multiprocessing
import ctypes
def set_memory_limit(memory_limit):
    """Cap the current process's memory using a Windows job object.

    Creates a new unnamed job object with the given job-wide memory limit in
    bytes and assigns the *current* process to it: the process together with
    its descendant processes will not be allowed to exceed the limit. Because
    ``purge_pid_on_exit`` is hard-coded to True, the job handle is left
    dangling so that, when this process exits, all processes in the job are
    killed (JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE).

    Raises RuntimeError if any of the underlying Win32 calls fail.
    Windows-only: relies on ctypes.windll.
    """
    import os
    pid = os.getpid()
    purge_pid_on_exit = True
    # Windows API constants, used for OpenProcess and SetInformationJobObject.
    PROCESS_TERMINATE = 0x1
    PROCESS_SET_QUOTA = 0x100
    JobObjectExtendedLimitInformation = 9
    # NOTE(review): JOB_OBJECT_LIMIT_PROCESS_MEMORY is defined but unused;
    # only the job-wide limit is applied below.
    JOB_OBJECT_LIMIT_PROCESS_MEMORY = 0x100
    JOB_OBJECT_LIMIT_JOB_MEMORY = 0x200
    JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x2000
    class JOBOBJECT_EXTENDED_LIMIT_INFORMATION(ctypes.Structure):
        """Windows API structure, used as input to SetInformationJobObject."""
        class JOBOBJECT_BASIC_LIMIT_INFORMATION(ctypes.Structure):
            _fields_ = [("PerProcessUserTimeLimit", ctypes.c_int64),
                        ("PerJobUserTimeLimit", ctypes.c_int64),
                        ("LimitFlags", ctypes.c_uint32),
                        ("MinimumWorkingSetSize", ctypes.c_void_p),
                        ("MaximumWorkingSetSize", ctypes.c_void_p),
                        ("ActiveProcessLimit", ctypes.c_uint32),
                        ("Affinity", ctypes.c_void_p),
                        ("PriorityClass", ctypes.c_uint32),
                        ("SchedulingClass", ctypes.c_uint32)]
        class IO_COUNTERS(ctypes.Structure):
            _fields_ = [("ReadOperationCount", ctypes.c_uint64),
                        ("WriteOperationCount", ctypes.c_uint64),
                        ("OtherOperationCount", ctypes.c_uint64),
                        ("ReadTransferCount", ctypes.c_uint64),
                        ("WriteTransferCount", ctypes.c_uint64),
                        ("OtherTransferCount", ctypes.c_uint64)]
        _fields_ = [("BasicLimitInformation", JOBOBJECT_BASIC_LIMIT_INFORMATION),
                    ("IoInfo", IO_COUNTERS),
                    ("ProcessMemoryLimit", ctypes.c_void_p),
                    ("JobMemoryLimit", ctypes.c_void_p),
                    ("PeakProcessMemoryUsed", ctypes.c_void_p),
                    ("PeakJobMemoryUsed", ctypes.c_void_p)]
    job_info = JOBOBJECT_EXTENDED_LIMIT_INFORMATION()
    # Job-wide cap (all processes in the job share this limit).
    job_info.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_JOB_MEMORY
    if purge_pid_on_exit:
        job_info.BasicLimitInformation.LimitFlags |= JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
    job_info.JobMemoryLimit = memory_limit
    kernel = ctypes.windll.kernel32
    # Unnamed job object (ANSI variant; no name is passed anyway).
    job = kernel.CreateJobObjectA(None, None)
    if job == 0:
        raise RuntimeError("CreateJobObjectA failed")
    keep_job_handle = False
    try:
        if not kernel.SetInformationJobObject(
                job,
                JobObjectExtendedLimitInformation,
                ctypes.POINTER(JOBOBJECT_EXTENDED_LIMIT_INFORMATION)(job_info),
                ctypes.sizeof(JOBOBJECT_EXTENDED_LIMIT_INFORMATION)):
            raise RuntimeError("SetInformationJobObject failed")
        process = kernel.OpenProcess(PROCESS_SET_QUOTA | PROCESS_TERMINATE, False, pid)
        if process == 0:
            raise RuntimeError("OpenProcess failed")
        try:
            if not kernel.AssignProcessToJobObject(job, process):
                raise RuntimeError("AssignProcessToJobObject failed")
            # If purge_pid_on_exit is true, we kill process pid and all its
            # descendants when the job handle is closed. So, we keep the handle
            # dangling, and it will be closed when *this* process terminates.
            keep_job_handle = purge_pid_on_exit
        finally:
            if not kernel.CloseHandle(process):
                raise RuntimeError("CloseHandle failed")
    finally:
        if not (keep_job_handle or kernel.CloseHandle(job)):
            raise RuntimeError("CloseHandle failed")
def allocate(bytes):
    """Try to zero-allocate the given number of bytes via numpy.

    Logs the outcome either way and re-raises on failure so a capped child
    process surfaces the error to its parent.
    """
    import numpy
    try:
        buf = numpy.zeros(shape=(bytes,), dtype='i1')
        print("allocation done:", bytes)
    except Exception as ex:
        print("Failed to allocate:", ex)
        raise
def runner(thunk, memory_limit, *args):
    # Child-process entry point: apply the memory cap, then run the payload.
    set_memory_limit(memory_limit)
    thunk(*args)
def run_in_process_with_memory_limit(thunk, memory_limit, test_bytes):
    """Run *thunk* in a fresh child process capped by a job-object memory limit.

    Blocks until the child exits.
    """
    child = multiprocessing.Process(
        target=runner, args=(thunk, memory_limit, test_bytes))
    child.start()
    child.join()
def main():
    """Demo: cap memory at 100 MB and try to allocate exactly that much."""
    cap = 1000 * 1000 * 100
    run_in_process_with_memory_limit(allocate, memory_limit=cap, test_bytes=cap)
# Script entry point.
if __name__ == "__main__":
    main()
| mit |
ryuunosukeyoshi/PartnerPoi-Bot | lib/six.py | 2715 | 30098 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.10.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
# Version-specific aliases for the fundamental string/integer/class types.
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes
    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str
    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        class X(object):
            def __len__(self):
                return 1 << 31
        # len() coerces __len__'s result to Py_ssize_t, so an OverflowError
        # here reveals a 32-bit build.
        try:
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
    # Descriptor that resolves its value on first access (via a subclass's
    # _resolve()), caches the result on the instance, then deletes itself
    # from the class so later accesses are plain attribute lookups.
    def __init__(self, name):
        self.name = name
    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result)  # Invokes __set__.
        try:
            # This is a bit ugly, but it avoids running this again by
            # removing this descriptor.
            delattr(obj.__class__, self.name)
        except AttributeError:
            pass
        return result
class MovedModule(_LazyDescr):
    # A module that lives under a different name on Python 2 vs Python 3,
    # imported lazily on first attribute access.
    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old
    def _resolve(self):
        return _import_module(self.mod)
    def __getattr__(self, attr):
        _module = self._resolve()
        value = getattr(_module, attr)
        # Cache the attribute so subsequent lookups bypass __getattr__.
        setattr(self, attr, value)
        return value
class _LazyModule(types.ModuleType):
    # Module whose attributes are the _LazyDescr entries listed in
    # _moved_attributes; each imports its target on first access.
    def __init__(self, name):
        super(_LazyModule, self).__init__(name)
        self.__doc__ = self.__class__.__doc__
    def __dir__(self):
        attrs = ["__doc__", "__name__"]
        attrs += [attr.name for attr in self._moved_attributes]
        return attrs
    # Subclasses should override this
    _moved_attributes = []
class MovedAttribute(_LazyDescr):
    # An attribute (function/class) that moved between modules, and possibly
    # changed name, across Python 2 and 3.
    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # Fall back: new attr name defaults to the old one, then to *name*.
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr
    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _SixMetaPathImporter(object):
    """
    A meta path importer to import six.moves and its submodules.
    This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python3
    """
    def __init__(self, six_module_name):
        self.name = six_module_name
        self.known_modules = {}
    def _add_module(self, mod, *fullnames):
        # Register *mod* under one or more "<six>.<fullname>" keys.
        for fullname in fullnames:
            self.known_modules[self.name + "." + fullname] = mod
    def _get_module(self, fullname):
        return self.known_modules[self.name + "." + fullname]
    def find_module(self, fullname, path=None):
        # PEP302 finder: only claim modules we registered ourselves.
        if fullname in self.known_modules:
            return self
        return None
    def __get_module(self, fullname):
        try:
            return self.known_modules[fullname]
        except KeyError:
            raise ImportError("This loader does not know module " + fullname)
    def load_module(self, fullname):
        try:
            # in case of a reload
            return sys.modules[fullname]
        except KeyError:
            pass
        mod = self.__get_module(fullname)
        if isinstance(mod, MovedModule):
            mod = mod._resolve()
        else:
            mod.__loader__ = self
        sys.modules[fullname] = mod
        return mod
    def is_package(self, fullname):
        """
        Return true, if the named module is a package.
        We need this method to get correct spec objects with
        Python 3.4 (see PEP451)
        """
        return hasattr(self.__get_module(fullname), "__path__")
    def get_code(self, fullname):
        """Return None
        Required, if is_package is implemented"""
        self.__get_module(fullname)  # eventually raises ImportError
        return None
    get_source = get_code  # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
    """Lazy loading of moved objects"""
    __path__ = []  # mark as package
# Mapping table: each entry names an object/module and where it lives on
# Python 2 vs Python 3.
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("intern", "__builtin__", "sys"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
    MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("UserDict", "UserDict", "collections"),
    MovedAttribute("UserList", "UserList", "collections"),
    MovedAttribute("UserString", "UserString", "collections"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
    MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
    MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("_thread", "thread", "_thread"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
    MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
    _moved_attributes += [
        MovedModule("winreg", "_winreg"),
    ]
# Install every entry on _MovedItems and register moved modules with the
# meta-path importer so "six.moves.<name>" imports work.
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
    if isinstance(attr, MovedModule):
        _importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_parse"""
# Py2 urlparse/urllib names unified under Py3's urllib.parse.
_urllib_parse_moved_attributes = [
    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
    MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
    MovedAttribute("quote", "urllib", "urllib.parse"),
    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
    MovedAttribute("unquote", "urllib", "urllib.parse"),
    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
    MovedAttribute("urlencode", "urllib", "urllib.parse"),
    MovedAttribute("splitquery", "urllib", "urllib.parse"),
    MovedAttribute("splittag", "urllib", "urllib.parse"),
    MovedAttribute("splituser", "urllib", "urllib.parse"),
    MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
    MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
    MovedAttribute("uses_params", "urlparse", "urllib.parse"),
    MovedAttribute("uses_query", "urlparse", "urllib.parse"),
    MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
    setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
                      "moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_error"""
# Exception classes unified under Py3's urllib.error.
_urllib_error_moved_attributes = [
    MovedAttribute("URLError", "urllib2", "urllib.error"),
    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
    setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
                      "moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_request"""
# Py2 urllib/urllib2 request machinery unified under Py3's urllib.request.
_urllib_request_moved_attributes = [
    MovedAttribute("urlopen", "urllib2", "urllib.request"),
    MovedAttribute("install_opener", "urllib2", "urllib.request"),
    MovedAttribute("build_opener", "urllib2", "urllib.request"),
    MovedAttribute("pathname2url", "urllib", "urllib.request"),
    MovedAttribute("url2pathname", "urllib", "urllib.request"),
    MovedAttribute("getproxies", "urllib", "urllib.request"),
    MovedAttribute("Request", "urllib2", "urllib.request"),
    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
    MovedAttribute("URLopener", "urllib", "urllib.request"),
    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
    setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
                      "moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_response"""
# Response helper classes unified under Py3's urllib.response.
_urllib_response_moved_attributes = [
    MovedAttribute("addbase", "urllib", "urllib.response"),
    MovedAttribute("addclosehook", "urllib", "urllib.response"),
    MovedAttribute("addinfo", "urllib", "urllib.response"),
    MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
    setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
                      "moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
                      "moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
    __path__ = []  # mark as package
    parse = _importer._get_module("moves.urllib_parse")
    error = _importer._get_module("moves.urllib_error")
    request = _importer._get_module("moves.urllib_request")
    response = _importer._get_module("moves.urllib_response")
    robotparser = _importer._get_module("moves.urllib_robotparser")
    def __dir__(self):
        return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
                      "moves.urllib")
def add_move(move):
    """Register *move* (a MovedAttribute/MovedModule) on six.moves."""
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves."""
    try:
        # First try the lazy descriptor still living on the class.
        delattr(_MovedItems, name)
    except AttributeError:
        try:
            # Already resolved: it was cached on the moves instance.
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
# Attribute names for method/function introspection; these were renamed
# between Python 2 and Python 3.
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"
    _func_closure = "__closure__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _func_globals = "__globals__"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"
    _func_closure = "func_closure"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _func_globals = "func_globals"
try:
    advance_iterator = next
except NameError:
    # Python < 2.6 has no builtin next(); emulate it via the .next() method.
    def advance_iterator(it):
        return it.next()
next = advance_iterator
try:
    callable = callable
except NameError:
    # Python 3.0/3.1 removed callable(); reimplement it via __call__ lookup.
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
    def get_unbound_function(unbound):
        # Py3 has no unbound methods; functions are returned as-is.
        return unbound
    create_bound_method = types.MethodType
    def create_unbound_method(func, cls):
        return func
    Iterator = object
else:
    def get_unbound_function(unbound):
        return unbound.im_func
    def create_bound_method(func, obj):
        return types.MethodType(func, obj, obj.__class__)
    def create_unbound_method(func, cls):
        return types.MethodType(func, None, cls)
    class Iterator(object):
        # Base class mapping Py2's next() onto a Py3-style __next__().
        def next(self):
            return type(self).__next__(self)
    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")
# Accessors for method/function internals, using the version-specific
# attribute names chosen above.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
# Uniform dict iteration/view helpers across Python 2 and 3.
if PY3:
    def iterkeys(d, **kw):
        return iter(d.keys(**kw))
    def itervalues(d, **kw):
        return iter(d.values(**kw))
    def iteritems(d, **kw):
        return iter(d.items(**kw))
    def iterlists(d, **kw):
        # For multi-dicts (e.g. Django QueryDict) exposing .lists().
        return iter(d.lists(**kw))
    viewkeys = operator.methodcaller("keys")
    viewvalues = operator.methodcaller("values")
    viewitems = operator.methodcaller("items")
else:
    def iterkeys(d, **kw):
        return d.iterkeys(**kw)
    def itervalues(d, **kw):
        return d.itervalues(**kw)
    def iteritems(d, **kw):
        return d.iteritems(**kw)
    def iterlists(d, **kw):
        return d.iterlists(**kw)
    viewkeys = operator.methodcaller("viewkeys")
    viewvalues = operator.methodcaller("viewvalues")
    viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
         "Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
         "Return an iterator over the (key, [values]) pairs of a dictionary.")
# Byte/text literal helpers and byte-indexing shims.
if PY3:
    def b(s):
        return s.encode("latin-1")
    def u(s):
        return s
    unichr = chr
    import struct
    int2byte = struct.Struct(">B").pack
    del struct
    byte2int = operator.itemgetter(0)
    indexbytes = operator.getitem
    iterbytes = iter
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
    _assertCountEqual = "assertCountEqual"
    # unittest method names changed in 3.2.
    if sys.version_info[1] <= 1:
        _assertRaisesRegex = "assertRaisesRegexp"
        _assertRegex = "assertRegexpMatches"
    else:
        _assertRaisesRegex = "assertRaisesRegex"
        _assertRegex = "assertRegex"
else:
    def b(s):
        return s
    # Workaround for standalone backslash
    def u(s):
        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
    unichr = unichr
    int2byte = chr
    def byte2int(bs):
        return ord(bs[0])
    def indexbytes(buf, i):
        return ord(buf[i])
    iterbytes = functools.partial(itertools.imap, ord)
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
    _assertCountEqual = "assertItemsEqual"
    _assertRaisesRegex = "assertRaisesRegexp"
    _assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
# Dispatch to the correct unittest assertion method name for this version.
def assertCountEqual(self, *args, **kwargs):
    return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
    return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
    return getattr(self, _assertRegex)(*args, **kwargs)
# exec/raise helpers: Py2-only syntax must be hidden inside exec_() strings
# so this module still compiles on Python 3.
if PY3:
    exec_ = getattr(moves.builtins, "exec")
    def reraise(tp, value, tb=None):
        if value is None:
            value = tp()
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
else:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            # Default to the caller's globals/locals.
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")
    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
    exec_("""def raise_from(value, from_value):
    if from_value is None:
        raise value
    raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
    exec_("""def raise_from(value, from_value):
    raise value from from_value
""")
else:
    def raise_from(value, from_value):
        raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
    def print_(*args, **kwargs):
        """The new-style print function for Python 2.4 and 2.5."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return
        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            # If the file has an encoding, encode unicode with it.
            if (isinstance(fp, file) and
                    isinstance(data, unicode) and
                    fp.encoding is not None):
                errors = getattr(fp, "errors", None)
                if errors is None:
                    errors = "strict"
                data = data.encode(fp.encoding, errors)
            fp.write(data)
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        # If any piece is unicode, print everything as unicode.
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
if sys.version_info[:2] < (3, 3):
    # print() before 3.3 lacks the flush= keyword; wrap to emulate it.
    _print = print_
    def print_(*args, **kwargs):
        fp = kwargs.get("file", sys.stdout)
        flush = kwargs.pop("flush", False)
        _print(*args, **kwargs)
        if flush and fp is not None:
            fp.flush()
_add_doc(reraise, """Reraise an exception.""")
# functools.wraps only records __wrapped__ from Python 3.4 onwards;
# provide a shim that backfills the attribute on older interpreters.
if sys.version_info[0:2] < (3, 4):
    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
              updated=functools.WRAPPER_UPDATES):
        """Like functools.wraps, but also sets __wrapped__ on the result."""
        def decorate(func):
            decorated = functools.wraps(wrapped, assigned, updated)(func)
            decorated.__wrapped__ = wrapped
            return decorated
        return decorate
else:
    wraps = functools.wraps
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass.

    Returns a temporary base class; when a subclass of it is defined, the
    temporary class is replaced by a real class built with *meta* and
    *bases*.  This works identically on Python 2 and 3.
    """
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(meta):

        def __new__(cls, name, this_bases, d):
            # this_bases (containing the temporary class) is discarded;
            # the real class is created with the caller's bases.
            return meta(name, bases, d)
    # type.__new__ is called directly so that metaclass.__new__ above is
    # NOT invoked while creating the temporary class itself.
    return type.__new__(metaclass, 'temporary_class', (), {})
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass."""
    def wrapper(cls):
        # Rebuild the decorated class through the metaclass, using a copy
        # of its namespace.
        body = dict(cls.__dict__)
        slots = body.get('__slots__')
        if slots is not None:
            # Slot descriptors are recreated by the type machinery, so the
            # copied namespace must not carry them over.
            names = [slots] if isinstance(slots, str) else slots
            for name in names:
                body.pop(name)
        # Likewise, __dict__/__weakref__ descriptors are regenerated.
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.
    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
    """
    if not PY2:
        return klass
    if '__str__' not in klass.__dict__:
        raise ValueError("@python_2_unicode_compatible cannot be applied "
                         "to %s because it doesn't define __str__()." %
                         klass.__name__)
    # Move the text-returning __str__ to __unicode__, and have __str__
    # emit UTF-8 encoded bytes as Python 2 callers expect.
    klass.__unicode__ = klass.__str__
    klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = []  # required for PEP 302 and PEP 451
__package__ = __name__  # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
    # Mark the module spec as a (namespace-less) package as well.
    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
    for i, importer in enumerate(sys.meta_path):
        # Here's some real nastiness: Another "instance" of the six module might
        # be floating around. Therefore, we can't use isinstance() to check for
        # the six meta path importer, since the other six instance will have
        # inserted an importer with different class.
        if (type(importer).__name__ == "_SixMetaPathImporter" and
                importer.name == __name__):
            del sys.meta_path[i]
            break
    # Avoid leaking the loop variables into the module namespace.
    del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
# ---- end of vendored six module (license: gpl-3.0) ----
# ---- begin: Django-1.5.1/django/contrib/gis/tests/geoapp/models.py
#      (from repo xiaolonginfo/decode-Django) ----
from django.contrib.gis.db import models
from django.contrib.gis.tests.utils import mysql, spatialite
from django.utils.encoding import python_2_unicode_compatible
# MySQL spatial indices can't handle NULL geometries, so geometry columns
# are only declared nullable when the backend is not MySQL.
null_flag = not mysql
@python_2_unicode_compatible
class Country(models.Model):
    """A country bounded by a multi-polygon geometry."""
    name = models.CharField(max_length=30)
    mpoly = models.MultiPolygonField()  # SRID, by default, is 4326
    objects = models.GeoManager()  # enables spatial lookups on querysets

    def __str__(self): return self.name
@python_2_unicode_compatible
class City(models.Model):
    """A named city located by a point geometry."""
    name = models.CharField(max_length=30)
    point = models.PointField()
    objects = models.GeoManager()

    def __str__(self): return self.name
# This is an inherited model from City
class PennsylvaniaCity(City):
    """Multi-table-inheritance child of City with county metadata."""
    county = models.CharField(max_length=30)
    founded = models.DateTimeField(null=True)
    objects = models.GeoManager()  # TODO: This should be implicitly inherited.
@python_2_unicode_compatible
class State(models.Model):
    """A state whose polygon boundary may be NULL (except on MySQL)."""
    name = models.CharField(max_length=30)
    poly = models.PolygonField(null=null_flag)  # Allowing NULL geometries here.
    objects = models.GeoManager()

    def __str__(self): return self.name
@python_2_unicode_compatible
class Track(models.Model):
    """A named linestring path."""
    name = models.CharField(max_length=30)
    line = models.LineStringField()
    objects = models.GeoManager()

    def __str__(self): return self.name
class Truth(models.Model):
    """Non-geometric model managed by a GeoManager (for manager tests)."""
    val = models.BooleanField()
    objects = models.GeoManager()
if not spatialite:
    # SpatiaLite lacks a generic geometry column type, so this model is
    # only defined for the other spatial backends.
    @python_2_unicode_compatible
    class Feature(models.Model):
        """A named feature whose geometry may be of any type."""
        name = models.CharField(max_length=20)
        geom = models.GeometryField()
        objects = models.GeoManager()

        def __str__(self): return self.name
class MinusOneSRID(models.Model):
    """Model with an undefined (-1) SRID, for SRID-handling tests."""
    geom = models.PointField(srid=-1)  # Minus one SRID.
    objects = models.GeoManager()
# ---- end of geoapp models (license: gpl-2.0) ----
# ---- begin: duplicity/commandline.py (from repo nichdu/duplicity-onedrive) ----
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2002 Ben Escoto <ben@emerose.org>
# Copyright 2007 Kenneth Loafman <kenneth@loafman.com>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Parse command line, check for consistency, and set globals"""
from future_builtins import filter
from copy import copy
import optparse
import os
import re
import sys
import socket
try:
from hashlib import md5
except ImportError:
from md5 import new as md5
from duplicity import backend
from duplicity import dup_time
from duplicity import globals
from duplicity import gpg
from duplicity import log
from duplicity import path
from duplicity import selection
from duplicity import util
# Module-level state shared with duplicity's main program; these are
# populated by parse_cmdline_options() below.
select_opts = []  # Will hold all the selection options
select_files = []  # Will hold file objects when filelist given

full_backup = None  # Will be set to true if full command given
list_current = None  # Will be set to true if list-current command given
collection_status = None  # Will be set to true if collection-status command given
cleanup = None  # Set to true if cleanup command given
verify = None  # Set to true if verify command given

# Canonical duplicity sub-commands; any unique prefix of one of these is
# accepted on the command line (see parse_cmdline_options).
commands = ["cleanup",
            "collection-status",
            "full",
            "incremental",
            "list-current-files",
            "remove-older-than",
            "remove-all-but-n-full",
            "remove-all-inc-of-but-n-full",
            "restore",
            "verify",
            ]
def old_fn_deprecation(opt):
    """Warn that old-filename option *opt* is pending deprecation."""
    message = _("Warning: Option %s is pending deprecation "
                "and will be removed in a future release.\n"
                "Use of default filenames is strongly suggested.") % opt
    log.Log(message, log.ERROR, force_print=True)
def expand_fn(filename):
    """Return filename with environment variables and ~ expanded."""
    with_vars = os.path.expandvars(filename)
    return os.path.expanduser(with_vars)
def expand_archive_dir(archdir, backname):
    """
    Return expanded version of archdir joined with backname.
    """
    # The backup name must already be known: a default archive dir is
    # derived from it.
    assert globals.backup_name is not None, \
        "expand_archive_dir() called prior to globals.backup_name being set"

    joined = os.path.join(archdir, backname)
    return expand_fn(joined)
def generate_default_backup_name(backend_url):
    """
    @param backend_url: URL to backend.
    @returns A default backup name (string).
    """
    # Hashing the destination URL yields a stable, filesystem-safe default
    # name: restarting a backup of the same thing to the same destination
    # maps to the same archive cache.  Relative source paths are NOT
    # resolved, so users with ambiguous setups (same relative path run
    # from different directories) should set --archive-dir explicitly.
    digest = md5()
    digest.update(backend_url)
    return digest.hexdigest()
def check_file(option, opt, value):
    # optparse type checker for "file": expand env vars and ~ up front so
    # downstream code always sees a concrete path.
    return expand_fn(value)
def check_time(option, opt, value):
    """optparse "time" type checker: parse a duplicity time string."""
    try:
        parsed = dup_time.genstrtotime(value)
    except dup_time.TimeException as e:
        # Surface the parse failure as an option error so optparse
        # reports it against the offending option.
        raise optparse.OptionValueError(str(e))
    return parsed
def check_verbosity(option, opt, value):
    """optparse "verbosity" type checker.

    Accepts a digit 0-9, a single character from "ewnid", or one of the
    level words (case-insensitive) and returns the numeric log level.
    """
    value = value.lower()
    by_name = {
        'e': log.ERROR, 'error': log.ERROR,
        'w': log.WARNING, 'warning': log.WARNING,
        'n': log.NOTICE, 'notice': log.NOTICE,
        'i': log.INFO, 'info': log.INFO,
        'd': log.DEBUG, 'debug': log.DEBUG,
    }
    if value in by_name:
        return by_name[value]
    try:
        verb = int(value)
        if 0 <= verb <= 9:
            return verb
    except ValueError:
        pass
    # TRANSL: In this portion of the usage instructions, "[ewnid]" indicates which
    # characters are permitted (e, w, n, i, or d); the brackets imply their own
    # meaning in regex; i.e., only one of the characters is allowed in an instance.
    raise optparse.OptionValueError("Verbosity must be one of: digit [0-9], character [ewnid], "
                                    "or word ['error', 'warning', 'notice', 'info', 'debug']. "
                                    "The default is 4 (Notice). It is strongly recommended "
                                    "that verbosity level is set at 2 (Warning) or higher.")
class DupOption(optparse.Option):
    """optparse Option subclass with duplicity-specific behaviour.

    Registers the custom "file", "time" and "verbosity" option types and
    an "extend" action that space-concatenates repeated option values.
    """
    TYPES = optparse.Option.TYPES + ("file", "time", "verbosity",)
    # Copy the stock checker table before mutating it so the base class's
    # table is left untouched.
    TYPE_CHECKER = copy(optparse.Option.TYPE_CHECKER)
    TYPE_CHECKER["file"] = check_file
    TYPE_CHECKER["time"] = check_time
    TYPE_CHECKER["verbosity"] = check_verbosity

    ACTIONS = optparse.Option.ACTIONS + ("extend",)
    STORE_ACTIONS = optparse.Option.STORE_ACTIONS + ("extend",)
    TYPED_ACTIONS = optparse.Option.TYPED_ACTIONS + ("extend",)
    ALWAYS_TYPED_ACTIONS = optparse.Option.ALWAYS_TYPED_ACTIONS + ("extend",)

    def take_action(self, action, dest, opt, value, values, parser):
        """Implement the "extend" action; defer everything else to optparse."""
        if action == "extend":
            if not value:
                return
            if hasattr(values, dest) and getattr(values, dest):
                # Repeated occurrences accumulate, separated by a space.
                setattr(values, dest, getattr(values, dest) + ' ' + value)
            else:
                setattr(values, dest, value)
        else:
            optparse.Option.take_action(
                self, action, dest, opt, value, values, parser)
"""
Fix:
File "/usr/lib/pythonX.X/optparse.py", line XXXX, in print_help
file.write(self.format_help().encode(encoding, "replace"))
UnicodeDecodeError: 'ascii' codec can't decode byte 0xXX in position XXXX:
See:
http://bugs.python.org/issue2931
http://mail.python.org/pipermail/python-dev/2006-May/065458.html
"""
class OPHelpFix(optparse.OptionParser):
    """OptionParser whose print_help() survives non-ASCII help text.

    Works around http://bugs.python.org/issue2931 (see the note above):
    stock optparse encodes the help with the stream's possibly-ASCII
    encoding and can raise UnicodeDecodeError for translated text.
    """

    def _get_encoding(self, file):
        """
        try to get the encoding or use UTF-8
        which is default encoding in python3 and most recent unixes
        """
        encoding = getattr(file, "encoding", None)
        return encoding or 'utf-8'

    def print_help(self, file=None):
        """
        overwrite method with proper utf-8 decoding
        """
        if file is None:
            file = sys.stdout
        encoding = self._get_encoding(file)
        help = self.format_help()
        # The help is in unicode or bytes depending on the user's locale
        if not isinstance(help, unicode):
            help = self.format_help().decode('utf-8')
        # Encode with "replace" so unmappable characters degrade to "?"
        # instead of raising.
        file.write(help.encode(encoding, "replace"))
def parse_cmdline_options(arglist):
"""Parse argument list"""
global select_opts, select_files, full_backup
global list_current, collection_status, cleanup, remove_time, verify
def set_log_fd(fd):
if fd < 1:
raise optparse.OptionValueError("log-fd must be greater than zero.")
log.add_fd(fd)
def set_time_sep(sep, opt):
if sep == '-':
raise optparse.OptionValueError("Dash ('-') not valid for time-separator.")
globals.time_separator = sep
old_fn_deprecation(opt)
def add_selection(o, s, v, p):
select_opts.append((s, v))
def add_filelist(o, s, v, p):
filename = v
select_opts.append((s, filename))
try:
select_files.append(open(filename, "r"))
except IOError:
log.FatalError(_("Error opening file %s") % filename,
log.ErrorCode.cant_open_filelist)
def print_ver(o, s, v, p):
print "duplicity %s" % (globals.version)
sys.exit(0)
def add_rename(o, s, v, p):
globals.rename[os.path.normcase(os.path.normpath(v[0]))] = v[1]
parser = OPHelpFix(option_class = DupOption, usage = usage())
# If this is true, only warn and don't raise fatal error when backup
# source directory doesn't match previous backup source directory.
parser.add_option("--allow-source-mismatch", action = "store_true")
# Set to the path of the archive directory (the directory which
# contains the signatures and manifests of the relevent backup
# collection), and for checkpoint state between volumes.
# TRANSL: Used in usage help to represent a Unix-style path name. Example:
# --archive-dir <path>
parser.add_option("--archive-dir", type = "file", metavar = _("path"))
# Asynchronous put/get concurrency limit
# (default of 0 disables asynchronicity).
parser.add_option("--asynchronous-upload", action = "store_const", const = 1,
dest = "async_concurrency")
parser.add_option("--compare-data", action = "store_true")
# config dir for future use
parser.add_option("--config-dir", type = "file", metavar = _("path"),
help = optparse.SUPPRESS_HELP)
# for testing -- set current time
parser.add_option("--current-time", type = "int",
dest = "current_time", help = optparse.SUPPRESS_HELP)
# Don't actually do anything, but still report what would be done
parser.add_option("--dry-run", action = "store_true")
# TRANSL: Used in usage help to represent an ID for a GnuPG key. Example:
# --encrypt-key <gpg_key_id>
parser.add_option("--encrypt-key", type = "string", metavar = _("gpg-key-id"),
dest = "", action = "callback",
callback = lambda o, s, v, p: globals.gpg_profile.recipients.append(v)) # @UndefinedVariable
# secret keyring in which the private encrypt key can be found
parser.add_option("--encrypt-secret-keyring", type = "string", metavar = _("path"))
parser.add_option("--encrypt-sign-key", type = "string", metavar = _("gpg-key-id"),
dest = "", action = "callback",
callback = lambda o, s, v, p: (globals.gpg_profile.recipients.append(v), set_sign_key(v)))
# TRANSL: Used in usage help to represent a "glob" style pattern for
# matching one or more files, as described in the documentation.
# Example:
# --exclude <shell_pattern>
parser.add_option("--exclude", action = "callback", metavar = _("shell_pattern"),
dest = "", type = "string", callback = add_selection)
parser.add_option("--exclude-device-files", action = "callback",
dest = "", callback = add_selection)
parser.add_option("--exclude-filelist", type = "file", metavar = _("filename"),
dest = "", action = "callback", callback = add_filelist)
parser.add_option("--exclude-filelist-stdin", action = "callback", dest = "",
callback = lambda o, s, v, p: (select_opts.append(("--exclude-filelist", "standard input")),
select_files.append(sys.stdin)))
parser.add_option("--exclude-globbing-filelist", type = "file", metavar = _("filename"),
dest = "", action = "callback", callback = add_filelist)
# TRANSL: Used in usage help to represent the name of a file. Example:
# --log-file <filename>
parser.add_option("--exclude-if-present", metavar = _("filename"), dest = "",
type = "file", action = "callback", callback = add_selection)
parser.add_option("--exclude-other-filesystems", action = "callback",
dest = "", callback = add_selection)
# TRANSL: Used in usage help to represent a regular expression (regexp).
parser.add_option("--exclude-regexp", metavar = _("regular_expression"),
dest = "", type = "string", action = "callback", callback = add_selection)
# Whether we should be particularly aggressive when cleaning up
parser.add_option("--extra-clean", action = "store_true")
# used in testing only - raises exception after volume
parser.add_option("--fail-on-volume", type = "int",
help = optparse.SUPPRESS_HELP)
# used to provide a prefix on top of the defaul tar file name
parser.add_option("--file-prefix", type = "string", dest = "file_prefix", action = "store")
# used to provide a suffix for manifest files only
parser.add_option("--file-prefix-manifest", type = "string", dest = "file_prefix_manifest", action = "store")
# used to provide a suffix for archive files only
parser.add_option("--file-prefix-archive", type = "string", dest = "file_prefix_archive", action = "store")
# used to provide a suffix for sigature files only
parser.add_option("--file-prefix-signature", type = "string", dest = "file_prefix_signature", action = "store")
# used in testing only - skips upload for a given volume
parser.add_option("--skip-volume", type = "int",
help = optparse.SUPPRESS_HELP)
# If set, restore only the subdirectory or file specified, not the
# whole root.
# TRANSL: Used in usage help to represent a Unix-style path name. Example:
# --archive-dir <path>
parser.add_option("--file-to-restore", "-r", action = "callback", type = "file",
metavar = _("path"), dest = "restore_dir",
callback = lambda o, s, v, p: setattr(p.values, "restore_dir", v.rstrip('/')))
# Used to confirm certain destructive operations like deleting old files.
parser.add_option("--force", action = "store_true")
# FTP data connection type
parser.add_option("--ftp-passive", action = "store_const", const = "passive", dest = "ftp_connection")
parser.add_option("--ftp-regular", action = "store_const", const = "regular", dest = "ftp_connection")
# If set, forces a full backup if the last full backup is older than
# the time specified
parser.add_option("--full-if-older-than", type = "time", dest = "full_force_time", metavar = _("time"))
parser.add_option("--gio",action = "callback", dest = "use_gio",
callback = lambda o, s, v, p: (setattr(p.values, o.dest, True),
old_fn_deprecation(s)))
parser.add_option("--gpg-options", action = "extend", metavar = _("options"))
# TRANSL: Used in usage help to represent an ID for a hidden GnuPG key. Example:
# --hidden-encrypt-key <gpg_key_id>
parser.add_option("--hidden-encrypt-key", type = "string", metavar = _("gpg-key-id"),
dest = "", action = "callback",
callback = lambda o, s, v, p: globals.gpg_profile.hidden_recipients.append(v)) # @UndefinedVariable
# ignore (some) errors during operations; supposed to make it more
# likely that you are able to restore data under problematic
# circumstances. the default should absolutely always be False unless
# you know what you are doing.
parser.add_option("--ignore-errors", action = "callback",
dest = "ignore_errors",
callback = lambda o, s, v, p: (log.Warn(
_("Running in 'ignore errors' mode due to %s; please "
"re-consider if this was not intended") % s),
setattr(p.values, "ignore errors", True)))
# Whether to use the full email address as the user name when
# logging into an imap server. If false just the user name
# part of the email address is used.
parser.add_option("--imap-full-address", action = "store_true",
help = optparse.SUPPRESS_HELP)
# Name of the imap folder where we want to store backups.
# Can be changed with a command line argument.
# TRANSL: Used in usage help to represent an imap mailbox
parser.add_option("--imap-mailbox", metavar = _("imap_mailbox"))
parser.add_option("--include", action = "callback", metavar = _("shell_pattern"),
dest = "", type = "string", callback = add_selection)
parser.add_option("--include-filelist", type = "file", metavar = _("filename"),
dest = "", action = "callback", callback = add_filelist)
parser.add_option("--include-filelist-stdin", action = "callback", dest = "",
callback = lambda o, s, v, p: (select_opts.append(("--include-filelist", "standard input")),
select_files.append(sys.stdin)))
parser.add_option("--include-globbing-filelist", type = "file", metavar = _("filename"),
dest = "", action = "callback", callback = add_filelist)
parser.add_option("--include-regexp", metavar = _("regular_expression"), dest = "",
type = "string", action = "callback", callback = add_selection)
parser.add_option("--log-fd", type = "int", metavar = _("file_descriptor"),
dest = "", action = "callback",
callback = lambda o, s, v, p: set_log_fd(v))
# TRANSL: Used in usage help to represent the name of a file. Example:
# --log-file <filename>
parser.add_option("--log-file", type = "file", metavar = _("filename"),
dest = "", action = "callback",
callback = lambda o, s, v, p: log.add_file(v))
# Maximum block size for large files
parser.add_option("--max-blocksize", type = "int", metavar = _("number"))
# TRANSL: Used in usage help (noun)
parser.add_option("--name", dest = "backup_name", metavar = _("backup name"))
# If set to false, then do not encrypt files on remote system
parser.add_option("--no-encryption", action = "store_false", dest = "encryption")
# If set to false, then do not compress files on remote system
parser.add_option("--no-compression", action = "store_false", dest = "compression")
# If set, print the statistics after every backup session
parser.add_option("--no-print-statistics", action = "store_false", dest = "print_statistics")
# If true, filelists and directory statistics will be split on
# nulls instead of newlines.
parser.add_option("--null-separator", action = "store_true")
# number of retries on network operations
# TRANSL: Used in usage help to represent a desired number of
# something. Example:
# --num-retries <number>
parser.add_option("--num-retries", type = "int", metavar = _("number"))
# File owner uid keeps number from tar file. Like same option in GNU tar.
parser.add_option("--numeric-owner", action = "store_true")
# Whether the old filename format is in effect.
parser.add_option("--old-filenames", action = "callback",
dest = "old_filenames",
callback = lambda o, s, v, p: (setattr(p.values, o.dest, True),
old_fn_deprecation(s)))
# Level of Redundancy in % for Par2 files
parser.add_option("--par2-redundancy", type = "int", metavar = _("number"))
# Verbatim par2 options
parser.add_option("--par2-options", action = "extend", metavar = _("options"))
# Used to display the progress for the full and incremental backup operations
parser.add_option("--progress", action = "store_true")
# Used to control the progress option update rate in seconds. Default: prompts each 3 seconds
parser.add_option("--progress-rate", type = "int", metavar = _("number"))
# option to trigger Pydev debugger
parser.add_option("--pydevd", action = "store_true")
# option to rename files during restore
parser.add_option("--rename", type = "file", action = "callback", nargs = 2,
callback = add_rename)
# Restores will try to bring back the state as of the following time.
# If it is None, default to current time.
# TRANSL: Used in usage help to represent a time spec for a previous
# point in time, as described in the documentation. Example:
# duplicity remove-older-than time [options] target_url
parser.add_option("--restore-time", "--time", "-t", type = "time", metavar = _("time"))
# user added rsync options
parser.add_option("--rsync-options", action = "extend", metavar = _("options"))
# Whether to create European buckets (sorry, hard-coded to only
# support european for now).
parser.add_option("--s3-european-buckets", action = "store_true")
# Whether to use S3 Reduced Redudancy Storage
parser.add_option("--s3-use-rrs", action = "store_true")
# Whether to use "new-style" subdomain addressing for S3 buckets. Such
# use is not backwards-compatible with upper-case buckets, or buckets
# that are otherwise not expressable in a valid hostname.
parser.add_option("--s3-use-new-style", action = "store_true")
# Whether to use plain HTTP (without SSL) to send data to S3
# See <https://bugs.launchpad.net/duplicity/+bug/433970>.
parser.add_option("--s3-unencrypted-connection", action = "store_true")
# Chunk size used for S3 multipart uploads.The number of parallel uploads to
# S3 be given by chunk size / volume size. Use this to maximize the use of
# your bandwidth. Defaults to 25MB
parser.add_option("--s3-multipart-chunk-size", type = "int", action = "callback", metavar = _("number"),
callback = lambda o, s, v, p: setattr(p.values, "s3_multipart_chunk_size", v * 1024 * 1024))
# Number of processes to set the Processor Pool to when uploading multipart
# uploads to S3. Use this to control the maximum simultaneous uploads to S3.
parser.add_option("--s3-multipart-max-procs", type="int", metavar=_("number"))
# Number of seconds to wait for each part of a multipart upload to S3. Use this
# to prevent hangups when doing a multipart upload to S3.
parser.add_option("--s3_multipart_max_timeout", type="int", metavar=_("number"))
# Option to allow the s3/boto backend use the multiprocessing version.
parser.add_option("--s3-use-multiprocessing", action = "store_true")
# Option to allow use of server side encryption in s3
parser.add_option("--s3-use-server-side-encryption", action="store_true", dest="s3_use_sse")
# scp command to use (ssh pexpect backend)
parser.add_option("--scp-command", metavar = _("command"))
# sftp command to use (ssh pexpect backend)
parser.add_option("--sftp-command", metavar = _("command"))
# allow the user to switch cloudfiles backend
parser.add_option("--cf-backend", metavar = _("pyrax|cloudfiles"))
# If set, use short (< 30 char) filenames for all the remote files.
parser.add_option("--short-filenames", action = "callback",
dest = "short_filenames",
callback = lambda o, s, v, p: (setattr(p.values, o.dest, True),
old_fn_deprecation(s)))
# TRANSL: Used in usage help to represent an ID for a GnuPG key. Example:
# --encrypt-key <gpg_key_id>
parser.add_option("--sign-key", type = "string", metavar = _("gpg-key-id"),
dest = "", action = "callback",
callback = lambda o, s, v, p: set_sign_key(v))
# default to batch mode using public-key encryption
parser.add_option("--ssh-askpass", action = "store_true")
# allow the user to switch ssh backend
parser.add_option("--ssh-backend", metavar = _("paramiko|pexpect"))
# user added ssh options
parser.add_option("--ssh-options", action = "extend", metavar = _("options"))
# user added ssl options (webdav backend)
parser.add_option("--ssl-cacert-file", metavar = _("pem formatted bundle of certificate authorities"))
parser.add_option("--ssl-no-check-certificate", action = "store_true")
# Working directory for the tempfile module. Defaults to /tmp on most systems.
parser.add_option("--tempdir", dest = "temproot", type = "file", metavar = _("path"))
# network timeout value
# TRANSL: Used in usage help. Example:
# --timeout <seconds>
parser.add_option("--timeout", type = "int", metavar = _("seconds"))
# Character used like the ":" in time strings like
# 2002-08-06T04:22:00-07:00. The colon isn't good for filenames on
# windows machines.
# TRANSL: abbreviation for "character" (noun)
parser.add_option("--time-separator", type = "string", metavar = _("char"),
action = "callback",
callback = lambda o, s, v, p: set_time_sep(v, s))
# Whether to specify --use-agent in GnuPG options
parser.add_option("--use-agent", action = "store_true")
parser.add_option("--use-scp", action = "store_true")
parser.add_option("--verbosity", "-v", type = "verbosity", metavar = "[0-9]",
dest = "", action = "callback",
callback = lambda o, s, v, p: log.setverbosity(v))
parser.add_option("-V", "--version", action = "callback", callback = print_ver)
# volume size
# TRANSL: Used in usage help to represent a desired number of
# something. Example:
# --num-retries <number>
parser.add_option("--volsize", type = "int", action = "callback", metavar = _("number"),
callback = lambda o, s, v, p: setattr(p.values, "volsize", v * 1024 * 1024))
# parse the options
(options, args) = parser.parse_args()
# Copy all arguments and their values to the globals module. Don't copy
# attributes that are 'hidden' (start with an underscore) or whose name is
# the empty string (used for arguments that don't directly store a value
# by using dest="")
for f in filter(lambda x: x and not x.startswith("_"), dir(options)):
v = getattr(options, f)
# Only set if v is not None because None is the default for all the
# variables. If user didn't set it, we'll use defaults in globals.py
if v is not None:
setattr(globals, f, v)
socket.setdefaulttimeout(globals.timeout)
# expect no cmd and two positional args
cmd = ""
num_expect = 2
# process first arg as command
if args:
cmd = args.pop(0)
possible = [c for c in commands if c.startswith(cmd)]
# no unique match, that's an error
if len(possible) > 1:
command_line_error("command '%s' not unique, could be %s" % (cmd, possible))
# only one match, that's a keeper
elif len(possible) == 1:
cmd = possible[0]
# no matches, assume no cmd
elif not possible:
args.insert(0, cmd)
if cmd == "cleanup":
cleanup = True
num_expect = 1
elif cmd == "collection-status":
collection_status = True
num_expect = 1
elif cmd == "full":
full_backup = True
num_expect = 2
elif cmd == "incremental":
globals.incremental = True
num_expect = 2
elif cmd == "list-current-files":
list_current = True
num_expect = 1
elif cmd == "remove-older-than":
try:
arg = args.pop(0)
except Exception:
command_line_error("Missing time string for remove-older-than")
globals.remove_time = dup_time.genstrtotime(arg)
num_expect = 1
elif cmd == "remove-all-but-n-full" or cmd == "remove-all-inc-of-but-n-full":
if cmd == "remove-all-but-n-full" :
globals.remove_all_but_n_full_mode = True
if cmd == "remove-all-inc-of-but-n-full" :
globals.remove_all_inc_of_but_n_full_mode = True
try:
arg = args.pop(0)
except Exception:
command_line_error("Missing count for " + cmd)
globals.keep_chains = int(arg)
if not globals.keep_chains > 0:
command_line_error(cmd + " count must be > 0")
num_expect = 1
elif cmd == "verify":
verify = True
if len(args) != num_expect:
command_line_error("Expected %d args, got %d" % (num_expect, len(args)))
# expand pathname args, but not URL
for loc in range(len(args)):
if not '://' in args[loc]:
args[loc] = expand_fn(args[loc])
# Note that ProcessCommandLine depends on us verifying the arg
# count here; do not remove without fixing it. We must make the
# checks here in order to make enough sense of args to identify
# the backend URL/lpath for args_to_path_backend().
if len(args) < 1:
command_line_error("Too few arguments")
elif len(args) == 1:
backend_url = args[0]
elif len(args) == 2:
lpath, backend_url = args_to_path_backend(args[0], args[1]) # @UnusedVariable
else:
command_line_error("Too many arguments")
if globals.backup_name is None:
globals.backup_name = generate_default_backup_name(backend_url)
# set and expand archive dir
set_archive_dir(expand_archive_dir(globals.archive_dir,
globals.backup_name))
log.Info(_("Using archive dir: %s") % (util.ufn(globals.archive_dir.name),))
log.Info(_("Using backup name: %s") % (globals.backup_name,))
return args
def command_line_error(message):
    """Indicate a command line error and exit"""
    full_message = (_("Command line error: %s") % (message,) + "\n" +
                    _("Enter 'duplicity --help' for help screen."))
    log.FatalError(full_message, log.ErrorCode.command_line)
def usage():
    """Returns terse usage info. The code is broken down into pieces for ease of
    translation maintenance. Any comments that look extraneous or redundant should
    be assumed to be for the benefit of translators, since they can get each string
    (paired with its preceding comment, if any) independently of the others."""
    # Placeholder names substituted into the usage text below.  (Named
    # `names` rather than `dict`, which shadowed the builtin dict type.)
    names = {
        # TRANSL: Used in usage help to represent a Unix-style path name. Example:
        # rsync://user[:password]@other_host[:port]//absolute_path
        'absolute_path' : _("absolute_path"),

        # TRANSL: Used in usage help. Example:
        # tahoe://alias/some_dir
        'alias' : _("alias"),

        # TRANSL: Used in help to represent a "bucket name" for Amazon Web
        # Services' Simple Storage Service (S3). Example:
        # s3://other.host/bucket_name[/prefix]
        'bucket_name' : _("bucket_name"),

        # TRANSL: abbreviation for "character" (noun)
        'char' : _("char"),

        # TRANSL: noun
        'command' : _("command"),

        # TRANSL: Used in usage help to represent the name of a container in
        # Amazon Web Services' Cloudfront. Example:
        # cf+http://container_name
        'container_name' : _("container_name"),

        # TRANSL: noun
        'count' : _("count"),

        # TRANSL: Used in usage help to represent the name of a file directory
        'directory' : _("directory"),

        # TRANSL: Used in usage help to represent the name of a file. Example:
        # --log-file <filename>
        'filename' : _("filename"),

        # TRANSL: Used in usage help to represent an ID for a GnuPG key. Example:
        # --encrypt-key <gpg_key_id>
        'gpg_key_id' : _("gpg-key-id"),

        # TRANSL: Used in usage help, e.g. to represent the name of a code
        # module. Example:
        # rsync://user[:password]@other.host[:port]::/module/some_dir
        'module' : _("module"),

        # TRANSL: Used in usage help to represent a desired number of
        # something. Example:
        # --num-retries <number>
        'number' : _("number"),

        # TRANSL: Used in usage help. (Should be consistent with the "Options:"
        # header.) Example:
        # duplicity [full|incremental] [options] source_dir target_url
        'options' : _("options"),

        # TRANSL: Used in usage help to represent an internet hostname. Example:
        # ftp://user[:password]@other.host[:port]/some_dir
        'other_host' : _("other.host"),

        # TRANSL: Used in usage help. Example:
        # ftp://user[:password]@other.host[:port]/some_dir
        'password' : _("password"),

        # TRANSL: Used in usage help to represent a Unix-style path name. Example:
        # --archive-dir <path>
        'path' : _("path"),

        # TRANSL: Used in usage help to represent a TCP port number. Example:
        # ftp://user[:password]@other.host[:port]/some_dir
        'port' : _("port"),

        # TRANSL: Used in usage help. This represents a string to be used as a
        # prefix to names for backup files created by Duplicity. Example:
        # s3://other.host/bucket_name[/prefix]
        'prefix' : _("prefix"),

        # TRANSL: Used in usage help to represent a Unix-style path name. Example:
        # rsync://user[:password]@other.host[:port]/relative_path
        'relative_path' : _("relative_path"),

        # TRANSL: Used in usage help. Example:
        # --timeout <seconds>
        'seconds' : _("seconds"),

        # TRANSL: Used in usage help to represent a "glob" style pattern for
        # matching one or more files, as described in the documentation.
        # Example:
        # --exclude <shell_pattern>
        'shell_pattern' : _("shell_pattern"),

        # TRANSL: Used in usage help to represent the name of a single file
        # directory or a Unix-style path to a directory. Example:
        # file:///some_dir
        'some_dir' : _("some_dir"),

        # TRANSL: Used in usage help to represent the name of a single file
        # directory or a Unix-style path to a directory where files will be
        # coming FROM. Example:
        # duplicity [full|incremental] [options] source_dir target_url
        'source_dir' : _("source_dir"),

        # TRANSL: Used in usage help to represent a URL files will be coming
        # FROM. Example:
        # duplicity [restore] [options] source_url target_dir
        'source_url' : _("source_url"),

        # TRANSL: Used in usage help to represent the name of a single file
        # directory or a Unix-style path to a directory. where files will be
        # going TO. Example:
        # duplicity [restore] [options] source_url target_dir
        'target_dir' : _("target_dir"),

        # TRANSL: Used in usage help to represent a URL files will be going TO.
        # Example:
        # duplicity [full|incremental] [options] source_dir target_url
        'target_url' : _("target_url"),

        # TRANSL: Used in usage help to represent a time spec for a previous
        # point in time, as described in the documentation. Example:
        # duplicity remove-older-than time [options] target_url
        'time' : _("time"),

        # TRANSL: Used in usage help to represent a user name (i.e. login).
        # Example:
        # ftp://user[:password]@other.host[:port]/some_dir
        'user' : _("user") }

    # TRANSL: Header in usage help
    msg = """
duplicity [full|incremental] [%(options)s] %(source_dir)s %(target_url)s
duplicity [restore] [%(options)s] %(source_url)s %(target_dir)s
duplicity verify [%(options)s] %(source_url)s %(target_dir)s
duplicity collection-status [%(options)s] %(target_url)s
duplicity list-current-files [%(options)s] %(target_url)s
duplicity cleanup [%(options)s] %(target_url)s
duplicity remove-older-than %(time)s [%(options)s] %(target_url)s
duplicity remove-all-but-n-full %(count)s [%(options)s] %(target_url)s
duplicity remove-all-inc-of-but-n-full %(count)s [%(options)s] %(target_url)s
""" % names

    # TRANSL: Header in usage help
    msg = msg + _("Backends and their URL formats:") + """
cf+http://%(container_name)s
file:///%(some_dir)s
ftp://%(user)s[:%(password)s]@%(other_host)s[:%(port)s]/%(some_dir)s
ftps://%(user)s[:%(password)s]@%(other_host)s[:%(port)s]/%(some_dir)s
hsi://%(user)s[:%(password)s]@%(other_host)s[:%(port)s]/%(some_dir)s
imap://%(user)s[:%(password)s]@%(other_host)s[:%(port)s]/%(some_dir)s
rsync://%(user)s[:%(password)s]@%(other_host)s[:%(port)s]::/%(module)s/%(some_dir)s
rsync://%(user)s[:%(password)s]@%(other_host)s[:%(port)s]/%(relative_path)s
rsync://%(user)s[:%(password)s]@%(other_host)s[:%(port)s]//%(absolute_path)s
s3://%(other_host)s/%(bucket_name)s[/%(prefix)s]
s3+http://%(bucket_name)s[/%(prefix)s]
scp://%(user)s[:%(password)s]@%(other_host)s[:%(port)s]/%(some_dir)s
ssh://%(user)s[:%(password)s]@%(other_host)s[:%(port)s]/%(some_dir)s
swift://%(container_name)s
tahoe://%(alias)s/%(directory)s
webdav://%(user)s[:%(password)s]@%(other_host)s/%(some_dir)s
webdavs://%(user)s[:%(password)s]@%(other_host)s/%(some_dir)s
gdocs://%(user)s[:%(password)s]@%(other_host)s/%(some_dir)s
mega://%(user)s[:%(password)s]@%(other_host)s/%(some_dir)s
copy://%(user)s[:%(password)s]@%(other_host)s/%(some_dir)s
dpbx:///%(some_dir)s
""" % names

    # TRANSL: Header in usage help
    msg = msg + _("Commands:") + """
cleanup <%(target_url)s>
collection-status <%(target_url)s>
full <%(source_dir)s> <%(target_url)s>
incr <%(source_dir)s> <%(target_url)s>
list-current-files <%(target_url)s>
restore <%(target_url)s> <%(source_dir)s>
remove-older-than <%(time)s> <%(target_url)s>
remove-all-but-n-full <%(count)s> <%(target_url)s>
remove-all-inc-of-but-n-full <%(count)s> <%(target_url)s>
verify <%(target_url)s> <%(source_dir)s>""" % names

    return msg
def set_archive_dir(dirstring):
    """Validate the archive directory (creating it if needed) and record
    it in globals.archive_dir."""
    try:
        if not os.path.exists(dirstring):
            os.makedirs(dirstring)
    except Exception:
        pass  # a failed mkdir is diagnosed by the isdir() check below
    adir = path.Path(dirstring)
    if not adir.isdir():
        log.FatalError(_("Specified archive directory '%s' does not exist, "
                         "or is not a directory") % (util.ufn(adir.name),),
                       log.ErrorCode.bad_archive_dir)
    globals.archive_dir = adir
def set_sign_key(sign_key):
    """Store sign_key on globals.gpg_profile after sanity-checking it."""
    # A key id is accepted only as exactly 8 upper-case hex digits.
    looks_valid = len(sign_key) == 8 and re.search("^[0-9A-F]*$", sign_key)
    if not looks_valid:
        log.FatalError(_("Sign key should be an 8 character hex string, like "
                         "'AA0E73D2'.\nReceived '%s' instead.") % (sign_key,),
                       log.ErrorCode.bad_sign_key)
    globals.gpg_profile.sign_key = sign_key
def set_selection():
    """Build the file-selection iterator for globals.local_path from the
    parsed --include/--exclude options and store it in globals.select."""
    global select_opts, select_files
    selector = selection.Select(globals.local_path)
    selector.ParseArgs(select_opts, select_files)
    globals.select = selector.set_iter()
def args_to_path_backend(arg1, arg2):
    """
    Given exactly two arguments, decide which one is the backend URL and
    which one is a local path, and return the pair (local, backend).
    """
    first_is_url = backend.is_backend_url(arg1)
    second_is_url = backend.is_backend_url(arg2)

    # Exactly one argument must be a URL; reject the two failure modes first.
    if first_is_url and second_is_url:
        command_line_error("Two URLs specified. "
                           "One argument should be a path.")
    if not first_is_url and not second_is_url:
        command_line_error(
            """One of the arguments must be an URL. Examples of URL strings are
"scp://user@host.net:1234/path" and "file:///usr/local". See the man
page for more information.""")

    if first_is_url:
        return (arg2, arg1)
    if second_is_url:
        return (arg1, arg2)
    raise AssertionError('should not be reached')
def set_backend(arg1, arg2):
    """Decide which argument is the URL and initialise globals.backend.

    Returns the pair (path_is_first, path), where path_is_first is truthy
    iff the local path came from arg1.
    """
    local, url = args_to_path_backend(arg1, arg2)
    globals.backend = backend.get_backend(url)
    if local == arg2:
        return (None, arg2) # False?
    return (1, arg1) # True?
def process_local_dir(action, local_pathname):
    """Sanity-check the local directory for *action* ("restore", "verify",
    "full" or "inc") and record it in globals.local_path."""
    lpath = path.Path(path.Path(local_pathname).get_canonical())
    if action == "restore":
        # Refuse to restore into a non-empty directory unless --force.
        if (lpath.exists() and not lpath.isemptydir()) and not globals.force:
            log.FatalError(_("Restore destination directory %s already "
                             "exists.\nWill not overwrite.") % (util.ufn(lpath.name),),
                           log.ErrorCode.restore_dir_exists)
    elif action == "verify":
        if not lpath.exists():
            log.FatalError(_("Verify directory %s does not exist") %
                           (util.ufn(lpath.name),),
                           log.ErrorCode.verify_dir_doesnt_exist)
    else:
        assert action == "full" or action == "inc"
        if not lpath.exists():
            log.FatalError(_("Backup source directory %s does not exist.")
                           % (util.ufn(lpath.name),),
                           log.ErrorCode.backup_dir_doesnt_exist)

    globals.local_path = lpath
def check_consistency(action):
    """Final consistency check, see if something wrong with command line"""
    global full_backup, select_opts, list_current

    def assert_only_one(arglist):
        """Raises error if two or more of the elements of arglist are true"""
        assert sum(map(bool, arglist)) <= 1, \
            "Invalid syntax, two conflicting modes specified"

    if action in ["list-current", "collection-status",
                  "cleanup", "remove-old", "remove-all-but-n-full", "remove-all-inc-of-but-n-full"]:
        assert_only_one([list_current, collection_status, cleanup,
                         globals.remove_time is not None])
    elif action == "restore" or action == "verify":
        if full_backup:
            command_line_error("--full option cannot be used when "
                               "restoring or verifying")
        elif globals.incremental:
            command_line_error("--incremental option cannot be used when "
                               "restoring or verifying")
        if select_opts and action == "restore":
            # Bug fix: the original string concatenation produced
            # "backing up,not restoring." (no space after the comma).
            log.Warn(_("Command line warning: %s") % _("Selection options --exclude/--include\n"
                                                       "currently work only when backing up, "
                                                       "not restoring."))
    else:
        assert action == "inc" or action == "full"
        if verify:
            command_line_error("--verify option cannot be used "
                               "when backing up")
        if globals.restore_dir:
            command_line_error("restore option incompatible with %s backup"
                               % (action,))
def ProcessCommandLine(cmdline_list):
    """Process command line, set globals, return action

    action will be "list-current", "collection-status", "cleanup",
    "remove-old", "restore", "verify", "full", or "inc".
    """
    globals.gpg_profile = gpg.GPGProfile()

    args = parse_cmdline_options(cmdline_list)

    # we can now try to import all the backends
    backend.import_backends()

    # parse_cmdline_options already verified that we got exactly 1 or 2
    # non-options arguments
    assert len(args) >= 1 and len(args) <= 2, "arg count should have been checked already"
    if len(args) == 1:
        # One argument: a pure backend operation; derive the action from
        # whichever mode flag parse_cmdline_options() recorded.
        if list_current:
            action = "list-current"
        elif collection_status:
            action = "collection-status"
        elif cleanup:
            action = "cleanup"
        elif globals.remove_time is not None:
            action = "remove-old"
        elif globals.remove_all_but_n_full_mode:
            action = "remove-all-but-n-full"
        elif globals.remove_all_inc_of_but_n_full_mode:
            action = "remove-all-inc-of-but-n-full"
        else:
            command_line_error("Too few arguments")
        globals.backend = backend.get_backend(args[0])
        if not globals.backend:
            log.FatalError(_("""Bad URL '%s'.
Examples of URL strings are "scp://user@host.net:1234/path" and
"file:///usr/local". See the man page for more information.""") % (args[0],),
                           log.ErrorCode.bad_url)
    elif len(args) == 2:
        # Figure out whether backup or restore
        backup, local_pathname = set_backend(args[0], args[1])
        if backup:
            if full_backup:
                action = "full"
            else:
                action = "inc"
        else:
            if verify:
                action = "verify"
            else:
                action = "restore"

        process_local_dir(action, local_pathname)
        if action in ['full', 'inc', 'verify']:
            # Backup and verify walk the source tree: build the selection iter.
            set_selection()
    elif len(args) > 2:
        raise AssertionError("this code should not be reachable")

    check_consistency(action)
    log.Info(_("Main action: ") + action)
    return action
| gpl-2.0 |
redhat-openstack/django | django/contrib/gis/maps/google/gmap.py | 174 | 9102 | from django.conf import settings
from django.template.loader import render_to_string
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.six.moves import xrange
from django.contrib.gis.maps.google.overlays import GPolygon, GPolyline, GMarker
class GoogleMapException(Exception):
    "Raised when a GoogleMap cannot be configured (e.g. missing API key)."
    pass
# The default Google Maps URL (for the API javascript)
# TODO: Internationalize for Japan, UK, etc.
# %s is filled with the requested API version; callers append the API key.
GOOGLE_MAPS_URL='http://maps.google.com/maps?file=api&v=%s&key='
class GoogleMap(object):
    "A class for generating Google Maps JavaScript."

    # String constants
    onunload = mark_safe('onunload="GUnload()"') # Cleans up after Google Maps
    vml_css = mark_safe('v\:* {behavior:url(#default#VML);}') # CSS for IE VML
    xmlns = mark_safe('xmlns:v="urn:schemas-microsoft-com:vml"') # XML Namespace (for IE VML).

    def __init__(self, key=None, api_url=None, version=None,
                 center=None, zoom=None, dom_id='map',
                 kml_urls=None, polylines=None, polygons=None, markers=None,
                 template='gis/google/google-map.js',
                 js_module='geodjango',
                 extra_context=None):
        # Bug fix: `kml_urls` and `extra_context` previously defaulted to
        # mutable `[]`/`{}` literals, which were shared between every
        # GoogleMap instance (mutable-default-argument pitfall).  They now
        # default to None and are replaced with fresh objects per instance.
        if kml_urls is None:
            kml_urls = []
        if extra_context is None:
            extra_context = {}

        # The Google Maps API Key defined in the settings will be used
        # if not passed in as a parameter. The use of an API key is
        # _required_.
        if not key:
            try:
                self.key = settings.GOOGLE_MAPS_API_KEY
            except AttributeError:
                raise GoogleMapException('Google Maps API Key not found (try adding GOOGLE_MAPS_API_KEY to your settings).')
        else:
            self.key = key

        # Getting the Google Maps API version, defaults to using the latest ("2.x"),
        # this is not necessarily the most stable.
        if not version:
            self.version = getattr(settings, 'GOOGLE_MAPS_API_VERSION', '2.x')
        else:
            self.version = version

        # Can specify the API URL in the `api_url` keyword.
        if not api_url:
            self.api_url = getattr(settings, 'GOOGLE_MAPS_URL', GOOGLE_MAPS_URL) % self.version
        else:
            self.api_url = api_url

        # Setting the DOM id of the map, the load function, the JavaScript
        # template, and the KML URLs array.
        self.dom_id = dom_id
        self.extra_context = extra_context
        self.js_module = js_module
        self.template = template
        self.kml_urls = kml_urls

        # Does the user want any GMarker, GPolygon, and/or GPolyline overlays?
        # Each raw overlay argument is coerced into the matching overlay class.
        overlay_info = [[GMarker, markers, 'markers'],
                        [GPolygon, polygons, 'polygons'],
                        [GPolyline, polylines, 'polylines']]

        for overlay_class, overlay_list, varname in overlay_info:
            setattr(self, varname, [])
            if overlay_list:
                for overlay in overlay_list:
                    if isinstance(overlay, overlay_class):
                        getattr(self, varname).append(overlay)
                    else:
                        getattr(self, varname).append(overlay_class(overlay))

        # If GMarker, GPolygons, and/or GPolylines are used the zoom will be
        # automatically calculated via the Google Maps API. If both a zoom
        # level and a center coordinate are provided with polygons/polylines,
        # no automatic determination will occur.
        self.calc_zoom = False
        if self.polygons or self.polylines or self.markers:
            if center is None or zoom is None:
                self.calc_zoom = True

        # Defaults for the zoom level and center coordinates if the zoom
        # is not automatically calculated.
        if zoom is None:
            zoom = 4
        self.zoom = zoom
        if center is None:
            center = (0, 0)
        self.center = center

    def render(self):
        """
        Generates the JavaScript necessary for displaying this Google Map.
        """
        params = {'calc_zoom' : self.calc_zoom,
                  'center' : self.center,
                  'dom_id' : self.dom_id,
                  'js_module' : self.js_module,
                  'kml_urls' : self.kml_urls,
                  'zoom' : self.zoom,
                  'polygons' : self.polygons,
                  'polylines' : self.polylines,
                  'icons': self.icons,
                  'markers' : self.markers,
                  }
        params.update(self.extra_context)
        return render_to_string(self.template, params)

    @property
    def body(self):
        "Returns HTML body tag for loading and unloading Google Maps javascript."
        return format_html('<body {0} {1}>', self.onload, self.onunload)

    @property
    def onload(self):
        "Returns the `onload` HTML <body> attribute."
        return format_html('onload="{0}.{1}_load()"', self.js_module, self.dom_id)

    @property
    def api_script(self):
        "Returns the <script> tag for the Google Maps API javascript."
        return format_html('<script src="{0}{1}" type="text/javascript"></script>',
                           self.api_url, self.key)

    @property
    def js(self):
        "Returns only the generated Google Maps JavaScript (no <script> tags)."
        return self.render()

    @property
    def scripts(self):
        "Returns all <script></script> tags required with Google Maps JavaScript."
        return format_html('{0}\n <script type="text/javascript">\n//<![CDATA[\n{1}//]]>\n </script>',
                           self.api_script, mark_safe(self.js))

    @property
    def style(self):
        "Returns additional CSS styling needed for Google Maps on IE."
        return format_html('<style type="text/css">{0}</style>', self.vml_css)

    @property
    def xhtml(self):
        "Returns XHTML information needed for IE VML overlays."
        return format_html('<html xmlns="http://www.w3.org/1999/xhtml" {0}>', self.xmlns)

    @property
    def icons(self):
        "Returns a sequence of GIcon objects in this map."
        return set([marker.icon for marker in self.markers if marker.icon])
class GoogleMapSet(GoogleMap):
    "Generates JavaScript for several Google Maps shown on the same page."

    def __init__(self, *args, **kwargs):
        """
        A class for generating sets of Google Maps that will be shown on the
        same page together.

        Example:
          gmapset = GoogleMapSet( GoogleMap( ... ), GoogleMap( ... ) )
          gmapset = GoogleMapSet( [ gmap1, gmap2] )
        """
        # The `google-multi.js` template is used instead of `google-single.js`
        # by default.
        template = kwargs.pop('template', 'gis/google/google-multi.js')

        # This is the template used to generate the GMap load JavaScript for
        # each map in the set.
        self.map_template = kwargs.pop('map_template', 'gis/google/google-single.js')

        # Running GoogleMap.__init__(), and resetting the template
        # value with default obtained above.
        super(GoogleMapSet, self).__init__(**kwargs)
        self.template = template

        # If a tuple/list was passed as the sole positional argument its
        # contents are the maps; otherwise the positional arguments
        # themselves are the maps.  (Checking `args` first avoids an
        # IndexError when the set is constructed without any maps.)
        if args and isinstance(args[0], (tuple, list)):
            self.maps = args[0]
        else:
            self.maps = args

        # Generating DOM ids for each of the maps in the set.
        self.dom_ids = ['map%d' % i for i in xrange(len(self.maps))]

    def load_map_js(self):
        """
        Returns JavaScript containing all of the loading routines for each
        map in this set.
        """
        result = []
        for dom_id, gmap in zip(self.dom_ids, self.maps):
            # Backup copies the GoogleMap DOM id and template attributes.
            # They are overridden on each GoogleMap instance in the set so
            # that only the loading JavaScript (and not the header variables)
            # is used with the generated DOM ids.
            tmp = (gmap.template, gmap.dom_id)
            gmap.template = self.map_template
            gmap.dom_id = dom_id
            result.append(gmap.js)
            # Restoring the backup values.
            gmap.template, gmap.dom_id = tmp
        return mark_safe(''.join(result))

    def render(self):
        """
        Generates the JavaScript for the collection of Google Maps in
        this set.
        """
        params = {'js_module' : self.js_module,
                  'dom_ids' : self.dom_ids,
                  'load_map_js' : self.load_map_js(),
                  'icons' : self.icons,
                  }
        params.update(self.extra_context)
        return render_to_string(self.template, params)

    @property
    def onload(self):
        "Returns the `onload` HTML <body> attribute."
        # Overloaded to use the `load` function defined in the
        # `google-multi.js`, which calls the load routines for
        # each one of the individual maps in the set.
        return mark_safe('onload="%s.load()"' % self.js_module)

    @property
    def icons(self):
        "Returns a sequence of all icons in each map of the set."
        icons = set()
        # Loop variable renamed from `map`, which shadowed the builtin.
        for m in self.maps:
            icons |= m.icons
        return icons
| bsd-3-clause |
espadrine/opera | chromium/src/third_party/python_26/Lib/types.py | 65 | 2323 | """Define names for all type symbols known in the standard interpreter.
Types that are part of optional modules (e.g. array) are not listed.
"""
import sys

# NOTE(review): this module is Python 2 only -- it relies on names (long,
# unicode, buffer, xrange, file) and function attributes (func_code,
# func_globals) that were removed in Python 3.

# Iterators in Python aren't a matter of type but of protocol. A large
# and changing number of builtin types implement *some* flavor of
# iterator. Don't check the type! Use hasattr to check for both
# "__iter__" and "next" attributes instead.

NoneType = type(None)
TypeType = type
ObjectType = object

IntType = int
LongType = long
FloatType = float
BooleanType = bool
try:
    ComplexType = complex
except NameError:
    # Python can be built without complex-number support.
    pass

StringType = str

# StringTypes is already outdated. Instead of writing "type(x) in
# types.StringTypes", you should use "isinstance(x, basestring)". But
# we keep around for compatibility with Python 2.2.
try:
    UnicodeType = unicode
    StringTypes = (StringType, UnicodeType)
except NameError:
    StringTypes = (StringType,)

BufferType = buffer

TupleType = tuple
ListType = list
DictType = DictionaryType = dict

# Throwaway definitions used only to capture their types below.
def _f(): pass
FunctionType = type(_f)
LambdaType = type(lambda: None) # Same as FunctionType
try:
    CodeType = type(_f.func_code)
except RuntimeError:
    # Execution in restricted environment
    pass

def _g():
    yield 1
GeneratorType = type(_g())

class _C:
    def _m(self): pass
ClassType = type(_C)
UnboundMethodType = type(_C._m) # Same as MethodType
_x = _C()
InstanceType = type(_x)
MethodType = type(_x._m)

BuiltinFunctionType = type(len)
BuiltinMethodType = type([].append) # Same as BuiltinFunctionType

ModuleType = type(sys)
FileType = file
XRangeType = xrange

# Raise and catch an exception to obtain a live traceback object, from
# which the traceback and frame types can be read.
try:
    raise TypeError
except TypeError:
    try:
        tb = sys.exc_info()[2]
        TracebackType = type(tb)
        FrameType = type(tb.tb_frame)
    except AttributeError:
        # In the restricted environment, exc_info returns (None, None,
        # None) Then, tb.tb_frame gives an attribute error
        pass
    tb = None; del tb

SliceType = slice
EllipsisType = type(Ellipsis)

DictProxyType = type(TypeType.__dict__)
NotImplementedType = type(NotImplemented)

# For Jython, the following two types are identical
GetSetDescriptorType = type(FunctionType.func_code)
MemberDescriptorType = type(FunctionType.func_globals)

del sys, _f, _g, _C, _x # Not for export
| bsd-3-clause |
AnimationInVR/avango | avango-python/src/tests/TestFieldContainer.py | 6 | 5337 | # -*- Mode:Python -*-
##########################################################################
# #
# This file is part of AVANGO. #
# #
# Copyright 1997 - 2009 Fraunhofer-Gesellschaft zur Foerderung der #
# angewandten Forschung (FhG), Munich, Germany. #
# #
# AVANGO is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Lesser General Public License as #
# published by the Free Software Foundation, version 3. #
# #
# AVANGO is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with AVANGO. If not, see <http://www.gnu.org/licenses/>. #
# #
##########################################################################
import avango
import mock
import unittest
class FieldContainerTestCase(unittest.TestCase):
    """Exercises avango FieldContainer creation, field access and naming,
    using the MockFieldContainer node from the mock module."""

    def testRawCreateInstance(self):
        self.failIfEqual(None, avango._make_instance_by_name("MockFieldContainer"))

    def testRawGetField(self):
        mcf = avango._make_instance_by_name("MockFieldContainer")
        self.failIfEqual(None, mcf)
        self.failIfEqual(None, mcf._get_field("AnIntField"))

    def testExplicitGetValue(self):
        mcf = avango._make_instance_by_name("MockFieldContainer")
        self.failIfEqual(None, mcf)
        field = mcf._get_field("AnIntField")
        self.failIfEqual(None, field)
        self.assertEqual(0, field.get_value())

    def testImplicitCreateInstance(self):
        node = mock.nodes.MockFieldContainer()
        self.failIfEqual(None, node)
        self.failIfEqual(None, node._get_field("AnIntField"))

    def testGetFieldAsAttribute(self):
        node = mock.nodes.MockFieldContainer()
        self.failIfEqual(None, node)
        field = node.AnIntField
        self.failIfEqual(None, field)
        self.assertEqual(0, field.get_value())

    def testCreateInstanceWithKeywordArguments(self):
        node = mock.nodes.MockFieldContainer(AnIntField = 42)
        self.failIfEqual(None, node)
        self.assertEqual(42, node.AnIntField.get_value())

    def testExplicitSetValue(self):
        mcf = mock.nodes.MockFieldContainer()
        self.failIfEqual(None, mcf)
        self.assertEqual(0, mcf.AnIntField.get_value())
        mcf.AnIntField.set_value(42)
        self.assertEqual(42, mcf.AnIntField.get_value())

    def testAccessFieldValueFromProperty(self):
        mcf = mock.nodes.MockFieldContainer()
        self.assertEqual(0, mcf.AnIntField.value)
        mcf.AnIntField.value = 42
        self.assertEqual(42, mcf.AnIntField.value)

    def testGetUnknownAttribute(self):
        node = mock.nodes.MockFieldContainer()
        self.failIfEqual(None, node)
        self.assertRaises(AttributeError, lambda:node.NotAFieldName)

    def testSetUnknownFieldAsAttribute(self):
        node = mock.nodes.MockFieldContainer()
        self.failIfEqual(None, node)
        node.NotAFieldName = 42
        # Bug fix: the original called self.assert_(42, node.NotAFieldName),
        # which uses 42 as the (always true) assertion expression and never
        # actually checks the attribute; compare the values instead.
        self.assertEqual(42, node.NotAFieldName)

    def testImplicitCreateUnknownInstance(self):
        def create():
            node = avango.nodes.NameANodeShouldNeverHave()
        self.assertRaises(ImportError, create)

    def testGetValues(self):
        mcf = mock.nodes.MockFieldContainer()
        values = mcf.get_values()
        self.assertEqual(0, values.AnIntField)
        self.assertEqual(0, mcf.AnIntField.value)
        values.AnIntField = 42
        self.assertEqual(42, values.AnIntField)
        self.assertEqual(42, mcf.AnIntField.value)

    def testCompare(self):
        mcf = mock.nodes.MockFieldContainer()
        mcf.AFieldContainer.value = mcf
        self.assertEqual(mcf, mcf.AFieldContainer.value)

    def testGetInstanceByName(self):
        node = mock.nodes.MockFieldContainer()
        self.failIfEqual(None, node)
        name = "MyNode"
        node.Name.value = name
        self.assertEqual(node, avango.get_instance_by_name(name))
def Suite():
    """Assemble and return the unittest suite for FieldContainerTestCase."""
    test_names = [
        'testRawCreateInstance',
        'testRawGetField',
        'testExplicitGetValue',
        'testImplicitCreateInstance',
        'testGetFieldAsAttribute',
        'testCreateInstanceWithKeywordArguments',
        'testExplicitSetValue',
        'testAccessFieldValueFromProperty',
        'testGetUnknownAttribute',
        'testSetUnknownFieldAsAttribute',
        'testImplicitCreateUnknownInstance',
        'testGetValues',
        'testCompare',
        'testGetInstanceByName',
    ]
    suite = unittest.TestSuite()
    suite.addTests(FieldContainerTestCase(name) for name in test_names)
    return suite
| lgpl-3.0 |
# Transliteration table for Unicode code points U+5B00 - U+5BFF:
# entry N is the ASCII (Pinyin) replacement for code point U+5B00 + N.
data = (
'Gui ', # 0x00
'Deng ', # 0x01
'Zhi ', # 0x02
'Xu ', # 0x03
'Yi ', # 0x04
'Hua ', # 0x05
'Xi ', # 0x06
'Hui ', # 0x07
'Rao ', # 0x08
'Xi ', # 0x09
'Yan ', # 0x0a
'Chan ', # 0x0b
'Jiao ', # 0x0c
'Mei ', # 0x0d
'Fan ', # 0x0e
'Fan ', # 0x0f
'Xian ', # 0x10
'Yi ', # 0x11
'Wei ', # 0x12
'Jiao ', # 0x13
'Fu ', # 0x14
'Shi ', # 0x15
'Bi ', # 0x16
'Shan ', # 0x17
'Sui ', # 0x18
'Qiang ', # 0x19
'Lian ', # 0x1a
'Huan ', # 0x1b
'Xin ', # 0x1c
'Niao ', # 0x1d
'Dong ', # 0x1e
'Yi ', # 0x1f
'Can ', # 0x20
'Ai ', # 0x21
'Niang ', # 0x22
'Neng ', # 0x23
'Ma ', # 0x24
'Tiao ', # 0x25
'Chou ', # 0x26
'Jin ', # 0x27
'Ci ', # 0x28
'Yu ', # 0x29
'Pin ', # 0x2a
'Yong ', # 0x2b
'Xu ', # 0x2c
'Nai ', # 0x2d
'Yan ', # 0x2e
'Tai ', # 0x2f
'Ying ', # 0x30
'Can ', # 0x31
'Niao ', # 0x32
'Wo ', # 0x33
'Ying ', # 0x34
'Mian ', # 0x35
'Kaka ', # 0x36
'Ma ', # 0x37
'Shen ', # 0x38
'Xing ', # 0x39
'Ni ', # 0x3a
'Du ', # 0x3b
'Liu ', # 0x3c
'Yuan ', # 0x3d
'Lan ', # 0x3e
'Yan ', # 0x3f
'Shuang ', # 0x40
'Ling ', # 0x41
'Jiao ', # 0x42
'Niang ', # 0x43
'Lan ', # 0x44
'Xian ', # 0x45
'Ying ', # 0x46
'Shuang ', # 0x47
'Shuai ', # 0x48
'Quan ', # 0x49
'Mi ', # 0x4a
'Li ', # 0x4b
'Luan ', # 0x4c
'Yan ', # 0x4d
'Zhu ', # 0x4e
'Lan ', # 0x4f
'Zi ', # 0x50
'Jie ', # 0x51
'Jue ', # 0x52
'Jue ', # 0x53
'Kong ', # 0x54
'Yun ', # 0x55
'Zi ', # 0x56
'Zi ', # 0x57
'Cun ', # 0x58
'Sun ', # 0x59
'Fu ', # 0x5a
'Bei ', # 0x5b
'Zi ', # 0x5c
'Xiao ', # 0x5d
'Xin ', # 0x5e
'Meng ', # 0x5f
'Si ', # 0x60
'Tai ', # 0x61
'Bao ', # 0x62
'Ji ', # 0x63
'Gu ', # 0x64
'Nu ', # 0x65
'Xue ', # 0x66
'[?] ', # 0x67
'Zhuan ', # 0x68
'Hai ', # 0x69
'Luan ', # 0x6a
'Sun ', # 0x6b
'Huai ', # 0x6c
'Mie ', # 0x6d
'Cong ', # 0x6e
'Qian ', # 0x6f
'Shu ', # 0x70
'Chan ', # 0x71
'Ya ', # 0x72
'Zi ', # 0x73
'Ni ', # 0x74
'Fu ', # 0x75
'Zi ', # 0x76
'Li ', # 0x77
'Xue ', # 0x78
'Bo ', # 0x79
'Ru ', # 0x7a
'Lai ', # 0x7b
'Nie ', # 0x7c
'Nie ', # 0x7d
'Ying ', # 0x7e
'Luan ', # 0x7f
'Mian ', # 0x80
'Zhu ', # 0x81
'Rong ', # 0x82
'Ta ', # 0x83
'Gui ', # 0x84
'Zhai ', # 0x85
'Qiong ', # 0x86
'Yu ', # 0x87
'Shou ', # 0x88
'An ', # 0x89
'Tu ', # 0x8a
'Song ', # 0x8b
'Wan ', # 0x8c
'Rou ', # 0x8d
'Yao ', # 0x8e
'Hong ', # 0x8f
'Yi ', # 0x90
'Jing ', # 0x91
'Zhun ', # 0x92
'Mi ', # 0x93
'Zhu ', # 0x94
'Dang ', # 0x95
'Hong ', # 0x96
'Zong ', # 0x97
'Guan ', # 0x98
'Zhou ', # 0x99
'Ding ', # 0x9a
'Wan ', # 0x9b
'Yi ', # 0x9c
'Bao ', # 0x9d
'Shi ', # 0x9e
'Shi ', # 0x9f
'Chong ', # 0xa0
'Shen ', # 0xa1
'Ke ', # 0xa2
'Xuan ', # 0xa3
'Shi ', # 0xa4
'You ', # 0xa5
'Huan ', # 0xa6
'Yi ', # 0xa7
'Tiao ', # 0xa8
'Shi ', # 0xa9
'Xian ', # 0xaa
'Gong ', # 0xab
'Cheng ', # 0xac
'Qun ', # 0xad
'Gong ', # 0xae
'Xiao ', # 0xaf
'Zai ', # 0xb0
'Zha ', # 0xb1
'Bao ', # 0xb2
'Hai ', # 0xb3
'Yan ', # 0xb4
'Xiao ', # 0xb5
'Jia ', # 0xb6
'Shen ', # 0xb7
'Chen ', # 0xb8
'Rong ', # 0xb9
'Huang ', # 0xba
'Mi ', # 0xbb
'Kou ', # 0xbc
'Kuan ', # 0xbd
'Bin ', # 0xbe
'Su ', # 0xbf
'Cai ', # 0xc0
'Zan ', # 0xc1
'Ji ', # 0xc2
'Yuan ', # 0xc3
'Ji ', # 0xc4
'Yin ', # 0xc5
'Mi ', # 0xc6
'Kou ', # 0xc7
'Qing ', # 0xc8
'Que ', # 0xc9
'Zhen ', # 0xca
'Jian ', # 0xcb
'Fu ', # 0xcc
'Ning ', # 0xcd
'Bing ', # 0xce
'Huan ', # 0xcf
'Mei ', # 0xd0
'Qin ', # 0xd1
'Han ', # 0xd2
'Yu ', # 0xd3
'Shi ', # 0xd4
'Ning ', # 0xd5
'Qin ', # 0xd6
'Ning ', # 0xd7
'Zhi ', # 0xd8
'Yu ', # 0xd9
'Bao ', # 0xda
'Kuan ', # 0xdb
'Ning ', # 0xdc
'Qin ', # 0xdd
'Mo ', # 0xde
'Cha ', # 0xdf
'Ju ', # 0xe0
'Gua ', # 0xe1
'Qin ', # 0xe2
'Hu ', # 0xe3
'Wu ', # 0xe4
'Liao ', # 0xe5
'Shi ', # 0xe6
'Zhu ', # 0xe7
'Zhai ', # 0xe8
'Shen ', # 0xe9
'Wei ', # 0xea
'Xie ', # 0xeb
'Kuan ', # 0xec
'Hui ', # 0xed
'Liao ', # 0xee
'Jun ', # 0xef
'Huan ', # 0xf0
'Yi ', # 0xf1
'Yi ', # 0xf2
'Bao ', # 0xf3
'Qin ', # 0xf4
'Chong ', # 0xf5
'Bao ', # 0xf6
'Feng ', # 0xf7
'Cun ', # 0xf8
'Dui ', # 0xf9
'Si ', # 0xfa
'Xun ', # 0xfb
'Dao ', # 0xfc
'Lu ', # 0xfd
'Dui ', # 0xfe
'Shou ', # 0xff
)
| bsd-3-clause |
tuffery/Frog2 | frowns/build/lib/frowns/Chirality/TetraHedral.py | 2 | 2462 | """TetraHedral.T(order, chirality)
order = atoms in the input order of the smiles string.
chirality = @ anti-clockwise @@ clockwise
TetraHedral.T.getChirality(neworder) -> return the chirality
of the new order of the atoms
order -> atom order of the molecules
around the center.
If the number of atoms is 3 then the
a hydrogen atom is assumed to complete
the chirality.
Lookup table
chirality = '@'
input order = (1,2,3,4)
neworder, chirality
1,2,3,4 : '@'
"""
import itertools

# Parity flags: SAME means an atom ordering has the same handedness as the
# reference ordering, REVERSE means the opposite handedness.
REVERSE = 0
SAME = 1

def _parity(perm):
    """Return SAME for an even permutation of range(len(perm)), REVERSE for odd.

    Parity is the parity of the number of inversions in the permutation."""
    inversions = sum(
        1
        for i in range(len(perm))
        for j in range(i + 1, len(perm))
        if perm[i] > perm[j]
    )
    return SAME if inversions % 2 == 0 else REVERSE

# Chirality lookup table mapping a permutation of the reference atom order
# (3 atoms with an implicit hydrogen completing the centre, or 4 explicit
# atoms) to SAME/REVERSE.  Generated from permutation parity; the
# hand-written table it replaces contained transcription errors:
# (0,3,2,1) was listed as both SAME and REVERSE (it is odd, so REVERSE),
# (1,2,3,0) appeared twice, and (0,3,1,2)/(1,3,0,2) were missing.
chiral_table = dict(
    (perm, _parity(perm))
    for n in (3, 4)
    for perm in itertools.permutations(range(n))
)

class T:
    """A tetrahedral chirality centre.

    `order` is the list of atoms (objects with a .handle attribute) in the
    order they appeared in the SMILES string; `chirality` is "@"
    (anti-clockwise) or "@@" (clockwise)."""
    def __init__(self, order, chirality):
        # store the initial order
        self.order = order
        self.chirality = "@"
        # normalize to anti-clockwise ("@") ordering: swapping any two
        # neighbouring atoms flips the handedness.
        if chirality == "@@":
            # Bug fix: the original swapped order[-1] with order[-0] --
            # i.e. the *first* atom, since -0 == 0 -- and captured the
            # reference order *before* the swap, so "@@" centres behaved
            # exactly like "@".  Swap the last two atoms and record the
            # reference ordering afterwards.
            order[-1], order[-2] = order[-2], order[-1]
        # reference ordering, captured after normalization
        self._initialOrder = [x.handle for x in order]

    def __str__(self):
        text = "Chirality\n"\
               " %s -> %s"%(self.order, self.chirality)
        return text

    def getChirality(self, order):
        """(order)->what is the chirality of a given order of
        atoms?"""
        indices = tuple([self._initialOrder.index(atom.handle)
                         for atom in order])
        same = chiral_table[indices]
        if same:
            return self.chirality
        else:
            if self.chirality == "@": return "@@"
            else: return "@"
if __name__ == "__main__":
    # Minimal smoke test: a stand-in Atom that provides only the
    # .handle attribute T relies on.
    class Atom:
        def __init__(self, id):
            self.handle = id

        def __repr__(self):
            return "Atom(%s)" % self.handle

    a = Atom(0)
    b = Atom(1)
    c = Atom(2)
    d = Atom(3)
    c0 = T([a, b, c, d], "@")
    # Single-argument print() behaves identically under Python 2 and 3,
    # unlike the original Python-2-only print statements.
    print(c0.getChirality([a, b, c, d]))  # expected: '@'
    print(c0.getChirality([a, b, d, c]))  # expected: '@@'
| gpl-3.0 |
Zelgadis87/Sick-Beard | sickbeard/metadata/generic.py | 6 | 37135 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os.path
try:
import xml.etree.cElementTree as etree
except ImportError:
import elementtree.ElementTree as etree
import re
import sickbeard
from sickbeard import exceptions, helpers
from sickbeard.metadata import helpers as metadata_helpers
from sickbeard import logger
from sickbeard import encodingKludge as ek
from sickbeard.exceptions import ex
from lib.tvdb_api import tvdb_api, tvdb_exceptions
from sickbeard import subtitle_queue
import glob
class GenericMetadata():
    """
    Base class for all metadata providers. Default behavior is meant to mostly
    follow XBMC 12+ metadata standards. Has support for:
    - show metadata file
    - episode metadata file
    - episode thumbnail
    - show fanart
    - show poster
    - show banner
    - season thumbnails (poster)
    - season thumbnails (banner)
    - season all poster
    - season all banner
    """
    def __init__(self,
                 show_metadata=False,
                 episode_metadata=False,
                 fanart=False,
                 poster=False,
                 banner=False,
                 episode_thumbnails=False,
                 subtitles=False,
                 season_posters=False,
                 season_banners=False,
                 season_all_poster=False,
                 season_all_banner=False):
        """Initialize feature flags and the default artwork/metadata
        filenames used by get_*_path() below. Each boolean enables one
        kind of artifact for this provider."""
        self.name = "Generic"
        self._ep_nfo_extension = "nfo"
        self._show_metadata_filename = "tvshow.nfo"
        self.fanart_name = "fanart.jpg"
        self.poster_name = "poster.jpg"
        self.banner_name = "banner.jpg"
        self.season_all_poster_name = "season-all-poster.jpg"
        self.season_all_banner_name = "season-all-banner.jpg"
        self.show_metadata = show_metadata
        self.episode_metadata = episode_metadata
        self.fanart = fanart
        self.poster = poster
        self.banner = banner
        self.episode_thumbnails = episode_thumbnails
        self.season_posters = season_posters
        self.season_banners = season_banners
        self.season_all_poster = season_all_poster
        self.season_all_banner = season_all_banner
        self.subtitles = subtitles
        # Example subtitle layout shown in the UI (HTML markup included).
        self.eg_subtitles = "Season##\\<i>filename.language</i>.srt"
    def get_config(self):
        """Serialize the enabled-feature flags to a pipe-separated
        string of 0/1 values (the inverse of set_config)."""
        config_list = [self.show_metadata, self.episode_metadata, self.fanart, self.poster, self.banner, self.episode_thumbnails, self.season_posters, self.season_banners, self.season_all_poster, self.season_all_banner, self.subtitles]
        return '|'.join([str(int(x)) for x in config_list])
    def get_id(self):
        """Return this provider's normalized identifier (see makeID)."""
        return GenericMetadata.makeID(self.name)
    @staticmethod
    def makeID(name):
        """Normalize a provider display name into a safe lowercase
        identifier ('+' becomes 'plus', other non-word chars become '_')."""
        name_id = re.sub("[+]", "plus", name)
        name_id = re.sub("[^\w\d_]", "_", name_id).lower()
        return name_id
    def set_config(self, string):
        """Restore the feature flags from a pipe-separated 0/1 string
        produced by get_config. The subtitles flag was added later, so
        older 10-element configs default it to 0."""
        config_list = [bool(int(x)) for x in string.split('|')]
        self.show_metadata = config_list[0]
        self.episode_metadata = config_list[1]
        self.fanart = config_list[2]
        self.poster = config_list[3]
        self.banner = config_list[4]
        self.episode_thumbnails = config_list[5]
        self.season_posters = config_list[6]
        self.season_banners = config_list[7]
        self.season_all_poster = config_list[8]
        self.season_all_banner = config_list[9]
        self.subtitles = config_list[10] if len(config_list) >= 11 else 0
    # The _has_* helpers below all test for the on-disk existence of one
    # artifact; ek.ek wraps the call with Sick Beard's filesystem
    # encoding kludge (see the encodingKludge import).
    def _has_show_metadata(self, show_obj):
        """Return True if the show's metadata file already exists."""
        result = ek.ek(os.path.isfile, self.get_show_file_path(show_obj))
        logger.log(u"Checking if " + self.get_show_file_path(show_obj) + " exists: " + str(result), logger.DEBUG)
        return result
    def _has_episode_metadata(self, ep_obj):
        """Return True if the episode's metadata file already exists."""
        result = ek.ek(os.path.isfile, self.get_episode_file_path(ep_obj))
        logger.log(u"Checking if " + self.get_episode_file_path(ep_obj) + " exists: " + str(result), logger.DEBUG)
        return result
    def _has_fanart(self, show_obj):
        """Return True if the show's fanart image already exists."""
        result = ek.ek(os.path.isfile, self.get_fanart_path(show_obj))
        logger.log(u"Checking if " + self.get_fanart_path(show_obj) + " exists: " + str(result), logger.DEBUG)
        return result
    def _has_poster(self, show_obj):
        """Return True if the show's poster image already exists."""
        result = ek.ek(os.path.isfile, self.get_poster_path(show_obj))
        logger.log(u"Checking if " + self.get_poster_path(show_obj) + " exists: " + str(result), logger.DEBUG)
        return result
    def _has_banner(self, show_obj):
        """Return True if the show's banner image already exists."""
        result = ek.ek(os.path.isfile, self.get_banner_path(show_obj))
        logger.log(u"Checking if " + self.get_banner_path(show_obj) + " exists: " + str(result), logger.DEBUG)
        return result
    def _has_episode_thumb(self, ep_obj):
        """Return True if the episode thumbnail exists. The path can be
        None (episode file missing), in which case the result is False."""
        location = self.get_episode_thumb_path(ep_obj)
        result = location != None and ek.ek(os.path.isfile, location)
        if location:
            logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG)
        return result
    def _has_season_poster(self, show_obj, season):
        """Return True if the poster for the given season exists."""
        location = self.get_season_poster_path(show_obj, season)
        result = location != None and ek.ek(os.path.isfile, location)
        if location:
            logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG)
        return result
    def _has_episode_subtitle(self, ep_obj):
        #Assumes that an episode have subtitles if any srt file is on the disk
        #with the following pattern: episode_file_name_without_extension*.srt
        subtitlePath = ep_obj.location.rpartition(".")[0] + "*.srt"
        # '[' and ']' are glob metacharacters; widen them to '*' so
        # bracketed release names still match.
        subtitlePath = subtitlePath.replace("[", "*").replace("]", "*")
        locations = glob.glob(subtitlePath)
        logger.log("Checking if "+subtitlePath+" exists: "+str(len(locations)), logger.DEBUG)
        return True if len(locations) > 0 else False
    def _has_season_banner(self, show_obj, season):
        """Return True if the banner for the given season exists."""
        location = self.get_season_banner_path(show_obj, season)
        result = location != None and ek.ek(os.path.isfile, location)
        if location:
            logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG)
        return result
    def _has_season_all_poster(self, show_obj):
        """Return True if the 'season all' poster already exists."""
        result = ek.ek(os.path.isfile, self.get_season_all_poster_path(show_obj))
        logger.log(u"Checking if " + self.get_season_all_poster_path(show_obj) + " exists: " + str(result), logger.DEBUG)
        return result
    def _has_season_all_banner(self, show_obj):
        """Return True if the 'season all' banner already exists."""
        result = ek.ek(os.path.isfile, self.get_season_all_banner_path(show_obj))
        logger.log(u"Checking if " + self.get_season_all_banner_path(show_obj) + " exists: " + str(result), logger.DEBUG)
        return result
    def get_show_file_path(self, show_obj):
        """Full path of the show-level metadata file (tvshow.nfo)."""
        return ek.ek(os.path.join, show_obj.location, self._show_metadata_filename)
    def get_episode_file_path(self, ep_obj):
        """Episode metadata path: the media file with the nfo extension."""
        return helpers.replaceExtension(ep_obj.location, self._ep_nfo_extension)
    def get_fanart_path(self, show_obj):
        """Full path of the show fanart image."""
        return ek.ek(os.path.join, show_obj.location, self.fanart_name)
    def get_poster_path(self, show_obj):
        """Full path of the show poster image."""
        return ek.ek(os.path.join, show_obj.location, self.poster_name)
    def get_banner_path(self, show_obj):
        """Full path of the show banner image."""
        return ek.ek(os.path.join, show_obj.location, self.banner_name)
    def get_episode_thumb_path(self, ep_obj):
        """
        Returns the path where the episode thumbnail should be stored.
        Returns None when the episode's media file does not exist.
        ep_obj: a TVEpisode instance for which to create the thumbnail
        """
        if ek.ek(os.path.isfile, ep_obj.location):
            tbn_filename = ep_obj.location.rpartition(".")
            # No extension found: append the suffix to the whole name.
            if tbn_filename[0] == "":
                tbn_filename = ep_obj.location + "-thumb.jpg"
            else:
                tbn_filename = tbn_filename[0] + "-thumb.jpg"
        else:
            return None
        return tbn_filename
    def get_season_poster_path(self, show_obj, season):
        """
        Returns the full path to the file for a given season poster.
        show_obj: a TVShow instance for which to generate the path
        season: a season number to be used for the path. Note that season 0
        means specials.
        """
        # Our specials thumbnail is, well, special
        if season == 0:
            season_poster_filename = 'season-specials'
        else:
            season_poster_filename = 'season' + str(season).zfill(2)
        return ek.ek(os.path.join, show_obj.location, season_poster_filename + '-poster.jpg')
    def get_season_banner_path(self, show_obj, season):
        """
        Returns the full path to the file for a given season banner.
        show_obj: a TVShow instance for which to generate the path
        season: a season number to be used for the path. Note that season 0
        means specials.
        """
        # Our specials thumbnail is, well, special
        if season == 0:
            season_banner_filename = 'season-specials'
        else:
            season_banner_filename = 'season' + str(season).zfill(2)
        return ek.ek(os.path.join, show_obj.location, season_banner_filename + '-banner.jpg')
    def get_season_all_poster_path(self, show_obj):
        """Full path of the 'season all' poster image."""
        return ek.ek(os.path.join, show_obj.location, self.season_all_poster_name)
    def get_season_all_banner_path(self, show_obj):
        """Full path of the 'season all' banner image."""
        return ek.ek(os.path.join, show_obj.location, self.season_all_banner_name)
    def _show_data(self, show_obj):
        """
        This should be overridden by the implementing class. It should
        provide the content of the show metadata file.
        """
        return None
    def _ep_data(self, ep_obj):
        """
        This should be overridden by the implementing class. It should
        provide the content of the episode metadata file.
        """
        return None
    # The create_* methods below are the public entry points: each one
    # checks its feature flag and whether the artifact already exists,
    # then delegates to the matching write_*/save_* method. They return
    # True on success and False when skipped or failed.
    def create_show_metadata(self, show_obj):
        """Write show metadata if enabled and not already present."""
        if self.show_metadata and show_obj and not self._has_show_metadata(show_obj):
            logger.log(u"Metadata provider " + self.name + " creating show metadata for " + show_obj.name, logger.DEBUG)
            return self.write_show_file(show_obj)
        return False
    def create_episode_metadata(self, ep_obj):
        """Write episode metadata if enabled and not already present."""
        if self.episode_metadata and ep_obj and not self._has_episode_metadata(ep_obj):
            logger.log(u"Metadata provider " + self.name + " creating episode metadata for " + ep_obj.prettyName(), logger.DEBUG)
            return self.write_ep_file(ep_obj)
        return False
    def create_fanart(self, show_obj):
        """Download fanart if enabled and not already present."""
        if self.fanart and show_obj and not self._has_fanart(show_obj):
            logger.log(u"Metadata provider " + self.name + " creating fanart for " + show_obj.name, logger.DEBUG)
            return self.save_fanart(show_obj)
        return False
    def create_poster(self, show_obj):
        """Download a poster if enabled and not already present."""
        if self.poster and show_obj and not self._has_poster(show_obj):
            logger.log(u"Metadata provider " + self.name + " creating poster for " + show_obj.name, logger.DEBUG)
            return self.save_poster(show_obj)
        return False
    def create_banner(self, show_obj):
        """Download a banner if enabled and not already present."""
        if self.banner and show_obj and not self._has_banner(show_obj):
            logger.log(u"Metadata provider " + self.name + " creating banner for " + show_obj.name, logger.DEBUG)
            return self.save_banner(show_obj)
        return False
    def create_episode_thumb(self, ep_obj):
        """Download an episode thumbnail if enabled and not present."""
        if self.episode_thumbnails and ep_obj and not self._has_episode_thumb(ep_obj):
            logger.log(u"Metadata provider " + self.name + " creating episode thumbnail for " + ep_obj.prettyName(), logger.DEBUG)
            return self.save_thumbnail(ep_obj)
        return False
    def create_season_posters(self, show_obj):
        """Download posters for every season missing one; True only if
        all attempted saves succeed."""
        if self.season_posters and show_obj:
            result = []
            for season, episodes in show_obj.episodes.iteritems(): # @UnusedVariable
                if not self._has_season_poster(show_obj, season):
                    logger.log(u"Metadata provider " + self.name + " creating season posters for " + show_obj.name, logger.DEBUG)
                    result = result + [self.save_season_posters(show_obj, season)]
            # NOTE: all([]) is True, so a show with no missing posters
            # reports success.
            return all(result)
        return False
    def create_season_banners(self, show_obj):
        """Download banners for every season missing one; True only if
        all attempted saves succeed."""
        if self.season_banners and show_obj:
            result = []
            for season, episodes in show_obj.episodes.iteritems(): # @UnusedVariable
                if not self._has_season_banner(show_obj, season):
                    logger.log(u"Metadata provider " + self.name + " creating season banners for " + show_obj.name, logger.DEBUG)
                    result = result + [self.save_season_banners(show_obj, season)]
            return all(result)
        return False
    def create_subtitles(self, ep_obj, force=False):
        """Queue a subtitle download for the episode if enabled and no
        .srt file is found on disk. Returns True once queued (the
        download itself happens asynchronously)."""
        if self.subtitles and ep_obj and not self._has_episode_subtitle(ep_obj):
            logger.log("Metadata provider "+self.name+" added to SUBTITLE-QUEUE: "+ep_obj.prettyName(), logger.DEBUG)
            # make a queue item for it and put it on the queue
            sub_queue_item = subtitle_queue.SubtitleQueueItem(ep_obj, force)
            sickbeard.subtitleQueueScheduler.action.add_item(sub_queue_item)
            return True
        return False
    def create_season_all_poster(self, show_obj):
        """Download the 'season all' poster if enabled and missing."""
        if self.season_all_poster and show_obj and not self._has_season_all_poster(show_obj):
            logger.log(u"Metadata provider " + self.name + " creating season all poster for " + show_obj.name, logger.DEBUG)
            return self.save_season_all_poster(show_obj)
        return False
    def create_season_all_banner(self, show_obj):
        """Download the 'season all' banner if enabled and missing."""
        if self.season_all_banner and show_obj and not self._has_season_all_banner(show_obj):
            logger.log(u"Metadata provider " + self.name + " creating season all banner for " + show_obj.name, logger.DEBUG)
            return self.save_season_all_banner(show_obj)
        return False
    def _get_episode_thumb_url(self, ep_obj):
        """
        Returns the URL to use for downloading an episode's thumbnail. Uses
        theTVDB.com data. Returns None when the show can't be looked up or
        no episode in the group has a thumb.
        ep_obj: a TVEpisode object for which to grab the thumb URL
        """
        all_eps = [ep_obj] + ep_obj.relatedEps
        tvdb_lang = ep_obj.show.lang
        # get a TVDB object
        try:
            # There's gotta be a better way of doing this but we don't wanna
            # change the language value elsewhere
            ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()
            if tvdb_lang and not tvdb_lang == 'en':
                ltvdb_api_parms['language'] = tvdb_lang
            t = tvdb_api.Tvdb(actors=True, **ltvdb_api_parms)
            tvdb_show_obj = t[ep_obj.show.tvdbid]
        except tvdb_exceptions.tvdb_shownotfound, e:
            raise exceptions.ShowNotFoundException(e.message)
        except tvdb_exceptions.tvdb_error, e:
            logger.log(u"Unable to connect to TVDB while creating meta files - skipping - " + ex(e), logger.ERROR)
            return None
        # try all included episodes in case some have thumbs and others don't
        for cur_ep in all_eps:
            try:
                myEp = tvdb_show_obj[cur_ep.season][cur_ep.episode]
            except (tvdb_exceptions.tvdb_episodenotfound, tvdb_exceptions.tvdb_seasonnotfound):
                logger.log(u"Unable to find episode " + str(cur_ep.season) + "x" + str(cur_ep.episode) + " on tvdb... has it been removed? Should I delete from db?")
                continue
            thumb_url = myEp["filename"]
            if thumb_url:
                return thumb_url
        return None
    def write_show_file(self, show_obj):
        """
        Generates and writes show_obj's metadata under the given path to the
        filename given by get_show_file_path()
        show_obj: TVShow object for which to create the metadata
        path: An absolute or relative path where we should put the file. Note that
        the file name will be the default show_file_name.
        Note that this method expects that _show_data will return an ElementTree
        object. If your _show_data returns data in another format you'll need to
        override this method.
        """
        data = self._show_data(show_obj)
        if not data:
            return False
        nfo_file_path = self.get_show_file_path(show_obj)
        nfo_file_dir = ek.ek(os.path.dirname, nfo_file_path)
        try:
            if not ek.ek(os.path.isdir, nfo_file_dir):
                logger.log(u"Metadata dir didn't exist, creating it at " + nfo_file_dir, logger.DEBUG)
                ek.ek(os.makedirs, nfo_file_dir)
                helpers.chmodAsParent(nfo_file_dir)
            logger.log(u"Writing show nfo file to " + nfo_file_path, logger.DEBUG)
            nfo_file = ek.ek(open, nfo_file_path, 'w')
            # ElementTree.write serializes the XML straight to the file.
            data.write(nfo_file, encoding="utf-8")
            nfo_file.close()
            helpers.chmodAsParent(nfo_file_path)
        except IOError, e:
            logger.log(u"Unable to write file to " + nfo_file_path + " - are you sure the folder is writable? " + ex(e), logger.ERROR)
            return False
        return True
    def write_ep_file(self, ep_obj):
        """
        Generates and writes ep_obj's metadata under the given path with the
        given filename root. Uses the episode's name with the extension in
        _ep_nfo_extension.
        ep_obj: TVEpisode object for which to create the metadata
        file_name_path: The file name to use for this metadata. Note that the extension
        will be automatically added based on _ep_nfo_extension. This should
        include an absolute path.
        Note that this method expects that _ep_data will return an ElementTree
        object. If your _ep_data returns data in another format you'll need to
        override this method.
        """
        data = self._ep_data(ep_obj)
        if not data:
            return False
        nfo_file_path = self.get_episode_file_path(ep_obj)
        nfo_file_dir = ek.ek(os.path.dirname, nfo_file_path)
        try:
            if not ek.ek(os.path.isdir, nfo_file_dir):
                logger.log(u"Metadata dir didn't exist, creating it at " + nfo_file_dir, logger.DEBUG)
                ek.ek(os.makedirs, nfo_file_dir)
                helpers.chmodAsParent(nfo_file_dir)
            logger.log(u"Writing episode nfo file to " + nfo_file_path, logger.DEBUG)
            nfo_file = ek.ek(open, nfo_file_path, 'w')
            data.write(nfo_file, encoding="utf-8")
            nfo_file.close()
            helpers.chmodAsParent(nfo_file_path)
        except IOError, e:
            logger.log(u"Unable to write file to " + nfo_file_path + " - are you sure the folder is writable? " + ex(e), logger.ERROR)
            return False
        return True
    def save_thumbnail(self, ep_obj):
        """
        Retrieves a thumbnail and saves it to the correct spot. This method should not need to
        be overridden by implementing classes, changing get_episode_thumb_path and
        _get_episode_thumb_url should suffice.
        ep_obj: a TVEpisode object for which to generate a thumbnail
        """
        file_path = self.get_episode_thumb_path(ep_obj)
        if not file_path:
            logger.log(u"Unable to find a file path to use for this thumbnail, not generating it", logger.DEBUG)
            return False
        thumb_url = self._get_episode_thumb_url(ep_obj)
        # if we can't find one then give up
        if not thumb_url:
            logger.log(u"No thumb is available for this episode, not creating a thumb", logger.DEBUG)
            return False
        thumb_data = metadata_helpers.getShowImage(thumb_url)
        result = self._write_image(thumb_data, file_path)
        if not result:
            return False
        # Mark the episode (and any episodes sharing the file) as having
        # a thumbnail.
        for cur_ep in [ep_obj] + ep_obj.relatedEps:
            cur_ep.hastbn = True
        return True
    def save_fanart(self, show_obj, which=None):
        """
        Downloads a fanart image and saves it to the filename specified by fanart_name
        inside the show's root folder.
        show_obj: a TVShow object for which to download fanart
        which: optional specific image number to fetch (passed through to
        _retrieve_show_image)
        """
        # use the default fanart name
        fanart_path = self.get_fanart_path(show_obj)
        fanart_data = self._retrieve_show_image('fanart', show_obj, which)
        if not fanart_data:
            logger.log(u"No fanart image was retrieved, unable to write fanart", logger.DEBUG)
            return False
        return self._write_image(fanart_data, fanart_path)
    def save_poster(self, show_obj, which=None):
        """
        Downloads a poster image and saves it to the filename specified by poster_name
        inside the show's root folder.
        show_obj: a TVShow object for which to download a poster
        """
        # use the default poster name
        poster_path = self.get_poster_path(show_obj)
        poster_data = self._retrieve_show_image('poster', show_obj, which)
        if not poster_data:
            logger.log(u"No show poster image was retrieved, unable to write poster", logger.DEBUG)
            return False
        return self._write_image(poster_data, poster_path)
    def save_banner(self, show_obj, which=None):
        """
        Downloads a banner image and saves it to the filename specified by banner_name
        inside the show's root folder.
        show_obj: a TVShow object for which to download a banner
        """
        # use the default banner name
        banner_path = self.get_banner_path(show_obj)
        banner_data = self._retrieve_show_image('banner', show_obj, which)
        if not banner_data:
            logger.log(u"No show banner image was retrieved, unable to write banner", logger.DEBUG)
            return False
        return self._write_image(banner_data, banner_path)
    def save_season_posters(self, show_obj, season):
        """
        Saves all season posters to disk for the given show.
        show_obj: a TVShow object for which to save the season thumbs
        Cycles through all seasons and saves the season posters if possible. This
        method should not need to be overridden by implementing classes, changing
        _season_posters_dict and get_season_poster_path should be good enough.
        """
        season_dict = self._season_posters_dict(show_obj, season)
        result = []
        # Returns a nested dictionary of season art with the season
        # number as primary key. It's really overkill but gives the option
        # to present to user via ui to pick down the road.
        for cur_season in season_dict:
            cur_season_art = season_dict[cur_season]
            if len(cur_season_art) == 0:
                continue
            # Just grab whatever's there for now
            art_id, season_url = cur_season_art.popitem() # @UnusedVariable
            season_poster_file_path = self.get_season_poster_path(show_obj, cur_season)
            if not season_poster_file_path:
                logger.log(u"Path for season " + str(cur_season) + " came back blank, skipping this season", logger.DEBUG)
                continue
            seasonData = metadata_helpers.getShowImage(season_url)
            if not seasonData:
                logger.log(u"No season poster data available, skipping this season", logger.DEBUG)
                continue
            result = result + [self._write_image(seasonData, season_poster_file_path)]
        if result:
            return all(result)
        else:
            return False
        # NOTE(review): the statement below is unreachable -- both
        # branches above return. Left in place to preserve behavior.
        return True
    def save_season_banners(self, show_obj, season):
        """
        Saves all season banners to disk for the given show.
        show_obj: a TVShow object for which to save the season thumbs
        Cycles through all seasons and saves the season banners if possible. This
        method should not need to be overridden by implementing classes, changing
        _season_banners_dict and get_season_banner_path should be good enough.
        """
        season_dict = self._season_banners_dict(show_obj, season)
        result = []
        # Returns a nested dictionary of season art with the season
        # number as primary key. It's really overkill but gives the option
        # to present to user via ui to pick down the road.
        for cur_season in season_dict:
            cur_season_art = season_dict[cur_season]
            if len(cur_season_art) == 0:
                continue
            # Just grab whatever's there for now
            art_id, season_url = cur_season_art.popitem() # @UnusedVariable
            season_banner_file_path = self.get_season_banner_path(show_obj, cur_season)
            if not season_banner_file_path:
                logger.log(u"Path for season " + str(cur_season) + " came back blank, skipping this season", logger.DEBUG)
                continue
            seasonData = metadata_helpers.getShowImage(season_url)
            if not seasonData:
                logger.log(u"No season banner data available, skipping this season", logger.DEBUG)
                continue
            result = result + [self._write_image(seasonData, season_banner_file_path)]
        if result:
            return all(result)
        else:
            return False
        # NOTE(review): unreachable, same as in save_season_posters.
        return True
    def save_season_all_poster(self, show_obj, which=None):
        """Download and save the 'season all' poster for the show."""
        # use the default season all poster name
        poster_path = self.get_season_all_poster_path(show_obj)
        poster_data = self._retrieve_show_image('poster', show_obj, which)
        if not poster_data:
            logger.log(u"No show poster image was retrieved, unable to write season all poster", logger.DEBUG)
            return False
        return self._write_image(poster_data, poster_path)
    def save_season_all_banner(self, show_obj, which=None):
        """Download and save the 'season all' banner for the show."""
        # use the default season all banner name
        banner_path = self.get_season_all_banner_path(show_obj)
        banner_data = self._retrieve_show_image('banner', show_obj, which)
        if not banner_data:
            logger.log(u"No show banner image was retrieved, unable to write season all banner", logger.DEBUG)
            return False
        return self._write_image(banner_data, banner_path)
    def _write_image(self, image_data, image_path):
        """
        Saves the data in image_data to the location image_path. Returns True/False
        to represent success or failure.
        image_data: binary image data to write to file
        image_path: file location to save the image to
        """
        # don't bother overwriting it
        if ek.ek(os.path.isfile, image_path):
            logger.log(u"Image already exists, not downloading", logger.DEBUG)
            return False
        if not image_data:
            logger.log(u"Unable to retrieve image, skipping", logger.WARNING)
            return False
        image_dir = ek.ek(os.path.dirname, image_path)
        try:
            if not ek.ek(os.path.isdir, image_dir):
                logger.log(u"Metadata dir didn't exist, creating it at " + image_dir, logger.DEBUG)
                ek.ek(os.makedirs, image_dir)
                helpers.chmodAsParent(image_dir)
            outFile = ek.ek(open, image_path, 'wb')
            outFile.write(image_data)
            outFile.close()
            helpers.chmodAsParent(image_path)
        except IOError, e:
            logger.log(u"Unable to write image to " + image_path + " - are you sure the show folder is writable? " + ex(e), logger.ERROR)
            return False
        return True
    def _retrieve_show_image(self, image_type, show_obj, which=None):
        """
        Gets an image URL from theTVDB.com, downloads it and returns the data.
        image_type: type of image to retrieve (currently supported: fanart, poster, banner)
        show_obj: a TVShow object to use when searching for the image
        which: optional, a specific numbered poster to look for
        Returns: the binary image data if available, or else None
        """
        tvdb_lang = show_obj.lang
        try:
            # There's gotta be a better way of doing this but we don't wanna
            # change the language value elsewhere
            ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()
            if tvdb_lang and not tvdb_lang == 'en':
                ltvdb_api_parms['language'] = tvdb_lang
            t = tvdb_api.Tvdb(banners=True, **ltvdb_api_parms)
            tvdb_show_obj = t[show_obj.tvdbid]
        except (tvdb_exceptions.tvdb_error, IOError), e:
            logger.log(u"Unable to look up show on TVDB, not downloading images: " + ex(e), logger.ERROR)
            return None
        if image_type not in ('fanart', 'poster', 'banner'):
            logger.log(u"Invalid image type " + str(image_type) + ", couldn't find it in the TVDB object", logger.ERROR)
            return None
        image_url = tvdb_show_obj[image_type]
        image_data = metadata_helpers.getShowImage(image_url, which)
        return image_data
    def _season_posters_dict(self, show_obj, season):
        """
        Should return a dict like:
        result = {<season number>:
        {1: '<url 1>', 2: <url 2>, ...},}
        """
        # This holds our resulting dictionary of season art
        result = {}
        tvdb_lang = show_obj.lang
        try:
            # There's gotta be a better way of doing this but we don't wanna
            # change the language value elsewhere
            ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()
            if tvdb_lang and not tvdb_lang == 'en':
                ltvdb_api_parms['language'] = tvdb_lang
            t = tvdb_api.Tvdb(banners=True, **ltvdb_api_parms)
            tvdb_show_obj = t[show_obj.tvdbid]
        except (tvdb_exceptions.tvdb_error, IOError), e:
            logger.log(u"Unable to look up show on TVDB, not downloading images: " + ex(e), logger.ERROR)
            return result
        # if we have no season banners then just finish
        if 'season' not in tvdb_show_obj['_banners'] or 'season' not in tvdb_show_obj['_banners']['season']:
            return result
        # Give us just the normal poster-style season graphics
        seasonsArtObj = tvdb_show_obj['_banners']['season']['season']
        # Returns a nested dictionary of season art with the season
        # number as primary key. It's really overkill but gives the option
        # to present to user via ui to pick down the road.
        result[season] = {}
        # find the correct season in the tvdb object and just copy the dict into our result dict
        for seasonArtID in seasonsArtObj.keys():
            if int(seasonsArtObj[seasonArtID]['season']) == season and seasonsArtObj[seasonArtID]['language'] == 'en':
                result[season][seasonArtID] = seasonsArtObj[seasonArtID]['_bannerpath']
        return result
    def _season_banners_dict(self, show_obj, season):
        """
        Should return a dict like:
        result = {<season number>:
        {1: '<url 1>', 2: <url 2>, ...},}
        """
        # This holds our resulting dictionary of season art
        result = {}
        tvdb_lang = show_obj.lang
        try:
            # There's gotta be a better way of doing this but we don't wanna
            # change the language value elsewhere
            ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()
            if tvdb_lang and not tvdb_lang == 'en':
                ltvdb_api_parms['language'] = tvdb_lang
            t = tvdb_api.Tvdb(banners=True, **ltvdb_api_parms)
            tvdb_show_obj = t[show_obj.tvdbid]
        except (tvdb_exceptions.tvdb_error, IOError), e:
            logger.log(u"Unable to look up show on TVDB, not downloading images: " + ex(e), logger.ERROR)
            return result
        # if we have no season banners then just finish
        if 'season' not in tvdb_show_obj['_banners'] or 'seasonwide' not in tvdb_show_obj['_banners']['season']:
            return result
        # Give us just the normal season graphics
        seasonsArtObj = tvdb_show_obj['_banners']['season']['seasonwide']
        # Returns a nested dictionary of season art with the season
        # number as primary key. It's really overkill but gives the option
        # to present to user via ui to pick down the road.
        result[season] = {}
        # find the correct season in the tvdb object and just copy the dict into our result dict
        for seasonArtID in seasonsArtObj.keys():
            if int(seasonsArtObj[seasonArtID]['season']) == season and seasonsArtObj[seasonArtID]['language'] == 'en':
                result[season][seasonArtID] = seasonsArtObj[seasonArtID]['_bannerpath']
        return result
    def retrieveShowMetadata(self, folder):
        """
        Used only when mass adding Existing Shows, using previously generated Show metadata to reduce the need to query TVDB.
        Returns a (tvdb_id, name) tuple, or (None, None) when the metadata
        file is missing or unparseable.
        """
        empty_return = (None, None)
        metadata_path = ek.ek(os.path.join, folder, self._show_metadata_filename)
        if not ek.ek(os.path.isdir, folder) or not ek.ek(os.path.isfile, metadata_path):
            logger.log(u"Can't load the metadata file from " + repr(metadata_path) + ", it doesn't exist", logger.DEBUG)
            return empty_return
        logger.log(u"Loading show info from metadata file in " + folder, logger.DEBUG)
        try:
            with ek.ek(open, metadata_path, 'r') as xmlFileObj:
                showXML = etree.ElementTree(file=xmlFileObj)
            if showXML.findtext('title') == None or (showXML.findtext('tvdbid') == None and showXML.findtext('id') == None):
                logger.log(u"Invalid info in tvshow.nfo (missing name or id):" \
                    + str(showXML.findtext('title')) + " " \
                    + str(showXML.findtext('tvdbid')) + " " \
                    + str(showXML.findtext('id')))
                return empty_return
            name = showXML.findtext('title')
            # Prefer the explicit <tvdbid> element, fall back to <id>.
            if showXML.findtext('tvdbid') != None:
                tvdb_id = int(showXML.findtext('tvdbid'))
            elif showXML.findtext('id'):
                tvdb_id = int(showXML.findtext('id'))
            else:
                logger.log(u"Empty <id> or <tvdbid> field in NFO, unable to find an ID", logger.WARNING)
                return empty_return
            if not tvdb_id:
                logger.log(u"Invalid tvdb id (" + str(tvdb_id) + "), not using metadata file", logger.WARNING)
                return empty_return
        except Exception, e:
            logger.log(u"There was an error parsing your existing metadata file: '" + metadata_path + "' error: " + ex(e), logger.WARNING)
            return empty_return
        return (tvdb_id, name)
| gpl-3.0 |
yewang15215/django | tests/validation/test_unique.py | 28 | 6775 | from __future__ import unicode_literals
import datetime
import unittest
from django.apps.registry import Apps
from django.core.exceptions import ValidationError
from django.db import models
from django.test import TestCase
from .models import (
CustomPKModel, FlexibleDatePost, ModelToValidate, Post, UniqueErrorsModel,
UniqueFieldsModel, UniqueForDateModel, UniqueTogetherModel,
)
class GetUniqueCheckTests(unittest.TestCase):
    """Tests for Model._get_unique_checks(): how unique, unique_together
    and unique_for_date/year/month constraints are collected."""

    def test_unique_fields_get_collected(self):
        """Each unique=True field (plus the implicit pk) yields one check."""
        m = UniqueFieldsModel()
        self.assertEqual(
            ([(UniqueFieldsModel, ('id',)),
              (UniqueFieldsModel, ('unique_charfield',)),
              (UniqueFieldsModel, ('unique_integerfield',))],
             []),
            m._get_unique_checks()
        )

    def test_unique_together_gets_picked_up_and_converted_to_tuple(self):
        """Meta.unique_together entries appear as tuples, plus the pk check."""
        m = UniqueTogetherModel()
        self.assertEqual(
            ([(UniqueTogetherModel, ('ifield', 'cfield')),
              (UniqueTogetherModel, ('ifield', 'efield')),
              (UniqueTogetherModel, ('id',)), ],
             []),
            m._get_unique_checks()
        )

    def test_unique_together_normalization(self):
        """
        Test the Meta.unique_together normalization with different sorts of
        objects.
        """
        # Maps a test name to (raw unique_together value, expected
        # normalized tuple-of-tuples form).
        data = {
            '2-tuple': (('foo', 'bar'), (('foo', 'bar'),)),
            'list': (['foo', 'bar'], (('foo', 'bar'),)),
            'already normalized': ((('foo', 'bar'), ('bar', 'baz')),
                                   (('foo', 'bar'), ('bar', 'baz'))),
            'set': ({('foo', 'bar'), ('bar', 'baz')},  # Ref #21469
                    (('foo', 'bar'), ('bar', 'baz'))),
        }
        for test_name, (unique_together, normalized) in data.items():
            # Build a throwaway model per case; a fresh Apps() registry
            # avoids "model already registered" clashes between cases.
            class M(models.Model):
                foo = models.IntegerField()
                bar = models.IntegerField()
                baz = models.IntegerField()
                Meta = type(str('Meta'), (), {
                    'unique_together': unique_together,
                    'apps': Apps()
                })
            checks, _ = M()._get_unique_checks()
            # A set is unordered, so assert membership rather than order.
            for t in normalized:
                check = (M, t)
                self.assertIn(check, checks)

    def test_primary_key_is_considered_unique(self):
        """A custom primary key produces a uniqueness check of its own."""
        m = CustomPKModel()
        self.assertEqual(([(CustomPKModel, ('my_pk_field',))], []), m._get_unique_checks())

    def test_unique_for_date_gets_picked_up(self):
        """unique_for_date/year/month options land in the date-check list."""
        m = UniqueForDateModel()
        self.assertEqual((
            [(UniqueForDateModel, ('id',))],
            [(UniqueForDateModel, 'date', 'count', 'start_date'),
             (UniqueForDateModel, 'year', 'count', 'end_date'),
             (UniqueForDateModel, 'month', 'order', 'end_date')]
        ), m._get_unique_checks()
        )

    def test_unique_for_date_exclusion(self):
        """Checks that reference an excluded field are dropped."""
        m = UniqueForDateModel()
        self.assertEqual((
            [(UniqueForDateModel, ('id',))],
            [(UniqueForDateModel, 'year', 'count', 'end_date'),
             (UniqueForDateModel, 'month', 'order', 'end_date')]
        ), m._get_unique_checks(exclude='start_date')
        )
class PerformUniqueChecksTest(TestCase):
    """Integration tests for full_clean() running the unique checks.

    assertNumQueries pins down exactly when a database lookup is (not)
    issued for the primary-key uniqueness check.
    """

    def test_primary_key_unique_check_not_performed_when_adding_and_pk_not_specified(self):
        # Regression test for #12560
        # No pk value given -> no point querying for a duplicate pk.
        with self.assertNumQueries(0):
            mtv = ModelToValidate(number=10, name='Some Name')
            setattr(mtv, '_adding', True)
            mtv.full_clean()

    def test_primary_key_unique_check_performed_when_adding_and_pk_specified(self):
        # Regression test for #12560
        # An explicit pk on a new instance requires one duplicate-check query.
        with self.assertNumQueries(1):
            mtv = ModelToValidate(number=10, name='Some Name', id=123)
            setattr(mtv, '_adding', True)
            mtv.full_clean()

    def test_primary_key_unique_check_not_performed_when_not_adding(self):
        # Regression test for #12132
        # Existing instances skip the pk uniqueness query entirely.
        with self.assertNumQueries(0):
            mtv = ModelToValidate(number=10, name='Some Name')
            mtv.full_clean()

    def test_unique_for_date(self):
        # Seed one Post, then probe each unique_for_date/year/month
        # constraint declared on the model.
        Post.objects.create(
            title="Django 1.0 is released", slug="Django 1.0",
            subtitle="Finally", posted=datetime.date(2008, 9, 3),
        )
        # Same title on the same date violates unique_for_date.
        p = Post(title="Django 1.0 is released", posted=datetime.date(2008, 9, 3))
        with self.assertRaises(ValidationError) as cm:
            p.full_clean()
        self.assertEqual(cm.exception.message_dict, {'title': ['Title must be unique for Posted date.']})

        # Should work without errors
        p = Post(title="Work on Django 1.1 begins", posted=datetime.date(2008, 9, 3))
        p.full_clean()

        # Should work without errors
        p = Post(title="Django 1.0 is released", posted=datetime.datetime(2008, 9, 4))
        p.full_clean()

        # Same slug within the same year violates unique_for_year.
        p = Post(slug="Django 1.0", posted=datetime.datetime(2008, 1, 1))
        with self.assertRaises(ValidationError) as cm:
            p.full_clean()
        self.assertEqual(cm.exception.message_dict, {'slug': ['Slug must be unique for Posted year.']})

        # Same subtitle within the same month violates unique_for_month.
        p = Post(subtitle="Finally", posted=datetime.datetime(2008, 9, 30))
        with self.assertRaises(ValidationError) as cm:
            p.full_clean()
        self.assertEqual(cm.exception.message_dict, {'subtitle': ['Subtitle must be unique for Posted month.']})

        # The date field itself is required for the checks to run.
        p = Post(title="Django 1.0 is released")
        with self.assertRaises(ValidationError) as cm:
            p.full_clean()
        self.assertEqual(cm.exception.message_dict, {'posted': ['This field cannot be null.']})

    def test_unique_for_date_with_nullable_date(self):
        """
        unique_for_date/year/month checks shouldn't trigger when the
        associated DateField is None.
        """
        FlexibleDatePost.objects.create(
            title="Django 1.0 is released", slug="Django 1.0",
            subtitle="Finally", posted=datetime.date(2008, 9, 3),
        )
        p = FlexibleDatePost(title="Django 1.0 is released")
        p.full_clean()

        p = FlexibleDatePost(slug="Django 1.0")
        p.full_clean()

        p = FlexibleDatePost(subtitle="Finally")
        p.full_clean()

    def test_unique_errors(self):
        # error_messages={'unique': ...} on the field overrides the
        # default "already exists" message.
        UniqueErrorsModel.objects.create(name='Some Name', no=10)
        m = UniqueErrorsModel(name='Some Name', no=11)
        with self.assertRaises(ValidationError) as cm:
            m.full_clean()
        self.assertEqual(cm.exception.message_dict, {'name': ['Custom unique name message.']})

        m = UniqueErrorsModel(name='Some Other Name', no=10)
        with self.assertRaises(ValidationError) as cm:
            m.full_clean()
        self.assertEqual(cm.exception.message_dict, {'no': ['Custom unique number message.']})
| bsd-3-clause |
j-dasilva/COMP4350 | apartment/messaging/message.py | 1 | 1169 | from django.conf import settings
import time
class Message(object):
    """A single user-to-user message.

    Field values may arrive as strings (e.g. from a transport layer);
    numeric and boolean fields are normalised in the constructor.
    """

    def __init__(self, *args, **kwargs):
        data = self.process_args(args, kwargs)
        self.sender = data['sender']
        self.recipient = data['recipient']
        self.urgency = int(data['urgency'])
        self.content = data['content']
        self.timestamp = int(data['timestamp'])
        self.read = (data['read'] == 'True')

    def process_args(self, args, kwargs):
        """Select the field dict: full kwargs, a single positional dict,
        or a generated stub when stubbing is enabled."""
        if len(kwargs) == 6:
            return kwargs
        if len(args) == 1:
            return args[0]
        if settings.CREATE_STUBS:
            # CREATE A STUB MESSAGE
            return self.create_stub()
        raise MessageException()

    def create_stub(self):
        """Return placeholder field values for development/testing."""
        stub = {
            "sender": "StubSender",
            "recipient": "StubRecipient",
            "urgency": "1",
            "content": "Stub Message Body",
            "timestamp": time.time(),
            "read": "False",
        }
        return stub
class MessageException(BaseException):
    """Raised when a Message cannot be built from the supplied arguments."""

    # Fixed error text; the constructor intentionally takes no arguments.
    _MESSAGE = "Failed to create Message. Please refer to constructor."

    def __init__(self):
        super(MessageException, self).__init__(self._MESSAGE)
ran5515/DeepDecision | tensorflow/python/debug/__init__.py | 16 | 2580 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public Python API of TensorFlow Debugger (tfdbg).
See the @{$python/tfdbg} guide.
@@add_debug_tensor_watch
@@watch_graph
@@watch_graph_with_blacklists
@@DebugTensorDatum
@@DebugDumpDir
@@load_tensor_from_event
@@load_tensor_from_event_file
@@has_inf_or_nan
@@DumpingDebugHook
@@DumpingDebugWrapperSession
@@GrpcDebugHook
@@GrpcDebugWrapperSession
@@LocalCLIDebugHook
@@LocalCLIDebugWrapperSession
@@WatchOptions
@@GradientsDebugger
@@clear_gradient_debuggers
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-imports
from tensorflow.python.debug.lib.debug_data import DebugDumpDir
from tensorflow.python.debug.lib.debug_data import DebugTensorDatum
from tensorflow.python.debug.lib.debug_data import has_inf_or_nan
from tensorflow.python.debug.lib.debug_data import load_tensor_from_event
from tensorflow.python.debug.lib.debug_data import load_tensor_from_event_file
from tensorflow.python.debug.lib.debug_gradients import GradientsDebugger
from tensorflow.python.debug.lib.debug_utils import add_debug_tensor_watch
from tensorflow.python.debug.lib.debug_utils import watch_graph
from tensorflow.python.debug.lib.debug_utils import watch_graph_with_blacklists
from tensorflow.python.debug.wrappers.dumping_wrapper import DumpingDebugWrapperSession
from tensorflow.python.debug.wrappers.framework import WatchOptions
from tensorflow.python.debug.wrappers.grpc_wrapper import GrpcDebugWrapperSession
from tensorflow.python.debug.wrappers.hooks import DumpingDebugHook
from tensorflow.python.debug.wrappers.hooks import GrpcDebugHook
from tensorflow.python.debug.wrappers.hooks import LocalCLIDebugHook
from tensorflow.python.debug.wrappers.local_cli_wrapper import LocalCLIDebugWrapperSession
from tensorflow.python.util import all_util as _all_util
_all_util.remove_undocumented(__name__)
| apache-2.0 |
inovtec-solutions/OpenERP | openerp/addons/base_report_designer/plugin/openerp_report_designer/bin/script/Repeatln.py | 90 | 13231 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import string
import unohelper
import xmlrpclib
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from lib.error import ErrorDialog
from lib.functions import *
from ServerParameter import *
from lib.logreport import *
from lib.rpc import *
from LoginTest import *
database="test_db1"
uid = 3
#class RepeatIn:
class RepeatIn( unohelper.Base, XJobExecutor ):
    """Modal OpenOffice dialog that inserts or edits a ``repeatIn(...)``
    loop placeholder (a DropDown text field) in an OpenERP report
    template.

    NOTE(review): legacy Python 2 code (``<>`` operator, list-returning
    ``filter``, ``reduce``, ``sys.exc_type``) driven through the UNO
    bridge; behavior depends on the live document and server state.
    """
    def __init__(self, sObject="", sVariable="", sFields="", sDisplayName="", bFromModify=False):
        # Interface Design
        LoginTest()
        self.logobj=Logger()
        if not loginstatus and __name__=="package":
            exit(1)
        self.win = DBModalDialog(60, 50, 180, 250, "RepeatIn Builder")
        self.win.addFixedText("lblVariable", 2, 12, 60, 15, "Objects to loop on :")
        self.win.addComboBox("cmbVariable", 180-120-2, 10, 120, 15,True, itemListenerProc=self.cmbVariable_selected)
        self.insVariable = self.win.getControl( "cmbVariable" )
        self.win.addFixedText("lblFields", 10, 32, 60, 15, "Field to loop on :")
        self.win.addComboListBox("lstFields", 180-120-2, 30, 120, 150, False,itemListenerProc=self.lstbox_selected)
        self.insField = self.win.getControl( "lstFields" )
        self.win.addFixedText("lblName", 12, 187, 60, 15, "Variable name :")
        self.win.addEdit("txtName", 180-120-2, 185, 120, 15,)
        self.win.addFixedText("lblUName", 8, 207, 60, 15, "Displayed name :")
        self.win.addEdit("txtUName", 180-120-2, 205, 120, 15,)
        self.win.addButton('btnOK',-2 ,-10,45,15,'Ok', actionListenerProc = self.btnOk_clicked )
        self.win.addButton('btnCancel',-2 - 45 - 5 ,-10,45,15,'Cancel', actionListenerProc = self.btnCancel_clicked )
        global passwd
        self.password = passwd
        global url
        self.sock=RPCSession(url)
        # Variable Declaration
        self.sValue=None
        self.sObj=None
        self.aSectionList=[]
        self.sGVariable=sVariable
        self.sGDisplayName=sDisplayName
        self.aItemList=[]
        self.aComponentAdd=[]
        self.aObjectList=[]
        self.aListRepeatIn=[]
        self.aVariableList=[]
        # Call method to perform Enumration on Report Document
        EnumDocument(self.aItemList,self.aComponentAdd)
        # Perform checking that Field-1 and Field - 4 is available or not alos get Combobox
        # filled if condition is true
        desktop = getDesktop()
        doc = desktop.getCurrentComponent()
        docinfo = doc.getDocumentInfo()
        # Check weather Field-1 is available if not then exit from application
        # (user fields 0 and 3 presumably hold the server host and the
        # root model name set when the report was created -- TODO confirm)
        self.sMyHost= ""
        if not docinfo.getUserFieldValue(3) == "" and not docinfo.getUserFieldValue(0)=="":
            self.sMyHost= docinfo.getUserFieldValue(0)
            self.count=0
            oParEnum = doc.getTextFields().createEnumeration()
            while oParEnum.hasMoreElements():
                oPar = oParEnum.nextElement()
                if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
                    self.count += 1
            getList(self.aObjectList, self.sMyHost,self.count)
            cursor = doc.getCurrentController().getViewCursor()
            text = cursor.getText()
            tcur = text.createTextCursorByRange(cursor)
            # Collect the loop variables that are in scope at the cursor:
            # document-level lists plus variables from enclosing
            # sections/tables.
            self.aVariableList.extend( filter( lambda obj: obj[:obj.find(" ")] == "List", self.aObjectList ) )
            for i in range(len(self.aItemList)):
                try:
                    anItem = self.aItemList[i][1]
                    component = self.aComponentAdd[i]
                    if component == "Document":
                        sLVal = anItem[anItem.find(",'") + 2:anItem.find("')")]
                        self.aVariableList.extend( filter( lambda obj: obj[:obj.find("(")] == sLVal, self.aObjectList ) )
                    if tcur.TextSection:
                        getRecersiveSection(tcur.TextSection,self.aSectionList)
                        if component in self.aSectionList:
                            sLVal = anItem[anItem.find(",'") + 2:anItem.find("')")]
                            self.aVariableList.extend( filter( lambda obj: obj[:obj.find("(")] == sLVal, self.aObjectList ) )
                    if tcur.TextTable:
                        if not component == "Document" and component[component.rfind(".") + 1:] == tcur.TextTable.Name:
                            VariableScope( tcur, self.aVariableList, self.aObjectList, self.aComponentAdd, self.aItemList, component )
                except :
                    # Log and continue with the next item; a single bad
                    # placeholder must not abort the whole scan.
                    import traceback,sys
                    info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
                    self.logobj.log_write('RepeatIn', LOG_ERROR, info)
            self.bModify=bFromModify
            if self.bModify==True:
                # Modify mode: pre-fill the dialog from the existing
                # repeatIn placeholder under the cursor.
                if sObject=="":
                    self.insVariable.setText("List of "+docinfo.getUserFieldValue(3))
                    self.insField.addItem("objects",self.win.getListBoxItemCount("lstFields"))
                    self.win.setEditText("txtName", sVariable)
                    self.win.setEditText("txtUName",sDisplayName)
                    self.sValue= "objects"
                else:
                    sItem=""
                    for anObject in self.aObjectList:
                        if anObject[:anObject.find("(")] == sObject:
                            sItem = anObject
                    self.insVariable.setText( sItem )
                    genTree(
                        sItem[sItem.find("(")+1:sItem.find(")")],
                        self.aListRepeatIn,
                        self.insField,
                        self.sMyHost,
                        2,
                        ending=['one2many','many2many'],
                        recur=['one2many','many2many']
                    )
                    self.sValue= self.win.getListBoxItem("lstFields",self.aListRepeatIn.index(sFields))
            # Resolve human-readable model names from the server and fill
            # the variable combobox ("name (model)" style entries).
            for var in self.aVariableList:
                if var[:8] <> 'List of ':
                    self.model_ids = self.sock.execute(database, uid, self.password, 'ir.model' , 'search', [('model','=',var[var.find("(")+1:var.find(")")])])
                else:
                    self.model_ids = self.sock.execute(database, uid, self.password, 'ir.model' , 'search', [('model','=',var[8:])])
                fields=['name','model']
                self.model_res = self.sock.execute(database, uid, self.password, 'ir.model', 'read', self.model_ids,fields)
                if self.model_res <> []:
                    if var[:8]<>'List of ':
                        self.insVariable.addItem(var[:var.find("(")+1] + self.model_res[0]['name'] + ")" ,self.insVariable.getItemCount())
                    else:
                        self.insVariable.addItem('List of ' + self.model_res[0]['name'] ,self.insVariable.getItemCount())
                else:
                    # Unknown model: fall back to the raw variable string.
                    self.insVariable.addItem(var ,self.insVariable.getItemCount())
            self.win.doModalDialog("lstFields",self.sValue)
        else:
            ErrorDialog("Please Select Appropriate module" ,"Create new report from: \nOpenERP -> Open a New Report")
            self.win.endExecute()

    def lstbox_selected(self, oItemEvent):
        # A field was picked: prefill the variable name and display name
        # edits (kept untouched in modify mode).
        sItem=self.win.getListBoxSelectedItem("lstFields")
        sMain=self.aListRepeatIn[self.win.getListBoxSelectedItemPos("lstFields")]
        if self.bModify==True:
            self.win.setEditText("txtName", self.sGVariable)
            self.win.setEditText("txtUName",self.sGDisplayName)
        else:
            self.win.setEditText("txtName",sMain[sMain.rfind("/")+1:])
            self.win.setEditText("txtUName","|-."+sItem[sItem.rfind("/")+1:]+".-|")

    def cmbVariable_selected(self, oItemEvent):
        # The loop object changed: rebuild the field list for the newly
        # selected model (or reset to the bare "objects" entry).
        if self.count > 0 :
            desktop=getDesktop()
            doc =desktop.getCurrentComponent()
            docinfo=doc.getDocumentInfo()
            self.win.removeListBoxItems("lstFields", 0, self.win.getListBoxItemCount("lstFields"))
            sItem=self.win.getComboBoxText("cmbVariable")
            for var in self.aVariableList:
                if var[:8]=='List of ':
                    if var[:8]==sItem[:8]:
                        sItem = var
                elif var[:var.find("(")+1] == sItem[:sItem.find("(")+1]:
                    sItem = var
            self.aListRepeatIn=[]
            data = ( sItem[sItem.rfind(" ") + 1:] == docinfo.getUserFieldValue(3) ) and docinfo.getUserFieldValue(3) or sItem[sItem.find("(")+1:sItem.find(")")]
            genTree( data, self.aListRepeatIn, self.insField, self.sMyHost, 2, ending=['one2many','many2many'], recur=['one2many','many2many'] )
            self.win.selectListBoxItemPos("lstFields", 0, True )
        else:
            sItem=self.win.getComboBoxText("cmbVariable")
            for var in self.aVariableList:
                if var[:8]=='List of ' and var[:8] == sItem[:8]:
                    sItem = var
            if sItem.find(".")==-1:
                temp=sItem[sItem.rfind("x_"):]
            else:
                temp=sItem[sItem.rfind(".")+1:]
            self.win.setEditText("txtName",temp)
            self.win.setEditText("txtUName","|-."+temp+".-|")
            self.insField.addItem("objects",self.win.getListBoxItemCount("lstFields"))
            self.win.selectListBoxItemPos("lstFields", 0, True )

    def btnOk_clicked(self, oActionEvent):
        # Build the "[[ repeatIn(...) ]]" expression and either update the
        # placeholder under the cursor (modify mode) or insert a new
        # DropDown field at the cursor position.
        desktop=getDesktop()
        doc = desktop.getCurrentComponent()
        cursor = doc.getCurrentController().getViewCursor()
        selectedItem = self.win.getListBoxSelectedItem( "lstFields" )
        selectedItemPos = self.win.getListBoxSelectedItemPos( "lstFields" )
        txtName = self.win.getEditText( "txtName" )
        txtUName = self.win.getEditText( "txtUName" )
        if selectedItem != "" and txtName != "" and txtUName != "":
            sKey=u""+ txtUName
            if selectedItem == "objects":
                sValue=u"[[ repeatIn(" + selectedItem + ",'" + txtName + "') ]]"
            else:
                sObjName=self.win.getComboBoxText("cmbVariable")
                sObjName=sObjName[:sObjName.find("(")]
                sValue=u"[[ repeatIn(" + sObjName + self.aListRepeatIn[selectedItemPos].replace("/",".") + ",'" + txtName +"') ]]"
            if self.bModify == True:
                oCurObj = cursor.TextField
                oCurObj.Items = (sKey,sValue)
                oCurObj.update()
            else:
                oInputList = doc.createInstance("com.sun.star.text.TextField.DropDown")
                if self.win.getListBoxSelectedItem("lstFields") == "objects":
                    oInputList.Items = (sKey,sValue)
                    doc.Text.insertTextContent(cursor,oInputList,False)
                else:
                    sValue=u"[[ repeatIn(" + sObjName + self.aListRepeatIn[selectedItemPos].replace("/",".") + ",'" + txtName +"') ]]"
                    if cursor.TextTable==None:
                        oInputList.Items = (sKey,sValue)
                        doc.Text.insertTextContent(cursor,oInputList,False)
                    else:
                        # Inside a table: insert into the current cell.
                        oInputList.Items = (sKey,sValue)
                        widget = ( cursor.TextTable or selectedItem <> 'objects' ) and cursor.TextTable.getCellByName( cursor.Cell.CellName ) or doc.Text
                        widget.insertTextContent(cursor,oInputList,False)
            self.win.endExecute()
        else:
            ErrorDialog("Please fill appropriate data in Object Field or Name field \nor select particular value from the list of fields.")

    def btnCancel_clicked(self, oActionEvent):
        # Close the dialog without inserting anything.
        self.win.endExecute()
# Entry points: run the dialog directly when executed as a macro/script,
# or register the UNO component when loaded as an extension package.
if __name__<>"package" and __name__=="__main__":
    RepeatIn()
elif __name__=="package":
    g_ImplementationHelper = unohelper.ImplementationHelper()
    g_ImplementationHelper.addImplementation( RepeatIn, "org.openoffice.openerp.report.repeatln", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
benthomasson/ansible | lib/ansible/modules/windows/win_firewall.py | 27 | 2296 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Michael Eaton <meaton@iforium.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_firewall
version_added: '2.4'
short_description: Enable or disable the Windows Firewall
description:
- Enable or Disable Windows Firewall profiles.
options:
profiles:
description:
- Specify one or more profiles to change.
choices:
- Domain
- Private
- Public
default: [Domain, Private, Public]
state:
description:
- Set state of firewall for given profile.
choices:
- enabled
- disabled
requirements:
- This module requires Windows Management Framework 5 or later.
author: Michael Eaton (@MichaelEaton83)
'''
EXAMPLES = r'''
- name: Enable firewall for Domain, Public and Private profiles
win_firewall:
state: enabled
profiles:
- Domain
- Private
- Public
tags: enable_firewall
- name: Disable Domain firewall
win_firewall:
state: disabled
profiles:
- Domain
tags: disable_firewall
'''
RETURN = r'''
enabled:
description: current firewall status for chosen profile (after any potential change)
returned: always
type: bool
sample: true
profiles:
description: chosen profile
returned: always
type: string
sample: Domain
state:
description: desired state of the given firewall profile(s)
returned: always
type: list
sample: enabled
'''
| gpl-3.0 |
ThirdProject/android_external_chromium_org | tools/telemetry/telemetry/page/page_set_archive_info.py | 23 | 6197 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
import re
import shutil
import sys
from telemetry.page import cloud_storage
class PageSetArchiveInfo(object):
  """Maintains the mapping between page URLs and their WPR archive files.

  The mapping is loaded from (and written back to) a JSON metadata file of
  the form {'archives': {wpr_file_name: [url, ...], ...}}.  On
  construction every referenced .wpr archive is fetched from cloud
  storage if it has changed.
  """

  def __init__(self, file_path, data):
    self._file_path = file_path
    self._base_dir = os.path.dirname(file_path)

    # Ensure directory exists.
    if not os.path.exists(self._base_dir):
      os.makedirs(self._base_dir)

    # Download all .wpr files.
    for archive_path in data['archives']:
      archive_path = self._WprFileNameToPath(archive_path)
      try:
        cloud_storage.GetIfChanged(cloud_storage.INTERNAL_BUCKET, archive_path)
      except (cloud_storage.CredentialsError,
              cloud_storage.PermissionError) as e:
        if os.path.exists(archive_path):
          # If the archive exists, assume the user recorded their own and
          # simply warn.
          logging.warning('Could not download WPR archive: %s', archive_path)
        else:
          # If the archive doesn't exist, this is fatal.
          logging.error('Can not run without required WPR archive: %s. '
                        'If you believe you have credentials, follow the '
                        'instructions below. If you do not have credentials, '
                        'you may use record_wpr to make your own recording or '
                        'run against live sites with --allow-live-sites.',
                        archive_path)
          logging.error(e)
          sys.exit(1)

    # Map from the relative path (as it appears in the metadata file) of the
    # .wpr file to a list of urls it supports.
    self._wpr_file_to_urls = data['archives']

    # Map from the page url to a relative path (as it appears in the metadata
    # file) of the .wpr file.
    self._url_to_wpr_file = dict()
    # Find out the wpr file names for each page.
    for wpr_file in data['archives']:
      page_urls = data['archives'][wpr_file]
      for url in page_urls:
        self._url_to_wpr_file[url] = wpr_file
    # Path of an in-progress recording; AddRecordedPages() moves it into
    # its final location.
    self.temp_target_wpr_file_path = None

  @classmethod
  def FromFile(cls, file_path):
    """Loads the metadata file; a missing file yields an empty mapping."""
    if os.path.exists(file_path):
      with open(file_path, 'r') as f:
        data = json.load(f)
        return cls(file_path, data)
    return cls(file_path, {'archives': {}})

  def WprFilePathForPage(self, page):
    """Returns the absolute archive path serving `page`, or None.

    A pending temporary recording (if any) takes precedence over the
    recorded mapping.
    """
    if self.temp_target_wpr_file_path:
      return self.temp_target_wpr_file_path
    wpr_file = self._url_to_wpr_file.get(page.url, None)
    if wpr_file:
      return self._WprFileNameToPath(wpr_file)
    return None

  def AddNewTemporaryRecording(self, temp_target_wpr_file_path):
    self.temp_target_wpr_file_path = temp_target_wpr_file_path

  def AddRecordedPages(self, urls):
    """Promotes the temporary recording to a numbered archive for `urls`."""
    (target_wpr_file, target_wpr_file_path) = self._NextWprFileName()
    for url in urls:
      self._SetWprFileForPage(url, target_wpr_file)
    shutil.move(self.temp_target_wpr_file_path, target_wpr_file_path)

    # Update the hash file.
    with open(target_wpr_file_path + '.sha1', 'wb') as f:
      f.write(cloud_storage.GetHash(target_wpr_file_path))
      f.flush()

    self._WriteToFile()
    self._DeleteAbandonedWprFiles()

  def _DeleteAbandonedWprFiles(self):
    # Update the metadata so that the abandoned wpr files don't have empty url
    # arrays.
    abandoned_wpr_files = self._AbandonedWprFiles()
    for wpr_file in abandoned_wpr_files:
      del self._wpr_file_to_urls[wpr_file]
      # Don't fail if we're unable to delete some of the files.
      wpr_file_path = self._WprFileNameToPath(wpr_file)
      try:
        os.remove(wpr_file_path)
      except Exception:
        logging.warning('Failed to delete file: %s' % wpr_file_path)

  def _AbandonedWprFiles(self):
    # An archive is "abandoned" when no url maps to it any more.
    abandoned_wpr_files = []
    for wpr_file, urls in self._wpr_file_to_urls.iteritems():
      if not urls:
        abandoned_wpr_files.append(wpr_file)
    return abandoned_wpr_files

  def _WriteToFile(self):
    """Writes the metadata into the file passed as constructor parameter."""
    metadata = dict()
    metadata['description'] = (
        'Describes the Web Page Replay archives for a page set. Don\'t edit by '
        'hand! Use record_wpr for updating.')
    metadata['archives'] = self._wpr_file_to_urls.copy()
    # Don't write data for abandoned archives.
    abandoned_wpr_files = self._AbandonedWprFiles()
    for wpr_file in abandoned_wpr_files:
      del metadata['archives'][wpr_file]

    with open(self._file_path, 'w') as f:
      json.dump(metadata, f, indent=4)
      f.flush()

  def _WprFileNameToPath(self, wpr_file):
    # Metadata stores paths relative to the metadata file's directory.
    return os.path.abspath(os.path.join(self._base_dir, wpr_file))

  def _NextWprFileName(self):
    """Creates a new file name for a wpr archive file."""
    # The names are of the format "some_thing_number.wpr". Read the numbers.
    highest_number = -1
    base = None
    for wpr_file in self._wpr_file_to_urls:
      match = re.match(r'(?P<BASE>.*)_(?P<NUMBER>[0-9]+)\.wpr', wpr_file)
      if not match:
        raise Exception('Illegal wpr file name ' + wpr_file)
      highest_number = max(int(match.groupdict()['NUMBER']), highest_number)
      if base and match.groupdict()['BASE'] != base:
        raise Exception('Illegal wpr file name ' + wpr_file +
                        ', doesn\'t begin with ' + base)
      base = match.groupdict()['BASE']
    if not base:
      # If we're creating a completely new info file, use the base name of the
      # page set file.
      base = os.path.splitext(os.path.basename(self._file_path))[0]
    new_filename = '%s_%03d.wpr' % (base, highest_number + 1)
    return new_filename, self._WprFileNameToPath(new_filename)

  def _SetWprFileForPage(self, url, wpr_file):
    """For modifying the metadata when we're going to record a new archive."""
    old_wpr_file = self._url_to_wpr_file.get(url, None)
    if old_wpr_file:
      self._wpr_file_to_urls[old_wpr_file].remove(url)
    self._url_to_wpr_file[url] = wpr_file
    if wpr_file not in self._wpr_file_to_urls:
      self._wpr_file_to_urls[wpr_file] = []
    self._wpr_file_to_urls[wpr_file].append(url)
| bsd-3-clause |
nature-python/youcai-contest | application/utils/cipherutils.py | 1 | 1513 | #!/usr/bin/python
#encoding:utf-8
#
#author:xin.xin
#since:14-5-19上午10:35
#
#
from binascii import b2a_hex, a2b_hex
from Crypto.Cipher import AES
from application import app
class CipherUtils(object):
    """AES-CBC helpers for encrypting/decrypting short text values.

    The key comes from ``app.config['PASSWORD_CIPHER_KEY']`` and must be
    16/24/32 bytes long (AES-128/192/256).  A fixed all-zero IV is used,
    so identical plaintexts always encrypt to identical ciphertexts.
    NOTE(review): a fixed IV weakens CBC; consider a random per-message
    IV stored alongside the ciphertext for new data.
    """

    # AES block size in bytes; plaintext is padded to a multiple of this.
    BLOCK_SIZE = 16

    @staticmethod
    def encrypt(text):
        """Encrypt ``text`` and return the ciphertext as a hex string.

        Texts shorter than one block are right-padded with spaces; longer
        texts are right-padded with NUL bytes up to the next block
        boundary.  Trailing spaces/NULs in the original text therefore do
        not survive a round trip -- see ``decrypt``.
        """
        cryptor = AES.new(app.config['PASSWORD_CIPHER_KEY'], AES.MODE_CBC, '0000000000000000')
        length = CipherUtils.BLOCK_SIZE
        count = len(text)
        if count < length:
            text = text + (' ' * (length - count))
        elif count % length != 0:
            # Pad up to the next multiple of the block size.  (The old
            # code also appended a full dummy NUL block when the text was
            # already block-aligned; the modulo guard avoids that.)
            text = text + ('\0' * (length - (count % length)))
        ciphertext = cryptor.encrypt(text)
        # Hex-encode so the result is safe to store and print as ASCII.
        return b2a_hex(ciphertext)

    @staticmethod
    def decrypt(text):
        """Decrypt a hex-encoded ciphertext produced by ``encrypt``.

        Strips the trailing NUL *and* space padding added by ``encrypt``.
        (Bug fix: the previous implementation only stripped NULs, so
        texts shorter than one block -- which are space-padded -- came
        back with trailing spaces appended.)
        """
        cryptor = AES.new(app.config['PASSWORD_CIPHER_KEY'], AES.MODE_CBC, '0000000000000000')
        plain_text = cryptor.decrypt(a2b_hex(text))
        return plain_text.rstrip('\0').rstrip(' ')
| apache-2.0 |
gmalik9/voicex | http_handler/views.py | 5 | 2428 | """
Copyright (c) 2012 Anant Bhardwaj
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from django.http import *
from django.shortcuts import render_to_response
from django.views.decorators.csrf import *
import json
from voicex.main import VoiceX
from transport import config
'''
Main Query Handler (Views)
@author: Anant Bhardwaj
@date: Oct 8, 2012
'''
# Module-level transport singletons: one VoiceX session per SMS provider
# (gv = US/Google Voice credentials, at = Kenya credentials).
gv = VoiceX(auth= config.GV_VOICEX_AUTH_2)
at = VoiceX(auth= config.AT_VOICEX_AUTH)
def index(request):
    """Render the static landing page."""
    return render_to_response("index.html")
def _relay_message(request, transport):
    """Shared POST handler for the regional VoiceX SMS webhooks.

    Extracts the sender ('number' or 'from' key) and 'text' from the POST
    payload and forwards them to *transport* (a VoiceX instance).

    Returns an HttpResponse with body "ok" on success, "error" if the
    transport raised, and "invalid request type" for non-POST requests --
    the same plain-text protocol the original duplicated handlers used.
    """
    if not request.POST:
        return HttpResponse("invalid request type")
    msg_data = {}
    if 'number' in request.POST:
        msg_data['from'] = request.POST['number']
    if 'text' in request.POST:
        msg_data['text'] = request.POST['text']
    # 'from' (if present) overrides 'number', matching the original order.
    if 'from' in request.POST:
        msg_data['from'] = request.POST['from']
    try:
        transport.msg_new(msg_data)
        return HttpResponse("ok")
    except Exception:
        # Best-effort delivery: webhook callers only distinguish ok/error.
        return HttpResponse("error")


@csrf_exempt
def voicex_us(request):
    """SMS webhook for the US (Google Voice) transport."""
    return _relay_message(request, gv)


@csrf_exempt
def voicex_ke(request):
    """SMS webhook for the Kenya transport."""
    return _relay_message(request, at)
| mit |
ujenmr/ansible | lib/ansible/plugins/shell/sh.py | 63 | 4012 | # Copyright (c) 2014, Chris Church <chris@ninemoreminutes.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: sh
plugin_type: shell
short_description: "POSIX shell (/bin/sh)"
version_added: historical
description:
- This shell plugin is the one you want to use on most Unix systems, it is the most compatible and widely installed shell.
extends_documentation_fragment:
- shell_common
'''
from ansible.module_utils.six.moves import shlex_quote
from ansible.plugins.shell import ShellBase
class ShellModule(ShellBase):
    """Shell plugin for POSIX-compatible shells (``/bin/sh`` and friends).

    NOTE: everything emitted here must be SH-compliant; BASH-isms will
    not work if /bin/sh points to a non-BASH shell.
    """

    # Common shell filenames that this plugin handles.  sh is the default
    # shell plugin, so this plugin may also be selected if the filename is
    # not listed in any other shell plugin.
    COMPATIBLE_SHELLS = frozenset(('sh', 'zsh', 'bash', 'dash', 'ksh'))
    # Family of shells this has. Must match the filename without extension
    SHELL_FAMILY = 'sh'

    # commonly used
    ECHO = 'echo'
    COMMAND_SEP = ';'

    # How to end lines in a python script one-liner
    _SHELL_EMBEDDED_PY_EOL = '\n'
    _SHELL_REDIRECT_ALLNULL = '> /dev/null 2>&1'
    _SHELL_AND = '&&'
    _SHELL_OR = '||'
    _SHELL_SUB_LEFT = '"`'
    _SHELL_SUB_RIGHT = '`"'
    _SHELL_GROUP_LEFT = '('
    _SHELL_GROUP_RIGHT = ')'

    def checksum(self, path, python_interp):
        """Build a shell one-liner that prints a SHA-1 checksum for *path*.

        The returned command string runs a series of POSIX test checks and
        then tries Python-based hashing fallbacks on the remote host.
        """
        # In the following test, each condition is a check and logical
        # comparison (|| or &&) that sets the rc value.  Every check is run so
        # the last check in the series to fail will be the rc that is returned.
        #
        # If a check fails we error before invoking the hash functions because
        # hash functions may successfully take the hash of a directory on BSDs
        # (UFS filesystem?) which is not what the rest of the ansible code
        # expects.
        #
        # If all of the available hashing methods fail we fail with an rc of 0.
        # This logic is added to the end of the cmd at the bottom of this
        # function.

        # Return codes (printed in place of the checksum on failure):
        #   checksum: success!
        #   0: Unknown error
        #   1: Remote file does not exist
        #   2: No read permissions on the file
        #   3: File is a directory
        #   4: No python interpreter

        # Quoting gets complex here.  We're writing a python string that's
        # used by a variety of shells on the remote host to invoke a python
        # "one-liner".
        shell_escaped_path = shlex_quote(path)
        test = "rc=flag; [ -r %(p)s ] %(shell_or)s rc=2; [ -f %(p)s ] %(shell_or)s rc=1; [ -d %(p)s ] %(shell_and)s rc=3; %(i)s -V 2>/dev/null %(shell_or)s rc=4; [ x\"$rc\" != \"xflag\" ] %(shell_and)s echo \"${rc} \"%(p)s %(shell_and)s exit 0" % dict(p=shell_escaped_path, i=python_interp, shell_and=self._SHELL_AND, shell_or=self._SHELL_OR)  # NOQA
        csums = [
            u"({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL),  # NOQA  Python > 2.4 (including python3)
            u"({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL),  # NOQA  Python == 2.4
        ]

        cmd = (" %s " % self._SHELL_OR).join(csums)
        cmd = "%s; %s %s (echo \'0 \'%s)" % (test, cmd, self._SHELL_OR, shell_escaped_path)
        return cmd
| gpl-3.0 |
abenzbiria/clients_odoo | addons/l10n_ve/__openerp__.py | 119 | 3056 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
##############################################################################
# Module programed and financed by:
# Vauxoo, C.A. (<http://vauxoo.com>).
# Our Community team mantain this module:
# https://launchpad.net/~openerp-venezuela
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP module manifest: evaluated as a single dict literal.
{
    'name': 'Venezuela - Accounting',
    'version': '1.0',
    'author': ['OpenERP SA', 'Vauxoo'],
    'category': 'Localization/Account Charts',
    # User-facing description (reStructuredText).  Spelling and grammar
    # fixed: "mantain" -> "maintain", "confortable" -> "comfortable",
    # "recomend" -> "recommend", plus broken sentence structure.
    'description':
"""
Chart of Account for Venezuela.
===============================

Venezuela doesn't have any chart of account by law, but the default
proposed in OpenERP should comply with some accepted best practices in
Venezuela; this chart complies with those practices.

This module has been tested as a base for more than 1000 companies, because
it is based on a mixture of the most common software in the Venezuelan
market, which will surely let accountants feel more comfortable taking
their first steps with OpenERP.

This module doesn't pretend to be the complete localization for Venezuela,
but it will help you to start really quickly with OpenERP in this country.

This module gives you.
---------------------

- Basic taxes for Venezuela.
- Basic data to run tests with the community localization.
- The ability to start a company from 0 if your needs are basic from an
  accounting point of view.

We recommend installing account_anglo_saxon if you want to value your
stocks as Venezuela does with invoices.

If you install this module and select a custom chart, a basic chart will be
proposed, but you will need to set account defaults for taxes manually.
""",
    'depends': ['account',
                'base_vat',
                'account_chart'
                ],
    'demo': [],
    'data': ['data/account_tax_code.xml',
             'data/account_user_types.xml',
             'data/account_chart.xml',
             'data/account_tax.xml',
             'data/l10n_chart_ve_wizard.xml'
             ],
    'auto_install': False,
    'installable': True,
    'images': ['images/config_chart_l10n_ve.jpeg',
               'images/l10n_ve_chart.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
openstack/python-openstacksdk | openstack/tests/unit/load_balancer/test_health_monitor.py | 3 | 4075 | # Copyright 2017 Rackspace, US Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.unit import base
import uuid
from openstack.load_balancer.v2 import health_monitor
# Representative health-monitor API payload used by the tests below.
# The uuid fields are regenerated on every test run, so tests compare
# against this dict rather than against literal values.
EXAMPLE = {
    'admin_state_up': True,
    'created_at': '2017-07-17T12:14:57.233772',
    'delay': 10,
    'expected_codes': '200, 202',
    'http_method': 'HEAD',
    'id': uuid.uuid4(),
    'max_retries': 2,
    'max_retries_down': 3,
    'name': 'test_health_monitor',
    'operating_status': 'ONLINE',
    'pools': [{'id': uuid.uuid4()}],
    'pool_id': uuid.uuid4(),
    'project_id': uuid.uuid4(),
    'provisioning_status': 'ACTIVE',
    'timeout': 4,
    'type': 'HTTP',
    'updated_at': '2017-07-17T12:16:57.233772',
    'url_path': '/health_page.html'
}
class TestPoolHealthMonitor(base.TestCase):
    """Unit tests for the load-balancer ``HealthMonitor`` resource."""

    def test_basic(self):
        # Resource metadata: JSON keys, REST base path and allowed
        # CRUD operations.
        test_hm = health_monitor.HealthMonitor()
        self.assertEqual('healthmonitor', test_hm.resource_key)
        self.assertEqual('healthmonitors', test_hm.resources_key)
        self.assertEqual('/lbaas/healthmonitors', test_hm.base_path)
        self.assertTrue(test_hm.allow_create)
        self.assertTrue(test_hm.allow_fetch)
        self.assertTrue(test_hm.allow_commit)
        self.assertTrue(test_hm.allow_delete)
        self.assertTrue(test_hm.allow_list)

    def test_make_it(self):
        # Every attribute of the EXAMPLE payload must round-trip through
        # the resource constructor, and the query-parameter mapping must
        # match the API's expected parameter names exactly.
        test_hm = health_monitor.HealthMonitor(**EXAMPLE)
        self.assertTrue(test_hm.is_admin_state_up)
        self.assertEqual(EXAMPLE['created_at'], test_hm.created_at)
        self.assertEqual(EXAMPLE['delay'], test_hm.delay)
        self.assertEqual(EXAMPLE['expected_codes'], test_hm.expected_codes)
        self.assertEqual(EXAMPLE['http_method'], test_hm.http_method)
        self.assertEqual(EXAMPLE['id'], test_hm.id)
        self.assertEqual(EXAMPLE['max_retries'], test_hm.max_retries)
        self.assertEqual(EXAMPLE['max_retries_down'], test_hm.max_retries_down)
        self.assertEqual(EXAMPLE['name'], test_hm.name)
        self.assertEqual(EXAMPLE['operating_status'], test_hm.operating_status)
        self.assertEqual(EXAMPLE['pools'], test_hm.pools)
        self.assertEqual(EXAMPLE['pool_id'], test_hm.pool_id)
        self.assertEqual(EXAMPLE['project_id'], test_hm.project_id)
        self.assertEqual(EXAMPLE['provisioning_status'],
                         test_hm.provisioning_status)
        self.assertEqual(EXAMPLE['timeout'], test_hm.timeout)
        self.assertEqual(EXAMPLE['type'], test_hm.type)
        self.assertEqual(EXAMPLE['updated_at'], test_hm.updated_at)
        self.assertEqual(EXAMPLE['url_path'], test_hm.url_path)

        self.assertDictEqual(
            {'limit': 'limit',
             'marker': 'marker',
             'created_at': 'created_at',
             'updated_at': 'updated_at',
             'name': 'name',
             'project_id': 'project_id',
             'tags': 'tags',
             'any_tags': 'tags-any',
             'not_tags': 'not-tags',
             'not_any_tags': 'not-tags-any',
             'operating_status': 'operating_status',
             'provisioning_status': 'provisioning_status',
             'is_admin_state_up': 'admin_state_up',
             'delay': 'delay',
             'expected_codes': 'expected_codes',
             'http_method': 'http_method',
             'max_retries': 'max_retries',
             'max_retries_down': 'max_retries_down',
             'pool_id': 'pool_id',
             'timeout': 'timeout',
             'type': 'type',
             'url_path': 'url_path'
             },
            test_hm._query_mapping._mapping)
| apache-2.0 |
CoolCloud/flask-admin | flask_admin/tests/test_model.py | 7 | 19165 | import wtforms
from nose.tools import eq_, ok_
from flask import Flask, session
from werkzeug.wsgi import DispatcherMiddleware
from werkzeug.test import Client
from wtforms import fields
from flask_admin import Admin, form
from flask_admin._compat import iteritems, itervalues
from flask_admin.model import base, filters
from flask_admin.model.template import macro
from itertools import islice
def wtforms2_and_up(func):
    """Mark ``func`` as not-collectable when wtforms is older than 2.x."""
    wtforms_major = int(wtforms.__version__[0])
    if wtforms_major < 2:
        func.__test__ = False
    return func
class Model(object):
    """Minimal stand-in record with an id and three data columns."""

    def __init__(self, id=None, c1=1, c2=2, c3=3):
        # Map the constructor shorthands onto the attribute names the
        # admin views read (col1..col3).
        self.id = id
        self.col1, self.col2, self.col3 = c1, c2, c3
class Form(form.BaseForm):
    # Fixed scaffold form returned by MockModelView.scaffold_form():
    # one string field per Model column.
    col1 = fields.StringField()
    col2 = fields.StringField()
    col3 = fields.StringField()
class SimpleFilter(filters.BaseFilter):
    """Trivial filter used to observe that filters are applied to a query."""

    def apply(self, query):
        # Tag the query object instead of actually filtering anything.
        query._applied = True
        return query

    def operation(self):
        # Label shown in the filter UI for this filter.
        return 'test'
class MockModelView(base.BaseModelView):
    """In-memory model view used to exercise the generic admin views.

    Models live in the ``all_models`` dict keyed by id, and every mutating
    operation is recorded (``created_models``, ``updated_models``,
    ``deleted_models``) so tests can assert on what happened.  Arbitrary
    keyword arguments are applied as attributes *before* base-class
    initialisation, letting tests configure any view option inline.
    """

    def __init__(self, model, data=None, name=None, category=None,
                 endpoint=None, url=None, **kwargs):
        # Allow to set any attributes from parameters
        for k, v in iteritems(kwargs):
            setattr(self, k, v)

        super(MockModelView, self).__init__(model, name, category, endpoint, url)

        # Mutation logs inspected by the tests.
        self.created_models = []
        self.updated_models = []
        self.deleted_models = []
        self.search_arguments = []

        if data is None:
            self.all_models = {1: Model(1), 2: Model(2)}
        else:
            self.all_models = data

        self.last_id = len(self.all_models) + 1

    # Scaffolding
    def get_pk_value(self, model):
        """A model's primary key is its ``id`` attribute."""
        return model.id

    def scaffold_list_columns(self):
        """Return displayable columns, honouring ``column_exclude_list``.

        Fix: the original returned ``filter(...)`` here, which on Python 3
        is a lazy single-use iterator -- inconsistent with the list
        returned on the other branch and broken for callers that need to
        iterate the result more than once.
        """
        columns = ['col1', 'col2', 'col3']

        if self.column_exclude_list:
            return [c for c in columns if c not in self.column_exclude_list]

        return columns

    def init_search(self):
        # Search is "supported" whenever searchable columns are configured.
        return bool(self.column_searchable_list)

    def scaffold_filters(self, name):
        return [SimpleFilter(name)]

    def scaffold_sortable_columns(self):
        return ['col1', 'col2', 'col3']

    def scaffold_form(self):
        return Form

    # Data
    def get_list(self, page, sort_field, sort_desc, search, filters,
                 page_size=None):
        # Record the query arguments so tests can inspect them later.
        self.search_arguments.append((page, sort_field, sort_desc, search, filters))
        count = len(self.all_models)
        data = islice(itervalues(self.all_models), 0, page_size)
        return count, data

    def get_one(self, id):
        # ids arrive as strings from the URL; normalise before lookup.
        return self.all_models.get(int(id))

    def create_model(self, form):
        model = Model(self.last_id)
        self.last_id += 1

        form.populate_obj(model)
        self.created_models.append(model)
        self.all_models[model.id] = model

        return True

    def update_model(self, form, model):
        form.populate_obj(model)
        self.updated_models.append(model)
        return True

    def delete_model(self, model):
        self.deleted_models.append(model)
        return True
def setup():
    """Create a fresh Flask app (CSRF disabled) with an Admin bound to it."""
    flask_app = Flask(__name__)
    flask_app.config['CSRF_ENABLED'] = False
    flask_app.secret_key = '1'
    return flask_app, Admin(flask_app)
def test_mockview():
    """End-to-end smoke test of list/create/edit/delete on MockModelView."""
    app, admin = setup()

    view = MockModelView(Model)
    admin.add_view(view)

    eq_(view.model, Model)
    eq_(view.name, 'Model')
    eq_(view.endpoint, 'model')

    # Verify scaffolding
    eq_(view._sortable_columns, ['col1', 'col2', 'col3'])
    eq_(view._create_form_class, Form)
    eq_(view._edit_form_class, Form)
    eq_(view._search_supported, False)
    eq_(view._filters, None)

    client = app.test_client()

    # Make model view requests
    rv = client.get('/admin/model/')
    eq_(rv.status_code, 200)

    # Test model creation view
    rv = client.get('/admin/model/new/')
    eq_(rv.status_code, 200)

    rv = client.post('/admin/model/new/',
                     data=dict(col1='test1', col2='test2', col3='test3'))
    eq_(rv.status_code, 302)
    eq_(len(view.created_models), 1)

    # MockModelView starts with ids 1 and 2, so the new model gets id 3.
    model = view.created_models.pop()
    eq_(model.id, 3)
    eq_(model.col1, 'test1')
    eq_(model.col2, 'test2')
    eq_(model.col3, 'test3')

    # Try model edit view
    rv = client.get('/admin/model/edit/?id=3')
    eq_(rv.status_code, 200)

    data = rv.data.decode('utf-8')

    ok_('test1' in data)

    rv = client.post('/admin/model/edit/?id=3',
                     data=dict(col1='test!', col2='test@', col3='test#'))
    eq_(rv.status_code, 302)
    eq_(len(view.updated_models), 1)

    model = view.updated_models.pop()
    eq_(model.col1, 'test!')
    eq_(model.col2, 'test@')
    eq_(model.col3, 'test#')

    # Editing a non-existent id redirects back to the list view.
    rv = client.get('/admin/model/edit/?id=4')
    eq_(rv.status_code, 302)

    # Attempt to delete model
    rv = client.post('/admin/model/delete/?id=3')
    eq_(rv.status_code, 302)
    eq_(rv.headers['location'], 'http://localhost/admin/model/')

    # Create a dispatched application to test that edit view's "save and
    # continue" functionality works when app is not located at root
    dummy_app = Flask('dummy_app')
    dispatched_app = DispatcherMiddleware(dummy_app, {'/dispatched': app})
    dispatched_client = Client(dispatched_app)

    app_iter, status, headers = dispatched_client.post(
        '/dispatched/admin/model/edit/?id=3',
        data=dict(col1='another test!', col2='test@', col3='test#', _continue_editing='True'))
    eq_(status, '302 FOUND')
    eq_(headers['Location'], 'http://localhost/dispatched/admin/model/edit/?id=3')
    model = view.updated_models.pop()
    eq_(model.col1, 'another test!')
def test_permissions():
    """Disabling can_create/can_edit/can_delete must redirect each view."""
    app, admin = setup()
    view = MockModelView(Model)
    admin.add_view(view)
    client = app.test_client()

    view.can_create = False
    eq_(client.get('/admin/model/new/').status_code, 302)

    view.can_edit = False
    eq_(client.get('/admin/model/edit/?id=1').status_code, 302)

    view.can_delete = False
    eq_(client.post('/admin/model/delete/?id=1').status_code, 302)
def test_templates():
    """Custom template names are honoured by the list/create/edit views."""
    app, admin = setup()
    view = MockModelView(Model)
    admin.add_view(view)
    client = app.test_client()

    # Point all three views at the mock template, which renders "Success!".
    for attr in ('list_template', 'create_template', 'edit_template'):
        setattr(view, attr, 'mock.html')

    for url in ('/admin/model/', '/admin/model/new/', '/admin/model/edit/?id=1'):
        eq_(client.get(url).data, b'Success!')
def test_list_columns():
    """column_list selects columns and column_labels overrides their titles."""
    app, admin = setup()
    view = MockModelView(Model,
                         column_list=['col1', 'col3'],
                         column_labels=dict(col1='Column1'))
    admin.add_view(view)

    eq_(len(view._list_columns), 2)
    eq_(view._list_columns, [('col1', 'Column1'), ('col3', 'Col3')])

    client = app.test_client()
    body = client.get('/admin/model/').data.decode('utf-8')
    ok_('Column1' in body)
    ok_('Col2' not in body)
def test_exclude_columns():
    """Columns listed in column_exclude_list disappear from the list view."""
    app, admin = setup()
    view = MockModelView(Model, column_exclude_list=['col2'])
    admin.add_view(view)

    eq_(view._list_columns, [('col1', 'Col1'), ('col3', 'Col3')])

    client = app.test_client()
    body = client.get('/admin/model/').data.decode('utf-8')
    ok_('Col1' in body)
    ok_('Col2' not in body)
def test_sortable_columns():
    """column_sortable_list accepts plain names and (name, field) pairs."""
    app, admin = setup()
    view = MockModelView(Model, column_sortable_list=['col1', ('col2', 'test1')])
    admin.add_view(view)

    eq_(view._sortable_columns, {'col1': 'col1', 'col2': 'test1'})
def test_column_searchable_list():
    """A non-empty column_searchable_list turns search support on."""
    app, admin = setup()
    view = MockModelView(Model, column_searchable_list=['col1', 'col2'])
    admin.add_view(view)

    eq_(view._search_supported, True)

    # TODO: Make calls with search
def test_column_filters():
    """Each name in column_filters yields one scaffolded SimpleFilter."""
    app, admin = setup()
    view = MockModelView(Model, column_filters=['col1', 'col2'])
    admin.add_view(view)

    eq_(len(view._filters), 2)
    for idx, col in enumerate(['col1', 'col2']):
        eq_(view._filters[idx].name, col)
        eq_([(f['index'], f['operation']) for f in view._filter_groups[col]],
            [(idx, 'test')])

    # TODO: Make calls with filters
def test_filter_list_callable():
    """Filter options supplied as a callable are resolved when requested."""
    app, admin = setup()

    flt = SimpleFilter('test', options=lambda: (('1', 'Test 1'), ('2', 'Test 2')))
    view = MockModelView(Model, column_filters=[flt])
    admin.add_view(view)

    choices = flt.get_options(view)
    eq_(len(choices), 2)
    eq_(choices, [('1', u'Test 1'), ('2', u'Test 2')])
def test_form():
    # Placeholder: form customisation options are not covered yet.
    # TODO: form_columns
    # TODO: form_excluded_columns
    # TODO: form_args
    # TODO: form_widget_args
    pass
@wtforms2_and_up
def test_csrf():
    """End-to-end CSRF protection on the create/edit/delete views.

    Requests without a token must be rejected (form re-rendered / delete
    refused); requests carrying the token extracted from the rendered
    form must succeed.  Requires wtforms >= 2 (session CSRF support).
    """
    from datetime import timedelta
    from wtforms.csrf.session import SessionCSRF
    from wtforms.meta import DefaultMeta

    # BaseForm w/ CSRF
    class SecureForm(form.BaseForm):
        class Meta(DefaultMeta):
            csrf = True
            csrf_class = SessionCSRF
            csrf_secret = b'EPj00jpfj8Gx1SjnyLxwBBSQfnQ9DJYe0Ym'
            csrf_time_limit = timedelta(minutes=20)

            @property
            def csrf_context(self):
                # Tokens are stored in the Flask session.
                return session

    class SecureModelView(MockModelView):
        form_base_class = SecureForm

        def scaffold_form(self):
            return SecureForm

    def get_csrf_token(data):
        # Extract the hidden csrf_token value from the rendered form HTML.
        data = data.split('name="csrf_token" type="hidden" value="')[1]
        token = data.split('"')[0]
        return token

    app, admin = setup()
    view = SecureModelView(Model, endpoint='secure')
    admin.add_view(view)

    client = app.test_client()

    ################
    # create_view
    ################
    rv = client.get('/admin/secure/new/')
    eq_(rv.status_code, 200)
    ok_(u'name="csrf_token"' in rv.data.decode('utf-8'))

    csrf_token = get_csrf_token(rv.data.decode('utf-8'))

    # Create without CSRF token: form is re-rendered (200), not submitted.
    rv = client.post('/admin/secure/new/', data=dict(name='test1'))
    eq_(rv.status_code, 200)

    # Create with CSRF token: success redirect.
    rv = client.post('/admin/secure/new/', data=dict(name='test1',
                                                     csrf_token=csrf_token))
    eq_(rv.status_code, 302)

    ###############
    # edit_view
    ###############
    rv = client.get('/admin/secure/edit/?url=%2Fadmin%2Fsecure%2F&id=1')
    eq_(rv.status_code, 200)
    ok_(u'name="csrf_token"' in rv.data.decode('utf-8'))

    csrf_token = get_csrf_token(rv.data.decode('utf-8'))

    # Edit without CSRF token
    rv = client.post('/admin/secure/edit/?url=%2Fadmin%2Fsecure%2F&id=1',
                     data=dict(name='test1'))
    eq_(rv.status_code, 200)

    # Edit with CSRF token
    rv = client.post('/admin/secure/edit/?url=%2Fadmin%2Fsecure%2F&id=1',
                     data=dict(name='test1', csrf_token=csrf_token))
    eq_(rv.status_code, 302)

    ################
    # delete_view
    ################
    rv = client.get('/admin/secure/')
    eq_(rv.status_code, 200)
    ok_(u'name="csrf_token"' in rv.data.decode('utf-8'))

    csrf_token = get_csrf_token(rv.data.decode('utf-8'))

    # Delete without CSRF token, test validation errors
    rv = client.post('/admin/secure/delete/',
                     data=dict(id="1", url="/admin/secure/"), follow_redirects=True)
    eq_(rv.status_code, 200)
    ok_(u'Record was successfully deleted.' not in rv.data.decode('utf-8'))
    ok_(u'Failed to delete record.' in rv.data.decode('utf-8'))

    # Delete with CSRF token
    rv = client.post('/admin/secure/delete/',
                     data=dict(id="1", url="/admin/secure/", csrf_token=csrf_token),
                     follow_redirects=True)
    eq_(rv.status_code, 200)
    ok_(u'Record was successfully deleted.' in rv.data.decode('utf-8'))
def test_custom_form():
    """A user-supplied form class replaces the scaffolded one entirely."""
    app, admin = setup()

    class TestForm(form.BaseForm):
        pass

    view = MockModelView(Model, form=TestForm)
    admin.add_view(view)

    for form_cls in (view._create_form_class, view._edit_form_class):
        eq_(form_cls, TestForm)

    # The scaffolded col1 field must not leak into the custom form.
    ok_(not hasattr(view._create_form_class, 'col1'))
def test_modal_edit():
    """fa_modal_window markup appears iff edit_modal/create_modal is enabled,
    for both the bootstrap2 and bootstrap3 template modes."""
    modal_views = [
        MockModelView(Model, edit_modal=True, endpoint="edit_modal_on"),
        MockModelView(Model, edit_modal=False, endpoint="edit_modal_off"),
        MockModelView(Model, create_modal=True, endpoint="create_modal_on"),
        MockModelView(Model, create_modal=False, endpoint="create_modal_off"),
    ]

    # (url, whether the modal markup must be present)
    expectations = [
        ('/admin/edit_modal_on/', True),
        ('/admin/edit_modal_off/', False),
        ('/admin/create_modal_on/', True),
        ('/admin/create_modal_off/', False),
    ]

    # The same checks run identically under both template modes.
    for template_mode in ("bootstrap2", "bootstrap3"):
        app = Flask(__name__)
        admin = Admin(app, template_mode=template_mode)
        for view in modal_views:
            admin.add_view(view)
        client = app.test_client()

        for url, modal_expected in expectations:
            rv = client.get(url)
            eq_(rv.status_code, 200)
            data = rv.data.decode('utf-8')
            if modal_expected:
                ok_('fa_modal_window' in data)
            else:
                ok_('fa_modal_window' not in data)
def check_class_name():
    """The default view name is derived from the CamelCase class name."""
    class DummyView(MockModelView):
        pass

    eq_(DummyView(Model).name, 'Dummy View')
def test_export_csv():
    """CSV export: disabled-export redirect, basic content, utf-8 values,
    row limit, None/int handling, labels, and formatter precedence."""
    app, admin = setup()
    client = app.test_client()

    # test redirect when csv export is disabled
    view = MockModelView(Model, column_list=['col1', 'col2'], endpoint="test")
    admin.add_view(view)

    rv = client.get('/admin/test/export/csv/')
    eq_(rv.status_code, 302)

    # basic test of csv export with a few records
    view_data = {
        1: Model(1, "col1_1", "col2_1"),
        2: Model(2, "col1_2", "col2_2"),
        3: Model(3, "col1_3", "col2_3"),
    }

    view = MockModelView(Model, view_data, can_export=True,
                         column_list=['col1', 'col2'])
    admin.add_view(view)

    rv = client.get('/admin/model/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.mimetype, 'text/csv')
    eq_(rv.status_code, 200)
    ok_("Col1,Col2\r\n"
        "col1_1,col2_1\r\n"
        "col1_2,col2_2\r\n"
        "col1_3,col2_3\r\n" == data)

    # test utf8 characters in csv export
    view_data[4] = Model(1, u'\u2013ut8_1\u2013', u'\u2013utf8_2\u2013')
    view = MockModelView(Model, view_data, can_export=True,
                         column_list=['col1', 'col2'], endpoint="utf8")
    admin.add_view(view)

    rv = client.get('/admin/utf8/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 200)
    ok_(u'\u2013ut8_1\u2013,\u2013utf8_2\u2013\r\n' in data)

    # test row limit: only export_max_rows rows may appear in the output
    view_data = {
        1: Model(1, "col1_1", "col2_1"),
        2: Model(2, "col1_2", "col2_2"),
        3: Model(3, "col1_3", "col2_3"),
    }
    view = MockModelView(Model, view_data, can_export=True,
                         column_list=['col1', 'col2'], export_max_rows=2,
                         endpoint='row_limit_2')
    admin.add_view(view)

    rv = client.get('/admin/row_limit_2/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 200)
    ok_("Col1,Col2\r\n"
        "col1_1,col2_1\r\n"
        "col1_2,col2_2\r\n" == data)

    # test None type, integer type, column_labels, and column_formatters
    view_data = {
        1: Model(1, "col1_1", 1),
        2: Model(2, "col1_2", 2),
        3: Model(3, None, 3),
    }
    view = MockModelView(
        Model, view_data, can_export=True, column_list=['col1', 'col2'],
        column_labels={'col1': 'Str Field', 'col2': 'Int Field'},
        column_formatters=dict(col2=lambda v, c, m, p: m.col2*2),
        endpoint="types_and_formatters"
    )
    admin.add_view(view)

    rv = client.get('/admin/types_and_formatters/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 200)
    ok_("Str Field,Int Field\r\n"
        "col1_1,2\r\n"
        "col1_2,4\r\n"
        ",6\r\n" == data)

    # test column_formatters_export and column_type_formatters_export:
    # export-specific formatters take precedence over display formatters
    type_formatters = {type(None): lambda view, value: "null"}
    view = MockModelView(
        Model, view_data, can_export=True, column_list=['col1', 'col2'],
        column_formatters_export=dict(col2=lambda v, c, m, p: m.col2*3),
        column_formatters=dict(col2=lambda v, c, m, p: m.col2*2),  # overridden
        column_type_formatters_export=type_formatters,
        endpoint="export_types_and_formatters"
    )
    admin.add_view(view)

    rv = client.get('/admin/export_types_and_formatters/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 200)
    ok_("Col1,Col2\r\n"
        "col1_1,3\r\n"
        "col1_2,6\r\n"
        "null,9\r\n" == data)

    # Macros are not implemented for csv export yet and will throw an error
    view = MockModelView(
        Model, can_export=True, column_list=['col1', 'col2'],
        column_formatters=dict(col1=macro('render_macro')),
        endpoint="macro_exception"
    )
    admin.add_view(view)

    rv = client.get('/admin/macro_exception/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 500)
| bsd-3-clause |
gobstones/PyGobstones | pygobstones/gui/views/gobstonesMain.py | 1 | 21498 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'gobstonesMain.ui'
#
# Created by: PyQt4 UI code generator 4.9.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
import sys
import resources
sys.path.append('..')
from pygobstones.commons.i18n import *
from pygobstones.gui.textEditor import *
# PyQt4 compatibility shim: newer PyQt4 API-2 builds (and Python 3) have no
# QString, so fall back to the identity function.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
# Translation helper: older PyQt4 exposes QApplication.UnicodeUTF8 and wants
# it passed as the encoding argument; newer releases dropped the constant.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8('MainWindow'))
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8('centralwidget'))
self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName(_fromUtf8('verticalLayout'))
self.tabWidgetEditors = QtGui.QTabWidget(self.centralwidget)
self.tabWidgetEditors.setObjectName(_fromUtf8('tabWidgetEditors'))
self.tabWidgetEditors.setStyleSheet("border:2px solid #4682b4; border-color:'#4682b4';")
self.tabWidgetEditors.tabBar().setStyleSheet("background-color:'white'; color:'#4682b4'; border:2px solid #4682b4; font-size:15px")
self.tabWidgetEditors.tabBar().setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.tabFile = QtGui.QWidget()
self.tabFile.setStyleSheet("border-color:white")
self.tabFile.setObjectName(_fromUtf8('tabFile'))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.tabFile)
self.verticalLayout_3.setObjectName(_fromUtf8('verticalLayout_3'))
self.textEditFile = GobstonesTextEditor(self.tabFile)
self.textEditFile.setObjectName(_fromUtf8('textEditFile'))
self.textEditFile.setStyleSheet("selection-color: white; selection-background-color:#008080")
self.verticalLayout_3.addWidget(self.textEditFile)
self.tabWidgetEditors.addTab(self.tabFile, _fromUtf8(''))
self.tabLibrary = QtGui.QWidget()
self.tabLibrary.setStyleSheet("border-color:white")
self.tabLibrary.setObjectName(_fromUtf8('tabLibrary'))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.tabLibrary)
self.verticalLayout_2.setObjectName(_fromUtf8('verticalLayout_2'))
self.textEditLibrary = GobstonesTextEditor(self.tabLibrary)
self.textEditLibrary.setObjectName(_fromUtf8('textEditLibrary'))
self.textEditLibrary.setStyleSheet("selection-color: white; selection-background-color:#008080")
self.verticalLayout_2.addWidget(self.textEditLibrary)
self.tabWidgetEditors.addTab(self.tabLibrary, _fromUtf8(''))
self.set_highlighter(GobstonesHighlighter)
self.logger = QtGui.QTextEdit()
self.logger.setObjectName(_fromUtf8('logger'))
self.logger.setReadOnly(True)
self.logger.setStyleSheet("font-family: Monospace, Consolas, 'Courier New'; font-weight: 100; font-size: 10pt")
self.grid = QtGui.QGridLayout()
self.grid.setSpacing(1)
self.verticalLayout.addLayout(self.grid)
self.splitter = QtGui.QSplitter(QtCore.Qt.Vertical, self.centralwidget)
self.splitter.addWidget(self.tabWidgetEditors)
self.splitter.addWidget(self.logger)
self.verticalLayout.addWidget(self.splitter)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8('statusbar'))
MainWindow.setStatusBar(self.statusbar)
self.toolBar = QtGui.QToolBar(MainWindow)
self.toolBar.setObjectName(_fromUtf8('toolBar'))
self.toolBar.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.menuBar = QtGui.QMenuBar(MainWindow)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 703, 20))
self.menuBar.setObjectName(_fromUtf8('menuBar'))
self.menuFile = QtGui.QMenu(self.menuBar)
self.menuFile.setObjectName(_fromUtf8('menuFile'))
self.menuEdit = QtGui.QMenu(self.menuBar)
self.menuEdit.setObjectName(_fromUtf8('menuEdit'))
self.menuGobstones = QtGui.QMenu(self.menuBar)
self.menuGobstones.setObjectName(_fromUtf8('menuGobstones'))
self.menuBoard = QtGui.QMenu(self.menuBar)
self.menuBoard.setObjectName(_fromUtf8('menuBoard'))
self.menuSelectResultView = QtGui.QMenu(self.menuBoard)
self.menuSelectResultView.setObjectName(_fromUtf8
('menuSelectResultView'))
self.menuHelp = QtGui.QMenu(self.menuBar)
self.menuHelp.setObjectName(_fromUtf8('menuHelp'))
MainWindow.setMenuBar(self.menuBar)
self.actionChangeLang = QtGui.QAction(MainWindow)
icon = QtGui.QIcon(":/logoGobstones.png")
self.actionChangeLang.setIcon(icon)
self.actionChangeLang.setObjectName(_fromUtf8('actionChangeLang'))
self.actionNewFile = QtGui.QAction(MainWindow)
icon = QtGui.QIcon(":/new.png")
self.actionNewFile.setIcon(icon)
self.actionNewFile.setObjectName(_fromUtf8('actionNewFile'))
self.actionCloseFile = QtGui.QAction(MainWindow)
icon = QtGui.QIcon(":/close.png")
self.actionCloseFile.setIcon(icon)
self.actionCloseFile.setObjectName(_fromUtf8('actionCloseFile'))
self.actionOpenFile = QtGui.QAction(MainWindow)
icon1 = QtGui.QIcon(":/open.png")
self.actionOpenFile.setIcon(icon1)
self.actionOpenFile.setObjectName(_fromUtf8('actionOpenFile'))
self.actionSave = QtGui.QAction(MainWindow)
icon2 = QtGui.QIcon(":/save.png")
self.actionSave.setIcon(icon2)
self.actionSave.setObjectName(_fromUtf8('actionSave'))
self.actionSaveAs = QtGui.QAction(MainWindow)
icon3 = QtGui.QIcon(":/save-as.png")
self.actionSaveAs.setIcon(icon3)
self.actionSaveAs.setObjectName(_fromUtf8('actionSaveAs'))
self.actionUndo = QtGui.QAction(MainWindow)
icon5 = QtGui.QIcon(":/undo.png")
self.actionUndo.setIcon(icon5)
self.actionUndo.setObjectName(_fromUtf8('actionUndo'))
self.actionRedo = QtGui.QAction(MainWindow)
icon6 = QtGui.QIcon(":/redo.png")
self.actionRedo.setIcon(icon6)
self.actionRedo.setObjectName(_fromUtf8('actionRedo'))
self.actionCut = QtGui.QAction(MainWindow)
icon7 = QtGui.QIcon(":/cut.png")
self.actionCut.setIcon(icon7)
self.actionCut.setObjectName(_fromUtf8('actionCut'))
self.actionCopy = QtGui.QAction(MainWindow)
icon8 = QtGui.QIcon(":/copy.png")
self.actionCopy.setIcon(icon8)
self.actionCopy.setObjectName(_fromUtf8('actionCopy'))
self.actionPaste = QtGui.QAction(MainWindow)
icon9 = QtGui.QIcon(":/paste.png")
self.actionPaste.setIcon(icon9)
self.actionPaste.setObjectName(_fromUtf8('actionPaste'))
self.actionSelectAll = QtGui.QAction(MainWindow)
icon10 = QtGui.QIcon(":/select-all.png")
self.actionSelectAll.setIcon(icon10)
self.actionSelectAll.setObjectName(_fromUtf8('actionSelectAll'))
self.actionFind = QtGui.QAction(MainWindow)
icon11 = QtGui.QIcon(":/find.png")
self.actionFind.setIcon(icon11)
self.actionFind.setObjectName(_fromUtf8('actionFind'))
self.actionReplace = QtGui.QAction(MainWindow)
icon20 = QtGui.QIcon(":/find.png")
self.actionReplace.setIcon(icon20)
self.actionReplace.setObjectName(_fromUtf8('actionReplace'))
self.actionFonts = QtGui.QAction(MainWindow)
icon21 = QtGui.QIcon(":/select-font.png")
self.actionFonts.setIcon(icon21)
self.actionFonts.setObjectName(_fromUtf8('actionFonts'))
self.actionPreferences = QtGui.QAction(MainWindow)
self.actionPreferences.setObjectName(_fromUtf8('actionFonts'))
self.actionCheck = QtGui.QAction(MainWindow)
icon14 = QtGui.QIcon(":/check.png")
self.actionCheck.setIcon(icon14)
self.actionCheck.setObjectName(_fromUtf8('actionCheck'))
self.actionRun = QtGui.QAction(MainWindow)
icon12 = QtGui.QIcon(":/start.png")
self.actionRun.setIcon(icon12)
self.actionRun.setObjectName(_fromUtf8('actionRun'))
self.actionStop = QtGui.QAction(MainWindow)
icon13 = QtGui.QIcon(":/stop.png")
self.actionStop.setIcon(icon13)
self.actionStop.setObjectName(_fromUtf8('actionStop'))
self.actionManual = QtGui.QAction(MainWindow)
icon15 = QtGui.QIcon(":/help.png")
self.actionManual.setIcon(icon15)
self.actionManual.setObjectName(_fromUtf8('actionManual'))
self.actionLicense = QtGui.QAction(MainWindow)
icon16 = QtGui.QIcon(":/manual.png")
self.actionLicense.setIcon(icon16)
self.actionLicense.setObjectName(_fromUtf8('actionLicense'))
self.actionAbout = QtGui.QAction(MainWindow)
icon17 = QtGui.QIcon(":/about.png")
self.actionAbout.setIcon(icon17)
self.actionAbout.setObjectName(_fromUtf8('actionAbout'))
self.actionExit = QtGui.QAction(MainWindow)
icon18 = QtGui.QIcon(":/exit.png")
self.actionExit.setIcon(icon18)
self.actionExit.setObjectName(_fromUtf8('actionExit'))
self.actionOpenBoardEditor = QtGui.QAction(MainWindow)
icon19 = QtGui.QIcon(":/board-random.png")
self.actionOpenBoardEditor.setIcon(icon19)
self.actionOpenBoardEditor.setObjectName(_fromUtf8
('actionOpenBoardEditor'))
self.actionBoardOptions = QtGui.QAction(MainWindow)
icon20 = QtGui.QIcon(":/board-size.png")
self.actionBoardOptions.setIcon(icon20)
self.actionBoardOptions.setObjectName(_fromUtf8
('actionBoardOptions'))
self.actionLoadBoard = QtGui.QAction(MainWindow)
icon20 = QtGui.QIcon(":/board-new.png")
self.actionLoadBoard.setIcon(icon20)
self.actionLoadBoard.setObjectName(_fromUtf8
('actionLoadBoard'))
self.toolBar.addAction(self.actionChangeLang)
self.toolBar.addAction(self.actionNewFile)
self.toolBar.addAction(self.actionOpenFile)
self.toolBar.addAction(self.actionSave)
self.toolBar.addAction(self.actionCloseFile)
self.toolBar.addSeparator()
self.toolBar.addAction(self.actionUndo)
self.toolBar.addAction(self.actionRedo)
self.toolBar.addSeparator()
self.toolBar.addAction(self.actionOpenBoardEditor)
self.toolBar.addSeparator()
self.toolBar.addAction(self.actionCheck)
self.toolBar.addAction(self.actionRun)
self.toolBar.addAction(self.actionStop)
self.toolBar.addSeparator()
self.toolBar.addAction(self.actionManual)
self.toolBar.addAction(self.actionAbout)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionChangeLang)
self.menuFile.addAction(self.actionNewFile)
self.menuFile.addAction(self.actionOpenFile)
self.menuFile.addAction(self.actionSave)
self.menuFile.addAction(self.actionSaveAs)
self.menuFile.addAction(self.actionCloseFile)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionExit)
self.menuEdit.addSeparator()
self.menuEdit.addAction(self.actionUndo)
self.menuEdit.addAction(self.actionRedo)
self.menuEdit.addSeparator()
self.menuEdit.addAction(self.actionCut)
self.menuEdit.addAction(self.actionCopy)
self.menuEdit.addAction(self.actionPaste)
self.menuEdit.addAction(self.actionSelectAll)
self.menuEdit.addSeparator()
self.menuEdit.addAction(self.actionFind)
self.menuEdit.addAction(self.actionReplace)
self.menuEdit.addSeparator()
self.menuEdit.addAction(self.actionFonts)
self.menuEdit.addSeparator()
self.menuEdit.addAction(self.actionPreferences)
self.menuGobstones.addSeparator()
self.menuGobstones.addAction(self.actionRun)
self.menuGobstones.addAction(self.actionStop)
self.menuGobstones.addAction(self.actionCheck)
self.menuBoard.addSeparator()
self.menuBoard.addAction(self.actionLoadBoard)
self.menuBoard.addAction(self.actionBoardOptions)
self.menuBoard.addAction(self.actionOpenBoardEditor)
self.menuBoard.addSeparator()
self.menuBoard.addAction(self.menuSelectResultView.menuAction())
self.menuHelp.addSeparator()
self.menuHelp.addAction(self.actionManual)
self.menuHelp.addAction(self.actionLicense)
self.menuHelp.addAction(self.actionAbout)
self.menuBar.addAction(self.menuFile.menuAction())
self.menuBar.addAction(self.menuEdit.menuAction())
self.menuBar.addAction(self.menuGobstones.menuAction())
self.menuBar.addAction(self.menuBoard.menuAction())
self.menuBar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
self.tabWidgetEditors.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def set_highlighter(self, highlighter_class):
if hasattr(self, "highlighter"):
self.highlighter["main"].setDocument(None)
self.highlighter["library"].setDocument(None)
else:
self.highlighter = {}
self.highlighter["main"] = highlighter_class(self.textEditFile.edit.document())
self.highlighter["library"] = highlighter_class(self.textEditLibrary.edit.document())
    def retranslateUi(self, MainWindow):
        """Apply all user-visible (translated) strings to the UI.

        (Re-)applies tab titles, toolbar/menu titles, action labels,
        tooltips and keyboard shortcuts through the i18n()/_translate()
        helpers; called from setupUi() and again whenever the interface
        language changes.
        """
        # Editor tabs
        self.tabWidgetEditors.setTabText(
            self.tabWidgetEditors.indexOf(self.tabFile),
            _translate('MainWindow', i18n('Untitled'), None))
        self.tabWidgetEditors.setTabText(
            self.tabWidgetEditors.indexOf(self.tabLibrary),
            _translate('MainWindow', i18n('Untitled'), None))
        # Toolbar and menu titles
        self.toolBar.setWindowTitle(_translate('MainWindow', 'toolBar', None))
        self.menuFile.setTitle(_translate('MainWindow', i18n('File'), None))
        self.menuEdit.setTitle(_translate('MainWindow', i18n('Edit'), None))
        self.menuGobstones.setTitle(_translate('MainWindow', 'Gobstones',
                                               None))
        self.menuBoard.setTitle(_translate('MainWindow', i18n('Board'), None))
        self.menuSelectResultView.setTitle(_translate('MainWindow',
                                           i18n('Select view results'), None))
        self.menuHelp.setTitle(_translate('MainWindow', i18n('Help'), None))
        # File actions
        self.actionChangeLang.setText(_translate('MainWindow',
                                      'Gobstones ', None))
        self.actionChangeLang.setToolTip(_translate('MainWindow',
                                         i18n('Change the Gobstones Language'), None))
        self.actionChangeLang.setShortcut(_translate('MainWindow', 'F11', None))
        self.actionNewFile.setText(_translate('MainWindow', i18n('New'), None))
        self.actionNewFile.setToolTip(_translate('MainWindow',
                                      i18n('Create new file'), None))
        self.actionNewFile.setShortcut(_translate('MainWindow', 'Ctrl+N',
                                       None))
        self.actionCloseFile.setText(_translate('MainWindow', i18n('Close'), None))
        self.actionCloseFile.setToolTip(_translate('MainWindow',
                                        i18n('Close the current file and the library'), None))
        # NOTE(review): Ctrl+R is an unusual shortcut for "Close" (Ctrl+W is
        # conventional) -- confirm this binding is intentional.
        self.actionCloseFile.setShortcut(_translate('MainWindow', 'Ctrl+R',
                                         None))
        self.actionOpenFile.setText(_translate('MainWindow', i18n('Open'), None))
        self.actionOpenFile.setToolTip(_translate('MainWindow',
                                       i18n('Open an existent file'), None))
        self.actionOpenFile.setShortcut(_translate('MainWindow', 'Ctrl+O',
                                        None))
        self.actionSave.setText(_translate('MainWindow', i18n('Save'), None))
        self.actionSave.setToolTip(_translate('MainWindow',
                                   i18n('Save the current file'), None))
        self.actionSave.setShortcut(_translate('MainWindow', 'Ctrl+S', None))
        self.actionSaveAs.setText(_translate('MainWindow', i18n('Save as...'),
                                  None))
        self.actionSaveAs.setToolTip(_translate('MainWindow',
                                     i18n('Save the current file and allows put a name and choose the location'),
                                     None))
        # Edit actions
        self.actionUndo.setText(_translate('MainWindow', i18n('Undo'), None))
        self.actionUndo.setShortcut(_translate('MainWindow', 'Ctrl+Z', None))
        self.actionRedo.setText(_translate('MainWindow', i18n('Redo'), None))
        self.actionRedo.setShortcut(_translate('MainWindow', 'Ctrl+Shift+Z',
                                    None))
        self.actionCut.setText(_translate('MainWindow', i18n('Cut'), None))
        self.actionCut.setShortcut(_translate('MainWindow', 'Ctrl+X', None))
        self.actionCopy.setText(_translate('MainWindow', i18n('Copy'), None))
        self.actionCopy.setShortcut(_translate('MainWindow', 'Ctrl+C', None))
        self.actionPaste.setText(_translate('MainWindow', i18n('Paste'), None))
        self.actionPaste.setShortcut(_translate('MainWindow', 'Ctrl+V', None))
        self.actionSelectAll.setText(_translate('MainWindow',
                                     i18n('Select all'), None))
        self.actionSelectAll.setShortcut(_translate('MainWindow', 'Ctrl+A',
                                         None))
        self.actionFind.setText(_translate('MainWindow', i18n('Search'), None))
        self.actionFind.setShortcut(_translate('MainWindow', 'Ctrl+F', None))
        self.actionReplace.setText(_translate('MainWindow', i18n('Search and replace'), None))
        self.actionReplace.setShortcut(_translate('MainWindow', 'Ctrl+H', None))
        self.actionFonts.setText(_translate('MainWindow', i18n('Select fonts'), None))
        self.actionFonts.setShortcut(_translate('MainWindow', 'Ctrl+T', None))
        self.actionPreferences.setText(_translate('MainWindow', i18n('Preferences'), None))
        self.actionPreferences.setShortcut(_translate('MainWindow', 'Ctrl+P', None))
        # Gobstones (run/check) actions
        self.actionRun.setText(_translate('MainWindow', i18n('Run'), None))
        self.actionRun.setToolTip(_translate('MainWindow',
                                  i18n('Executes the current program'), None))
        self.actionRun.setShortcut(_translate('MainWindow', 'F5', None))
        self.actionStop.setText(_translate('MainWindow', i18n('Stop'), None))
        self.actionStop.setToolTip(_translate('MainWindow',
                                   i18n('Stops execution of the current program'), None))
        self.actionStop.setShortcut(_translate('MainWindow', 'F6', None))
        self.actionCheck.setText(_translate('MainWindow', i18n('Check'), None))
        self.actionCheck.setToolTip(_translate('MainWindow',
                                    i18n('Checks if the program is well-formed'), None))
        self.actionCheck.setShortcut(_translate('MainWindow', 'F10', None))
        # Help actions
        self.actionManual.setText(_translate('MainWindow', i18n('Manual'), None))
        self.actionManual.setToolTip(_translate('MainWindow',
                                     i18n('Open the Gobstones\'s manual'), None))
        self.actionLicense.setText(_translate('MainWindow', i18n('Licence'), None))
        self.actionAbout.setText(_translate('MainWindow', i18n('About...'),
                                 None))
        self.actionExit.setText(_translate('MainWindow', i18n('Exit'), None))
        self.actionExit.setToolTip(_translate('MainWindow',
                                   i18n('Closes the application'), None))
        self.actionExit.setShortcut(_translate('MainWindow', 'Ctrl+Q', None))
        # Board actions
        self.actionOpenBoardEditor.setText(_translate('MainWindow',
                                           i18n('Board editor'), None))
        self.actionOpenBoardEditor.setToolTip(_translate('MainWindow',
                                              i18n('Open board editor'), None))
        self.actionBoardOptions.setText(_translate('MainWindow',
                                        i18n('Options Board'), None))
        self.actionBoardOptions.setToolTip(_translate('MainWindow',
                                           i18n('Select board options'), None))
        self.actionLoadBoard.setText(_translate('MainWindow',
                                     i18n('Load board'), None))
        self.actionLoadBoard.setToolTip(_translate('MainWindow',
                                        i18n('Open a board from existing .gbb file'), None))
| gpl-3.0 |
tophua/spark1.52 | examples/src/main/python/sql.py | 8 | 3097 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import sys
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.types import Row, StructField, StructType, StringType, IntegerType
if __name__ == "__main__":
    sc = SparkContext(appName="PythonSQL")
    sqlContext = SQLContext(sc)

    # RDD is created from a list of rows
    some_rdd = sc.parallelize([Row(name="John", age=19),
                              Row(name="Smith", age=23),
                              Row(name="Sarah", age=18)])
    # Infer schema from the first row, create a DataFrame and print the schema
    some_df = sqlContext.createDataFrame(some_rdd)
    some_df.printSchema()
    # Expected output:
    # root
    # |-- age: long (nullable = true)
    # |-- name: string (nullable = true)

    # Another RDD is created from a list of tuples
    another_rdd = sc.parallelize([("John", 19), ("Smith", 23), ("Sarah", 18)])
    # Schema with two fields - person_name and person_age
    schema = StructType([StructField("person_name", StringType(), False),
                        StructField("person_age", IntegerType(), False)])
    # Create a DataFrame by applying the schema to the RDD and print the schema
    another_df = sqlContext.createDataFrame(another_rdd, schema)
    another_df.printSchema()
    # Expected output:
    # root
    # |-- person_name: string (nullable = false)
    # |-- person_age: integer (nullable = false)

    # A JSON dataset is pointed to by path.
    # The path can be either a single text file or a directory storing text files.
    # Fall back to the bundled example data when no path argument was given.
    if len(sys.argv) < 2:
        path = "file://" + \
            os.path.join(os.environ['SPARK_HOME'], "examples/src/main/resources/people.json")
    else:
        path = sys.argv[1]
    # Create a DataFrame from the file(s) pointed to by path
    people = sqlContext.jsonFile(path)
    # The inferred schema can be visualized using the printSchema() method.
    people.printSchema()
    # Expected output:
    # root
    # |-- age: IntegerType
    # |-- name: StringType

    # Register this DataFrame as a (temporary) table so it can be queried
    # with SQL statements.
    people.registerAsTable("people")
    # SQL statements can be run by using the sql methods provided by sqlContext
    teenagers = sqlContext.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19")
    # Print the name of everyone aged 13-19.
    for each in teenagers.collect():
        print(each[0])

    sc.stop()
| apache-2.0 |
timlinux/geonode | docs/conf.py | 5 | 8711 | # -*- coding: utf-8 -*-
#
# GeoNode documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 3 16:20:38 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here.  The paths are made absolute with
# os.path.abspath so the build does not depend on the current working
# directory (os.path.dirname(__file__) can be '' when Sphinx is invoked from
# the docs directory itself, which would otherwise put '' and '../' on
# sys.path).
DOC_PATH = os.path.abspath(os.path.dirname(__file__))
PROJ_PATH = os.path.abspath(os.path.join(DOC_PATH, '..'))
sys.path.extend([DOC_PATH, PROJ_PATH])

# Make sure Django-dependent modules can be imported by autodoc.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "geonode.settings")
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GeoNode'
copyright = u'2017, Open Source Geospatial Foundation, CC-SA'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.8'
# The full version, including alpha/beta/rc tags.
release = '2.8'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
locale_dirs = ['i18n/']
gettext_compact = True
# -- Options for HTML output ---------------------------------------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
#html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'GeoNodedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'GeoNode.tex', u'GeoNode Documentation',
u'GeoNode Development Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'geonode', u'GeoNode Documentation',
[u'GeoNode Development Team'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'GeoNode'
epub_author = u'GeoNode Development Team'
epub_publisher = u'GeoNode Development Team'
epub_copyright = u'2017, Open Source Geospatial Foundation, CC-SA'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
#def setup(app):
# from _ext import django_model_fields
# app.connect('autodoc-process-docstring', django_model_fields.process_docstring)
| gpl-3.0 |
forkbong/qutebrowser | tests/unit/browser/webkit/network/test_filescheme.py | 1 | 9313 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
# Copyright 2015-2018 Antoni Boucher (antoyo) <bouanto@zoho.com>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
import os
import dataclasses
from typing import List
import pytest
import bs4
from PyQt5.QtCore import QUrl
from PyQt5.QtNetwork import QNetworkRequest
from qutebrowser.browser.webkit.network import filescheme
from qutebrowser.utils import urlutils, utils
from helpers import utils as testutils
@pytest.mark.parametrize('create_file, create_dir, filterfunc, expected', [
    (True, False, os.path.isfile, True),
    (True, False, os.path.isdir, False),
    (False, True, os.path.isfile, False),
    (False, True, os.path.isdir, True),
    (False, False, os.path.isfile, False),
    (False, False, os.path.isdir, False),
])
def test_get_file_list(tmpdir, create_file, create_dir, filterfunc, expected):
    """Test get_file_list.

    Creates (or doesn't create) tmpdir/foo as a file or directory and checks
    that get_file_list() includes the entry exactly when *filterfunc*
    matches what was created.
    """
    path = tmpdir / 'foo'
    if create_file or create_dir:
        path.ensure(dir=create_dir)

    all_files = os.listdir(str(tmpdir))

    result = filescheme.get_file_list(str(tmpdir), all_files, filterfunc)
    item = {'name': 'foo', 'absname': str(path)}
    assert (item in result) == expected
class TestIsRoot:

    """Tests for filescheme.is_root()."""

    @pytest.mark.windows
    @pytest.mark.parametrize('directory, is_root', [
        ('C:\\foo\\bar', False),
        ('C:\\foo\\', False),
        ('C:\\foo', False),
        ('C:\\', True)
    ])
    def test_windows(self, directory, is_root):
        """On Windows only a bare drive root counts as root."""
        assert filescheme.is_root(directory) == is_root

    @pytest.mark.posix
    @pytest.mark.parametrize('directory, is_root', [
        ('/foo/bar', False),
        ('/foo/', False),
        ('/foo', False),
        ('/', True)
    ])
    def test_posix(self, directory, is_root):
        """On POSIX only '/' counts as root."""
        assert filescheme.is_root(directory) == is_root
class TestParentDir:

    """Tests for filescheme.parent_dir()."""

    @pytest.mark.windows
    @pytest.mark.parametrize('directory, parent', [
        ('C:\\foo\\bar', 'C:\\foo'),
        ('C:\\foo', 'C:\\'),
        ('C:\\foo\\', 'C:\\'),
        ('C:\\', 'C:\\'),
    ])
    def test_windows(self, directory, parent):
        """Parent of a Windows path; the drive root is its own parent."""
        assert filescheme.parent_dir(directory) == parent

    @pytest.mark.posix
    @pytest.mark.parametrize('directory, parent', [
        ('/home/foo', '/home'),
        ('/home', '/'),
        ('/home/', '/'),
        ('/', '/'),
    ])
    def test_posix(self, directory, parent):
        """Parent of a POSIX path; '/' is its own parent."""
        assert filescheme.parent_dir(directory) == parent
def _file_url(path):
    """Return a file:// URL (as a string) for the given LocalPath.

    Arguments:
        path: The filepath as a py.path LocalPath object.
    """
    return urlutils.file_url(str(path))
class TestDirbrowserHtml:

    """Tests for the generated directory browser page (dirbrowser_html)."""

    @dataclasses.dataclass
    class Parsed:

        # Parsed form of a dirbrowser page.
        # 'parent' holds the parent link text, or None for the filesystem
        # root (see the parse() helper below).
        parent: str
        folders: List['TestDirbrowserHtml.Item']
        files: List['TestDirbrowserHtml.Item']

    @dataclasses.dataclass
    class Item:

        # One listing entry: the link href and its visible text.
        link: str
        text: str

    @pytest.fixture
    def parser(self):
        """Provide a function to get a parsed dirbrowser document."""
        def parse(path):
            # Render the page for *path* and extract parent/folders/files
            # from the resulting HTML.
            html = filescheme.dirbrowser_html(path).decode('utf-8')
            soup = bs4.BeautifulSoup(html, 'html.parser')
            with testutils.ignore_bs4_warning():
                print(soup.prettify())

            container = soup('div', id='dirbrowserContainer')[0]

            # The filesystem root has no parent entry at all.
            parent_elem = container('ul', class_='parent')
            if not parent_elem:
                parent = None
            else:
                parent = parent_elem[0].li.a.string

            folders = []
            files = []

            for li in container('ul', class_='folders')[0]('li'):
                item = self.Item(link=li.a['href'], text=str(li.a.string))
                folders.append(item)

            for li in container('ul', class_='files')[0]('li'):
                item = self.Item(link=li.a['href'], text=str(li.a.string))
                files.append(item)

            return self.Parsed(parent=parent, folders=folders, files=files)

        return parse

    def test_basic(self):
        """The container div and title reflect the browsed directory."""
        html = filescheme.dirbrowser_html(os.getcwd()).decode('utf-8')
        soup = bs4.BeautifulSoup(html, 'html.parser')
        with testutils.ignore_bs4_warning():
            print(soup.prettify())

        container = soup.div
        assert container['id'] == 'dirbrowserContainer'
        title_elem = container('div', id='dirbrowserTitle')[0]
        title_text = title_elem('p', id='dirbrowserTitleText')[0].text
        assert title_text == 'Browse directory: {}'.format(os.getcwd())

    def test_icons(self, monkeypatch):
        """Make sure icon paths are correct file:// URLs."""
        monkeypatch.setattr(filescheme.jinja.utils, 'resource_filename',
                            lambda name: '/test path/foo.svg')

        html = filescheme.dirbrowser_html(os.getcwd()).decode('utf-8')
        soup = bs4.BeautifulSoup(html, 'html.parser')
        with testutils.ignore_bs4_warning():
            print(soup.prettify())

        css = soup.html.head.style.string
        assert "background-image: url('file:///test%20path/foo.svg');" in css

    def test_empty(self, tmpdir, parser):
        """An empty directory lists a parent link but no entries."""
        parsed = parser(str(tmpdir))
        assert parsed.parent
        assert not parsed.folders
        assert not parsed.files

    def test_files(self, tmpdir, parser):
        """Plain files appear (in name order) in the files section."""
        foo_file = tmpdir / 'foo'
        bar_file = tmpdir / 'bar'
        foo_file.ensure()
        bar_file.ensure()

        parsed = parser(str(tmpdir))
        assert parsed.parent
        assert not parsed.folders
        foo_item = self.Item(_file_url(foo_file), foo_file.relto(tmpdir))
        bar_item = self.Item(_file_url(bar_file), bar_file.relto(tmpdir))
        assert parsed.files == [bar_item, foo_item]

    def test_html_special_chars(self, tmpdir, parser):
        """Characters special to HTML in filenames survive the round-trip."""
        special_file = tmpdir / 'foo&bar'
        special_file.ensure()

        parsed = parser(str(tmpdir))
        item = self.Item(_file_url(special_file), special_file.relto(tmpdir))
        assert parsed.files == [item]

    def test_dirs(self, tmpdir, parser):
        """Subdirectories appear (in name order) in the folders section."""
        foo_dir = tmpdir / 'foo'
        bar_dir = tmpdir / 'bar'
        foo_dir.ensure(dir=True)
        bar_dir.ensure(dir=True)

        parsed = parser(str(tmpdir))
        assert parsed.parent
        assert not parsed.files
        foo_item = self.Item(_file_url(foo_dir), foo_dir.relto(tmpdir))
        bar_item = self.Item(_file_url(bar_dir), bar_dir.relto(tmpdir))
        assert parsed.folders == [bar_item, foo_item]

    def test_mixed(self, tmpdir, parser):
        """Files and directories are sorted into their own sections."""
        foo_file = tmpdir / 'foo'
        bar_dir = tmpdir / 'bar'
        foo_file.ensure()
        bar_dir.ensure(dir=True)

        parsed = parser(str(tmpdir))
        foo_item = self.Item(_file_url(foo_file), foo_file.relto(tmpdir))
        bar_item = self.Item(_file_url(bar_dir), bar_dir.relto(tmpdir))
        assert parsed.parent
        assert parsed.files == [foo_item]
        assert parsed.folders == [bar_item]

    def test_root_dir(self, tmpdir, parser):
        """The filesystem root has no parent entry."""
        root_dir = 'C:\\' if utils.is_windows else '/'
        parsed = parser(root_dir)
        assert not parsed.parent

    def test_oserror(self, mocker):
        """An OSError while listing the directory yields an error page."""
        m = mocker.patch('qutebrowser.browser.webkit.network.filescheme.'
                         'os.listdir')
        m.side_effect = OSError('Error message')
        html = filescheme.dirbrowser_html('').decode('utf-8')
        soup = bs4.BeautifulSoup(html, 'html.parser')
        with testutils.ignore_bs4_warning():
            print(soup.prettify())

        error_msg = soup('p', id='error-message-text')[0].string
        assert error_msg == 'Error message'
class TestFileSchemeHandler:

    """Tests for filescheme.handler()."""

    def test_dir(self, tmpdir):
        """Requesting a directory returns a reply with the dirbrowser page."""
        url = QUrl.fromLocalFile(str(tmpdir))
        req = QNetworkRequest(url)
        reply = filescheme.handler(req, None, None)
        # The URL will always use /, even on Windows - so we force this here
        # too.
        tmpdir_path = str(tmpdir).replace(os.sep, '/')
        assert reply.readAll() == filescheme.dirbrowser_html(tmpdir_path)

    def test_file(self, tmpdir):
        """Requesting a regular file is not handled (None is returned)."""
        filename = tmpdir / 'foo'
        filename.ensure()
        url = QUrl.fromLocalFile(str(filename))
        req = QNetworkRequest(url)
        reply = filescheme.handler(req, None, None)
        assert reply is None

    def test_unicode_encode_error(self, mocker):
        """A UnicodeEncodeError from the path check is swallowed (None)."""
        url = QUrl('file:///tmp/foo')
        req = QNetworkRequest(url)

        err = UnicodeEncodeError('ascii', '', 0, 2, 'foo')
        mocker.patch('os.path.isdir', side_effect=err)

        reply = filescheme.handler(req, None, None)
        assert reply is None
| gpl-3.0 |
securestate/king-phisher | tests/plugins.py | 4 | 2944 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# tests/plugins.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import platform
import sys
import unittest
from king_phisher import plugins
from king_phisher import testing
from king_phisher import version
class PluginRequirementsTests(testing.KingPhisherTestCase):
	"""Tests for the plugins.Requirements compatibility checks."""
	def _test_requirement(self, requirement, case_true, case_false):
		"""Assert *requirement* is compatible with *case_true* but not with *case_false*."""
		requirements = plugins.Requirements([(requirement, case_true)])
		self.assertTrue(requirements.is_compatible)
		requirements = plugins.Requirements([(requirement, case_false)])
		self.assertFalse(requirements.is_compatible)

	def test_empty_requirements(self):
		"""An empty requirements specification is always compatible."""
		requirements = plugins.Requirements({})
		self.assertTrue(requirements.is_compatible, 'no requirements means compatible')

	def test_req_minimum_python_version(self):
		"""The running interpreter satisfies its own version but not the next minor."""
		version_info = sys.version_info
		self._test_requirement(
			'minimum-python-version',
			"{0}.{1}".format(version_info.major, version_info.minor),
			"{0}.{1}".format(version_info.major, version_info.minor + 1)
		)

	def test_req_minimum_version(self):
		"""The installed King Phisher version satisfies itself but not the next minor."""
		version_info = version.version_info
		self._test_requirement(
			'minimum-version',
			version.distutils_version,
			"{0}.{1}".format(version_info.major, version_info.minor + 1)
		)

	def test_req_platforms(self):
		"""An empty platform list matches everywhere; unknown platforms never match."""
		self._test_requirement(
			'platforms',
			[],
			['Foobar']
		)
		self._test_requirement(
			'platforms',
			[platform.system().lower()],
			['Foobar']
		)
# Allow the test module to be run directly, e.g. ``python tests/plugins.py``.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
davy39/eric | Helpviewer/AdBlock/AdBlockSubscription.py | 1 | 24766 | # -*- coding: utf-8 -*-
# Copyright (c) 2009 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing the AdBlock subscription class.
"""
from __future__ import unicode_literals
import os
import re
import hashlib
import base64
from PyQt5.QtCore import pyqtSignal, Qt, QObject, QByteArray, QDateTime, \
QUrl, QCryptographicHash, QFile, QIODevice, QTextStream, QDate, QTime, \
qVersion
from PyQt5.QtNetwork import QNetworkReply
from E5Gui import E5MessageBox
import Utilities
import Preferences
class AdBlockSubscription(QObject):
    """
    Class implementing the AdBlock subscription.

    @signal changed() emitted after the subscription has changed
    @signal rulesChanged() emitted after the subscription's rules have changed
    @signal enabledChanged(bool) emitted after the enabled state was changed
    """
    changed = pyqtSignal()
    rulesChanged = pyqtSignal()
    enabledChanged = pyqtSignal(bool)

    def __init__(self, url, custom, parent=None, default=False):
        """
        Constructor

        @param url AdBlock URL for the subscription (QUrl)
        @param custom flag indicating a custom subscription (boolean)
        @param parent reference to the parent object (QObject)
        @param default flag indicating a default subscription (boolean)
        """
        super(AdBlockSubscription, self).__init__(parent)

        self.__custom = custom
        self.__url = url.toEncoded()
        self.__enabled = False
        self.__downloading = None  # active download reply; None while idle
        self.__defaultSubscription = default

        self.__title = ""
        self.__location = QByteArray()
        self.__lastUpdate = QDateTime()
        self.__requiresLocation = ""  # location of a prerequisite subscription
        self.__requiresTitle = ""

        self.__updatePeriod = 0 # update period in hours, 0 = use default

        self.__remoteModified = QDateTime()

        self.__rules = [] # list containing all AdBlock rules

        # Per-category rule caches; rebuilt by __populateCache().
        self.__networkExceptionRules = []
        self.__networkBlockRules = []
        self.__domainRestrictedCssRules = []
        self.__elementHidingRules = ""
        self.__documentRules = []
        self.__elemhideRules = []

        # Matches the "! Checksum: <base64>" header line of a rules file.
        self.__checksumRe = re.compile(
            r"""^\s*!\s*checksum[\s\-:]+([\w\+\/=]+).*\n""",
            re.IGNORECASE | re.MULTILINE)
        # Matches an "Expires: <n> [hours]" directive (no unit means days).
        self.__expiresRe = re.compile(
            r"""(?:expires:|expires after)\s*(\d+)\s*(hour|h)?""",
            re.IGNORECASE)
        # Matches a "! Last modified: <d> <Mon> <yyyy> [hh:mm]" header line.
        self.__remoteModifiedRe = re.compile(
            r"""!\s*(?:Last modified|Updated):\s*(\d{1,2})\s*"""
            r"""(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s*"""
            r"""(\d{2,4})\s*((\d{1,2}):(\d{2}))?""",
            re.IGNORECASE)
        self.__monthNameToNumber = {
            "Jan": 1,
            "Feb": 2,
            "Mar": 3,
            "Apr": 4,
            "May": 5,
            "Jun": 6,
            "Jul": 7,
            "Aug": 8,
            "Sep": 9,
            "Oct": 10,
            "Nov": 11,
            "Dec": 12
        }

        self.__parseUrl(url)
    def __parseUrl(self, url):
        """
        Private method to parse the AdBlock URL for the subscription.

        Extracts title, enabled state, location, the optional required
        subscription and the last-update timestamp from an
        ``abp:subscribe`` URL, then loads the rules.

        @param url AdBlock URL for the subscription (QUrl)
        """
        # Only "abp:subscribe" URLs describe a subscription; ignore others.
        if url.scheme() != "abp":
            return

        if url.path() != "subscribe":
            return

        if qVersion() >= "5.0.0":
            # Qt 5: query items are accessed through QUrlQuery.
            from PyQt5.QtCore import QUrlQuery
            urlQuery = QUrlQuery(url)
            self.__title = urlQuery.queryItemValue("title")
            self.__enabled = urlQuery.queryItemValue("enabled") != "false"
            self.__location = QByteArray(urlQuery.queryItemValue("location"))

            # Check for required subscription
            self.__requiresLocation = urlQuery.queryItemValue(
                "requiresLocation")
            self.__requiresTitle = urlQuery.queryItemValue("requiresTitle")
            if self.__requiresLocation and self.__requiresTitle:
                # Deferred import to avoid a circular dependency at load time.
                import Helpviewer.HelpWindow
                Helpviewer.HelpWindow.HelpWindow.adBlockManager()\
                    .loadRequiredSubscription(self.__requiresLocation,
                                              self.__requiresTitle)

            lastUpdateString = urlQuery.queryItemValue("lastUpdate")
            self.__lastUpdate = QDateTime.fromString(lastUpdateString,
                                                     Qt.ISODate)
        else:
            # Qt 4 fallback: percent-encoded query items live on the QUrl.
            self.__title = \
                QUrl.fromPercentEncoding(url.encodedQueryItemValue("title"))
            self.__enabled = QUrl.fromPercentEncoding(
                url.encodedQueryItemValue("enabled")) != "false"
            self.__location = QByteArray(QUrl.fromPercentEncoding(
                url.encodedQueryItemValue("location")))

            # Check for required subscription
            self.__requiresLocation = QUrl.fromPercentEncoding(
                url.encodedQueryItemValue("requiresLocation"))
            self.__requiresTitle = QUrl.fromPercentEncoding(
                url.encodedQueryItemValue("requiresTitle"))
            if self.__requiresLocation and self.__requiresTitle:
                import Helpviewer.HelpWindow
                Helpviewer.HelpWindow.HelpWindow.adBlockManager()\
                    .loadRequiredSubscription(self.__requiresLocation,
                                              self.__requiresTitle)

            lastUpdateByteArray = url.encodedQueryItemValue("lastUpdate")
            lastUpdateString = QUrl.fromPercentEncoding(lastUpdateByteArray)
            self.__lastUpdate = QDateTime.fromString(lastUpdateString,
                                                     Qt.ISODate)

        self.__loadRules()

    def url(self):
        """
        Public method to generate the URL for this subscription.

        This is the inverse of __parseUrl(): it serializes the current
        state back into an ``abp:subscribe`` URL.

        @return AdBlock URL for the subscription (QUrl)
        """
        url = QUrl()
        url.setScheme("abp")
        url.setPath("subscribe")

        queryItems = []
        queryItems.append(("location", bytes(self.__location).decode()))
        queryItems.append(("title", self.__title))
        if self.__requiresLocation and self.__requiresTitle:
            queryItems.append(("requiresLocation", self.__requiresLocation))
            queryItems.append(("requiresTitle", self.__requiresTitle))
        # "enabled" is only serialized when False; absence means enabled.
        if not self.__enabled:
            queryItems.append(("enabled", "false"))
        if self.__lastUpdate.isValid():
            queryItems.append(("lastUpdate",
                               self.__lastUpdate.toString(Qt.ISODate)))
        if qVersion() >= "5.0.0":
            from PyQt5.QtCore import QUrlQuery
            query = QUrlQuery()
            query.setQueryItems(queryItems)
            url.setQuery(query)
        else:
            url.setQueryItems(queryItems)
        return url
def isEnabled(self):
"""
Public method to check, if the subscription is enabled.
@return flag indicating the enabled status (boolean)
"""
return self.__enabled
def setEnabled(self, enabled):
"""
Public method to set the enabled status.
@param enabled flag indicating the enabled status (boolean)
"""
if self.__enabled == enabled:
return
self.__enabled = enabled
self.enabledChanged.emit(enabled)
def title(self):
"""
Public method to get the subscription title.
@return subscription title (string)
"""
return self.__title
def setTitle(self, title):
"""
Public method to set the subscription title.
@param title subscription title (string)
"""
if self.__title == title:
return
self.__title = title
self.changed.emit()
def location(self):
"""
Public method to get the subscription location.
@return URL of the subscription location (QUrl)
"""
return QUrl.fromEncoded(self.__location)
def setLocation(self, url):
"""
Public method to set the subscription location.
@param url URL of the subscription location (QUrl)
"""
if url == self.location():
return
self.__location = url.toEncoded()
self.__lastUpdate = QDateTime()
self.changed.emit()
def requiresLocation(self):
"""
Public method to get the location of a required subscription.
@return location of a required subscription (string)
"""
return self.__requiresLocation
def lastUpdate(self):
"""
Public method to get the date and time of the last update.
@return date and time of the last update (QDateTime)
"""
return self.__lastUpdate
    def rulesFileName(self):
        """
        Public method to get the name of the rules file.

        For file:// locations the local file is used directly; otherwise
        a cache file named after the SHA1 of the location is used.

        @return name of the rules file (string)
        """
        if self.location().scheme() == "file":
            return self.location().toLocalFile()

        if self.__location.isEmpty():
            return ""

        # Derive a stable cache file name from the subscription location.
        sha1 = bytes(QCryptographicHash.hash(
            self.__location, QCryptographicHash.Sha1).toHex()).decode()
        dataDir = os.path.join(
            Utilities.getConfigDir(), "browser", "subscriptions")
        if not os.path.exists(dataDir):
            os.makedirs(dataDir)
        fileName = os.path.join(
            dataDir, "adblock_subscription_{0}".format(sha1))
        return fileName

    def __loadRules(self):
        """
        Private method to load the rules of the subscription.

        Parses the cached rules file (if any), extracting the update
        period and remote modification time from the header comments,
        and rebuilds the rule caches.  A missing cache for a non-custom
        subscription triggers a download.
        """
        fileName = self.rulesFileName()
        f = QFile(fileName)
        if f.exists():
            if not f.open(QIODevice.ReadOnly):
                E5MessageBox.warning(
                    None,
                    self.tr("Load subscription rules"),
                    self.tr(
                        """Unable to open adblock file '{0}' for reading.""")
                    .format(fileName))
            else:
                textStream = QTextStream(f)
                # A valid subscription file must start with "[Adblock...".
                header = textStream.readLine(1024)
                if not header.startswith("[Adblock"):
                    E5MessageBox.warning(
                        None,
                        self.tr("Load subscription rules"),
                        self.tr("""AdBlock file '{0}' does not start"""
                                """ with [Adblock.""")
                        .format(fileName))
                    f.close()
                    f.remove()
                    self.__lastUpdate = QDateTime()
                else:
                    from .AdBlockRule import AdBlockRule

                    self.__updatePeriod = 0
                    self.__remoteModified = QDateTime()
                    self.__rules = []
                    self.__rules.append(AdBlockRule(header, self))
                    while not textStream.atEnd():
                        line = textStream.readLine()
                        self.__rules.append(AdBlockRule(line, self))
                        expires = self.__expiresRe.search(line)
                        if expires:
                            period, kind = expires.groups()
                            if kind:
                                # hours
                                self.__updatePeriod = int(period)
                            else:
                                # days
                                self.__updatePeriod = int(period) * 24
                        remoteModified = self.__remoteModifiedRe.search(line)
                        if remoteModified:
                            day, month, year, time, hour, minute = \
                                remoteModified.groups()
                            self.__remoteModified.setDate(
                                QDate(int(year),
                                      self.__monthNameToNumber[month],
                                      int(day))
                            )
                            if time:
                                self.__remoteModified.setTime(
                                    QTime(int(hour), int(minute)))
                    self.__populateCache()
                    self.changed.emit()
        elif not fileName.endswith("_custom"):
            # No cached rules yet for a remote subscription: fetch them.
            self.__lastUpdate = QDateTime()
            self.checkForUpdate()
    def checkForUpdate(self):
        """
        Public method to check for an update.

        Triggers a download when no update was ever done, or when the
        remote modification time or the local last-update time is older
        than the update period (subscription-specified, falling back to
        the configured default in days).
        """
        if self.__updatePeriod:
            updatePeriod = self.__updatePeriod
        else:
            # Preference is stored in days; convert to hours.
            updatePeriod = Preferences.getHelp("AdBlockUpdatePeriod") * 24
        if not self.__lastUpdate.isValid() or \
           (self.__remoteModified.isValid() and
            self.__remoteModified.addSecs(updatePeriod * 3600) <
                QDateTime.currentDateTime()) or \
           self.__lastUpdate.addSecs(updatePeriod * 3600) < \
                QDateTime.currentDateTime():
            self.updateNow()

    def updateNow(self):
        """
        Public method to update the subscription immediately.

        Local (file://) subscriptions are re-read directly; remote ones
        are fetched asynchronously, following redirects, with
        __rulesDownloaded() as the completion slot.  A download already
        in flight is not restarted.
        """
        if self.__downloading is not None:
            return

        if not self.location().isValid():
            return

        if self.location().scheme() == "file":
            self.__lastUpdate = QDateTime.currentDateTime()
            self.__loadRules()
            return

        import Helpviewer.HelpWindow
        from Helpviewer.Network.FollowRedirectReply import FollowRedirectReply
        self.__downloading = FollowRedirectReply(
            self.location(),
            Helpviewer.HelpWindow.HelpWindow.networkAccessManager())
        self.__downloading.finished.connect(self.__rulesDownloaded)
def __rulesDownloaded(self):
"""
Private slot to deal with the downloaded rules.
"""
reply = self.sender()
response = reply.readAll()
reply.close()
self.__downloading = None
if reply.error() != QNetworkReply.NoError:
if not self.__defaultSubscription:
# don't show error if we try to load the default
E5MessageBox.warning(
None,
self.tr("Downloading subscription rules"),
self.tr(
"""<p>Subscription rules could not be"""
""" downloaded.</p><p>Error: {0}</p>""")
.format(reply.errorString()))
else:
# reset after first download attempt
self.__defaultSubscription = False
return
if response.isEmpty():
E5MessageBox.warning(
None,
self.tr("Downloading subscription rules"),
self.tr("""Got empty subscription rules."""))
return
fileName = self.rulesFileName()
QFile.remove(fileName)
f = QFile(fileName)
if not f.open(QIODevice.ReadWrite):
E5MessageBox.warning(
None,
self.tr("Downloading subscription rules"),
self.tr(
"""Unable to open adblock file '{0}' for writing.""")
.file(fileName))
return
f.write(response)
f.close()
self.__lastUpdate = QDateTime.currentDateTime()
if self.__validateCheckSum(fileName):
self.__loadRules()
else:
QFile.remove(fileName)
self.__downloading = None
    def __validateCheckSum(self, fileName):
        """
        Private method to check the subscription file's checksum.

        The checksum is the base64-encoded MD5 of the normalized file
        contents (LF line endings, empty lines and the checksum line
        itself removed), with base64 padding stripped.

        @param fileName name of the file containing the subscription (string)
        @return flag indicating a valid file (boolean). A file is considered
            valid, if the checksum is OK or the file does not contain a
            checksum (i.e. cannot be checked).
        """
        try:
            f = open(fileName, "r", encoding="utf-8")
            data = f.read()
            f.close()
        except (IOError, OSError):
            return False

        match = re.search(self.__checksumRe, data)
        if match:
            expectedChecksum = match.group(1)
        else:
            # consider it as valid
            return True

        # normalize the data
        data = re.sub(r"\r", "", data) # normalize eol
        data = re.sub(r"\n+", "\n", data) # remove empty lines
        data = re.sub(self.__checksumRe, "", data) # remove checksum line

        # calculate checksum
        md5 = hashlib.md5()
        md5.update(data.encode("utf-8"))
        calculatedChecksum = base64.b64encode(md5.digest()).decode()\
            .rstrip("=")
        if calculatedChecksum == expectedChecksum:
            return True
        else:
            # On mismatch let the user decide whether to keep the rules.
            res = E5MessageBox.yesNo(
                None,
                self.tr("Downloading subscription rules"),
                self.tr(
                    """<p>AdBlock subscription <b>{0}</b> has a wrong"""
                    """ checksum.<br/>"""
                    """Found: {1}<br/>"""
                    """Calculated: {2}<br/>"""
                    """Use it anyway?</p>""")
                .format(self.__title, expectedChecksum,
                        calculatedChecksum))
            return res
    def saveRules(self):
        """
        Public method to save the subscription rules.

        Writes all rules back to the rules file, prepending an
        "[Adblock Plus ...]" header if the first rule is not already a
        header line.
        """
        fileName = self.rulesFileName()
        if not fileName:
            return

        f = QFile(fileName)
        if not f.open(QIODevice.ReadWrite | QIODevice.Truncate):
            E5MessageBox.warning(
                None,
                self.tr("Saving subscription rules"),
                self.tr(
                    """Unable to open adblock file '{0}' for writing.""")
                .format(fileName))
            return

        textStream = QTextStream(f)
        if not self.__rules or not self.__rules[0].isHeader():
            textStream << "[Adblock Plus 1.1.1]\n"
        for rule in self.__rules:
            textStream << rule.filter() << "\n"
def match(self, req, urlDomain, urlString):
"""
Public method to check the subscription for a matching rule.
@param req reference to the network request (QNetworkRequest)
@param urlDomain domain of the URL (string)
@param urlString URL (string)
@return reference to the rule object or None (AdBlockRule)
"""
for rule in self.__networkExceptionRules:
if rule.networkMatch(req, urlDomain, urlString):
return None
for rule in self.__networkBlockRules:
if rule.networkMatch(req, urlDomain, urlString):
return rule
return None
def adBlockDisabledForUrl(self, url):
"""
Public method to check, if AdBlock is disabled for the given URL.
@param url URL to check (QUrl)
@return flag indicating disabled state (boolean)
"""
for rule in self.__documentRules:
if rule.urlMatch(url):
return True
return False
def elemHideDisabledForUrl(self, url):
"""
Public method to check, if element hiding is disabled for the given
URL.
@param url URL to check (QUrl)
@return flag indicating disabled state (boolean)
"""
if self.adBlockDisabledForUrl(url):
return True
for rule in self.__elemhideRules:
if rule.urlMatch(url):
return True
return False
def elementHidingRules(self):
"""
Public method to get the element hiding rules.
@return element hiding rules (string)
"""
return self.__elementHidingRules
def elementHidingRulesForDomain(self, domain):
"""
Public method to get the element hiding rules for the given domain.
@param domain domain name (string)
@return element hiding rules (string)
"""
rules = ""
for rule in self.__domainRestrictedCssRules:
if rule.matchDomain(domain):
rules += rule.cssSelector() + ","
return rules
def rule(self, offset):
"""
Public method to get a specific rule.
@param offset offset of the rule (integer)
@return requested rule (AdBlockRule)
"""
if offset >= len(self.__rules):
return None
return self.__rules[offset]
def allRules(self):
"""
Public method to get the list of rules.
@return list of rules (list of AdBlockRule)
"""
return self.__rules[:]
def addRule(self, rule):
"""
Public method to add a rule.
@param rule reference to the rule to add (AdBlockRule)
@return offset of the rule (integer)
"""
self.__rules.append(rule)
self.__populateCache()
self.rulesChanged.emit()
return len(self.__rules) - 1
def removeRule(self, offset):
"""
Public method to remove a rule given the offset.
@param offset offset of the rule to remove (integer)
"""
if offset < 0 or offset > len(self.__rules):
return
del self.__rules[offset]
self.__populateCache()
self.rulesChanged.emit()
def replaceRule(self, rule, offset):
"""
Public method to replace a rule given the offset.
@param rule reference to the rule to set (AdBlockRule)
@param offset offset of the rule to remove (integer)
@return requested rule (AdBlockRule)
"""
if offset >= len(self.__rules):
return None
self.__rules[offset] = rule
self.__populateCache()
self.rulesChanged.emit()
return self.__rules[offset]
    def __populateCache(self):
        """
        Private method to populate the various rule caches.

        Categorizes every enabled rule into exactly one cache:
        domain-restricted CSS, global element hiding CSS, document
        exemptions, element-hiding exemptions, network exceptions or
        network block rules.
        """
        self.__networkExceptionRules = []
        self.__networkBlockRules = []
        self.__domainRestrictedCssRules = []
        self.__elementHidingRules = ""
        self.__documentRules = []
        self.__elemhideRules = []

        for rule in self.__rules:
            # Disabled rules never take part in matching.
            if not rule.isEnabled():
                continue

            if rule.isCSSRule():
                if rule.isDomainRestricted():
                    self.__domainRestrictedCssRules.append(rule)
                else:
                    # Global CSS rules are concatenated into one selector
                    # string, each selector followed by a comma.
                    self.__elementHidingRules += rule.cssSelector() + ","
            elif rule.isDocument():
                self.__documentRules.append(rule)
            elif rule.isElementHiding():
                self.__elemhideRules.append(rule)
            elif rule.isException():
                self.__networkExceptionRules.append(rule)
            else:
                self.__networkBlockRules.append(rule)
def canEditRules(self):
"""
Public method to check, if rules can be edited.
@return flag indicating rules may be edited (boolean)
"""
return self.__custom
def canBeRemoved(self):
"""
Public method to check, if the subscription can be removed.
@return flag indicating removal is allowed (boolean)
"""
return not self.__custom and not self.__defaultSubscription
    def setRuleEnabled(self, offset, enabled):
        """
        Public method to enable a specific rule.

        For CSS rules the caches are rebuilt and the user style sheet is
        reloaded so the change becomes visible immediately.

        @param offset offset of the rule (integer)
        @param enabled new enabled state (boolean)
        @return reference to the changed rule (AdBlockRule)
        """
        if offset >= len(self.__rules):
            return None

        rule = self.__rules[offset]
        rule.setEnabled(enabled)
        if rule.isCSSRule():
            # Deferred import to avoid a circular dependency at load time.
            import Helpviewer.HelpWindow
            self.__populateCache()
            Helpviewer.HelpWindow.HelpWindow.mainWindow()\
                .reloadUserStyleSheet()

        return rule
| gpl-3.0 |
mozilla/bedrock | bedrock/mozorg/tests/test_views.py | 4 | 3290 | # -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
from django.test.client import RequestFactory
from bedrock.base.urlresolvers import reverse
from mock import ANY, patch
from bedrock.mozorg.tests import TestCase
from bedrock.mozorg import views
class TestViews(TestCase):
    """Funnelcake parameter handling for the home page download button."""

    @patch.dict(os.environ, FUNNELCAKE_5_LOCALES='en-US', FUNNELCAKE_5_PLATFORMS='win')
    def test_download_button_funnelcake(self):
        """The download button should have the funnelcake ID."""
        with self.activate('en-US'):
            response = self.client.get(reverse('mozorg.home'), {'f': '5'})
            self.assertIn(b'product=firefox-stub-f5&', response.content)

    def test_download_button_bad_funnelcake(self):
        """The download button should not have a bad funnelcake ID."""
        with self.activate('en-US'):
            # Non-numeric funnelcake IDs must be ignored.
            response = self.client.get(reverse('mozorg.home'), {'f': '5dude'})
            self.assertIn(b'product=firefox-stub&', response.content)
            self.assertNotIn(b'product=firefox-stub-f5dude&', response.content)

            # Unknown numeric funnelcake IDs must be ignored as well.
            response = self.client.get(reverse('mozorg.home'), {'f': '999999999'})
            self.assertIn(b'product=firefox-stub&', response.content)
            self.assertNotIn(b'product=firefox-stub-f999999999&', response.content)
class TestRobots(TestCase):
    """Tests for the robots.txt view."""

    def setUp(self):
        self.rf = RequestFactory()
        self.view = views.Robots()

    def test_production_disallow_all_is_false(self):
        """Crawling must be allowed on the production host."""
        self.view.request = self.rf.get('/', HTTP_HOST='www.mozilla.org')
        context = self.view.get_context_data()
        self.assertFalse(context['disallow_all'])

    def test_non_production_disallow_all_is_true(self):
        """Crawling must be blocked everywhere but production."""
        self.view.request = self.rf.get('/', HTTP_HOST='www.allizom.org')
        context = self.view.get_context_data()
        self.assertTrue(context['disallow_all'])

    def test_robots_no_redirect(self):
        """/robots.txt must be served directly as plain text."""
        response = self.client.get('/robots.txt', HTTP_HOST='www.mozilla.org')
        self.assertEqual(response.status_code, 200)
        self.assertFalse(response.context_data['disallow_all'])
        self.assertEqual(response.get('Content-Type'), 'text/plain')
@patch('bedrock.mozorg.views.l10n_utils.render')
class TestHomePage(TestCase):
    """Verify that home_view selects the correct template per locale."""

    def setUp(self):
        self.rf = RequestFactory()

    def _assert_home_template(self, locale, template, render_mock):
        # Build a fresh GET request tagged with the locale and confirm
        # that home_view renders the expected template.  Extracted to
        # remove the duplication the four tests below previously shared.
        req = RequestFactory().get('/')
        req.locale = locale
        views.home_view(req)
        render_mock.assert_called_once_with(req, template, ANY)

    def test_home_en_template(self, render_mock):
        self._assert_home_template('en-US', 'mozorg/home/home-en.html', render_mock)

    def test_home_de_template(self, render_mock):
        self._assert_home_template('de', 'mozorg/home/home-de.html', render_mock)

    def test_home_fr_template(self, render_mock):
        self._assert_home_template('fr', 'mozorg/home/home-fr.html', render_mock)

    def test_home_locale_template(self, render_mock):
        # Locales without a dedicated template fall back to the generic one.
        self._assert_home_template('es', 'mozorg/home/home.html', render_mock)
| mpl-2.0 |
linuxdemon1/hexchat-plugins | znc-snofilter.py | 2 | 1427 | # coding=utf-8
from fnmatch import fnmatch
import hexchat
__module_name__ = "ZNC-snofilter"
__module_author__ = "linuxdaemon"
__module_version__ = "0.0.1"
__module_description__ = "Companion script to my snofilter module for ZNC, moves all notices from the module in to query windows"
def get_net_name():
    """Return the server-context channel name for the current network,
    or None if no matching server context is found."""
    current_network = hexchat.get_info('network')
    # type == 1 identifies server contexts in hexchat's channel list.
    return next(
        (ctx.channel for ctx in hexchat.get_list('channels')
         if ctx.type == 1 and ctx.network == current_network),
        None)
def handle(word, word_eol, event):
    """Route a message from the ZNC snofilter module into a per-network
    query window, emitting it as print event *event*.

    Returns hexchat.EAT_NONE for unrelated messages (normal processing
    continues) and hexchat.EAT_HEXCHAT once the message was displayed.
    """
    # Only act on messages whose source mask is the snofilter module user.
    if not fnmatch(word[0], "*!snofilter@znc.in"):
        return hexchat.EAT_NONE
    # Nick part of the prefix; [2:] drops the leading ':' plus one more
    # character -- presumably a module prefix character (TODO confirm).
    window = word[0].split('!', 1)[0][2:].lower()
    net = "{}-snotices".format(get_net_name())
    serv = hexchat.find_context(net)
    if not serv:
        # Create a detached, unconnected server tab to host the queries.
        hexchat.command("newserver -noconnect {}".format(net))
        serv = hexchat.find_context(net)
    serv.command("query -nofocus {}".format(window))
    window_ctx = hexchat.find_context(net, window)
    # word_eol[3] is the trailing parameter; [1:] strips its leading ':'.
    window_ctx.emit_print(event, window, word_eol[3][1:])
    return hexchat.EAT_HEXCHAT
def on_notice(word, word_eol, userdata):
    # Server NOTICE hook: delegate to handle() with the "Notice" print event.
    return handle(word, word_eol, "Notice")


def on_privmsg(word, word_eol, userdata):
    # Server PRIVMSG hook: delegate with the private-message print event.
    return handle(word, word_eol, "Private Message to Dialog")


# Register unload notification and the two server hooks.
hexchat.hook_unload(lambda userdata: print(__module_name__, "plugin unloaded"))
hexchat.hook_server("NOTICE", on_notice)
hexchat.hook_server("PRIVMSG", on_privmsg)
print(__module_name__, "plugin loaded")
| gpl-3.0 |
mjabri/topographica | topo/transferfn/misc.py | 2 | 22352 | """
Transfer functions with more complex dependencies.
$Id: basic.py 10790 2009-11-21 17:51:33Z antolikjan $
"""
import copy
import numpy as np
import param
import imagen
from holoviews import Image
import topo
import topo.base.functionfamily
from topo.base.arrayutil import clip_lower,array_argmax
from topo.base.boundingregion import BoundingBox
from topo.base.sheetcoords import SheetCoordinateSystem
from topo.transferfn import TransferFn, TransferFnWithState
# Not suitable for basic.py due to its dependence on patterns.
class PatternCombine(TransferFn):
    """
    Combine the supplied pattern with one generated using a
    PatternGenerator.

    Useful for operations like adding noise or masking out lesioned
    items or around the edges of non-rectangular shapes.
    """

    generator = param.ClassSelector(imagen.PatternGenerator,
        default=imagen.Constant(), doc="""
        Pattern to combine with the supplied matrix.""")

    operator = param.Parameter(np.multiply,precedence=0.98,doc="""
        Binary Numeric function used to combine the two patterns.

        Any binary Numeric array "ufunc" returning the same type of
        array as the operands and supporting the reduce operator is
        allowed here.  See topo.pattern.Composite.operator for more
        details.
        """)

    def __call__(self,x):
        ###JABHACKALERT: Need to set it up to be independent of
        #density; right now only things like random numbers work
        #reasonably
        rows,cols = x.shape
        bb = BoundingBox(points=((0,0), (rows,cols)))
        # Render the generator at unit density over the matrix bounds,
        # transposed to match the matrix orientation of x.
        generated_pattern = self.generator(bounds=bb,xdensity=1,ydensity=1).transpose()
        new_pattern = self.operator(x, generated_pattern)
        # Modify x in place (TransferFns operate on their argument).
        x *= 0.0
        x += new_pattern
# Not suitable for basic.py due to its dependence on patterns.
class KernelMax(TransferFn):
    """
    Replaces the given matrix with a kernel function centered around the maximum value.

    This operation is usually part of the Kohonen SOM algorithm, and
    approximates a series of lateral interactions resulting in a
    single activity bubble.

    The radius of the kernel (i.e. the surround) is specified by the
    parameter 'radius', which should be set before using __call__.
    The shape of the surround is determined by the
    neighborhood_kernel_generator, and can be any PatternGenerator
    instance, or any function accepting bounds, density, radius, and
    height to return a kernel matrix.
    """

    kernel_radius = param.Number(default=0.0,bounds=(0,None),doc="""
        Kernel radius in Sheet coordinates.""")

    neighborhood_kernel_generator = param.ClassSelector(imagen.PatternGenerator,
        default=imagen.Gaussian(x=0.0,y=0.0,aspect_ratio=1.0),
        doc="Neighborhood function")

    crop_radius_multiplier = param.Number(default=3.0,doc="""
        Factor by which the radius should be multiplied, when deciding
        how far from the winner to keep evaluating the kernel.""")

    density=param.Number(1.0,bounds=(0,None),doc="""
        Density of the Sheet whose matrix we act on, for use
        in converting from matrix to Sheet coordinates.""")

    def __call__(self,x):
        rows,cols = x.shape
        radius = self.density*self.kernel_radius
        crop_radius = int(max(1.25,radius*self.crop_radius_multiplier))

        # find out the matrix coordinates of the winner
        wr,wc = array_argmax(x)

        # convert to sheet coordinates
        wy = rows-wr-1

        # Optimization: Calculate the bounding box around the winner
        # in which weights will be changed
        cmin = max(wc-crop_radius, 0)
        cmax = min(wc+crop_radius+1,cols)
        rmin = max(wr-crop_radius, 0)
        rmax = min(wr+crop_radius+1,rows)
        ymin = max(wy-crop_radius, 0)
        ymax = min(wy+crop_radius+1,rows)
        bb = BoundingBox(points=((cmin,ymin), (cmax,ymax)))

        # generate the kernel matrix and insert it into the correct
        # part of the output array
        kernel = self.neighborhood_kernel_generator(bounds=bb,xdensity=1,ydensity=1,
                                                    size=2*radius,x=wc+0.5,y=wy+0.5)
        # Replace x in place: zero everything, then paste the kernel
        # into the cropped region around the winner.
        x *= 0.0
        x[rmin:rmax,cmin:cmax] = kernel
class HalfRectify(TransferFn):
    """
    Transfer function that applies a half-wave rectification (clips at zero)

    Subtracts a per-unit threshold ``t`` from the input, clips negative
    values to zero and multiplies by ``gain`` -- all in place.
    """

    t_init = param.Number(default=0.0,doc="""
        The initial value of threshold at which output becomes non-zero..""")

    gain = param.Number(default=1.0,doc="""
        The neuronal gain""")

    randomized_init = param.Boolean(False,doc="""
        Whether to randomize the initial t parameter.""")

    noise_magnitude = param.Number(default=0.1,doc="""
        The magnitude of the additive noise to apply to the t_init
        parameter at initialization.""")

    def __init__(self,**params):
        # Bug fix: the original called super(TransferFn, self).__init__,
        # which skips TransferFn's own __init__ in the MRO; the first
        # argument to super() must be the defining class itself.
        super(HalfRectify,self).__init__(**params)
        self.first_call = True

    def __call__(self,x):
        # Lazily build the threshold array on the first call, optionally
        # jittered with additive uniform noise in
        # [-noise_magnitude, +noise_magnitude].
        if self.first_call:
            self.first_call = False
            if self.randomized_init:
                self.t = np.ones(x.shape, x.dtype.char) * self.t_init + \
                    (imagen.random.UniformRandom() \
                     (xdensity=x.shape[0],ydensity=x.shape[1])-0.5) * \
                    self.noise_magnitude*2
            else:
                self.t = np.ones(x.shape, x.dtype.char) * self.t_init

        x -= self.t
        clip_lower(x,0)
        x *= self.gain
class TemporalScatter(TransferFnWithState):
    """
    Scatter values across time using a specified distribution,
    discretized into a symmetric interval around zero. This class is
    still work in progress as part of the TCAL model.

    As no notion of time exists at the level of transfer functions
    (state is changes according to call count), this class assumes a
    fixed, 'clocked' timestep exists between subsequent calls.

    Internally, an activity buffer is computed with a depth
    corresponding to the number of timestep intervals with the stated
    span value.

    Note that the transfer function only has the power to delay
    output. Therefore the central peak of a unimodal, zero-mean
    distribution will occur *after* the time 'span' has elapsed.

    In addition it is very *important* to view the depth map using the
    view_depth_map method: if the majority of the values generated by
    the distribution are outside the chosen span, values smaller and
    larger than the span will be lumped into the first and last bins
    respectively, distorting the shape of the intended distribution.
    """

    timestep = param.Number(default=5, doc="""
        The timestep interval in milliseconds. This value is used to
        compute the depth and sample the supplied distribution.

        Note that value must be specified some some extenal source and
        there is no way to ensure that subsequent calls are regular
        with the stated interval.
        """)

    distribution = param.ClassSelector(imagen.PatternGenerator,
        default=imagen.random.GaussianRandom(offset=0.0, scale=30),
        doc="""
        The pattern generator that defines the scatter distribution in
        milliseconds. Any random distribution may be used
        e.g. UniformRandom or GaussianRandom. Note that the discretized
        binning with the given 'span' is zero-centered.

        In other words, even if a distribution is not-symmetric
        (i.e. skewed), binning will occur around a symmetric interval
        around zero with a total extent given by the span.
        """)

    span = param.Number(default=120, allow_None=True, bounds=(0,None), doc="""
        The input distribution is expected to be continuous and may be
        unbounded. For instance, a Gaussian distribution may generate
        sample values that are unbounded in both the positive and
        negative direction.

        The span parameter determines the size of the (zero-centered)
        interval which is binned.""")

    def __init__(self, **params):
        super(TemporalScatter,self).__init__(**params)
        # Symmetric, zero-centered binning interval in milliseconds.
        self.limits = (-self.span/2.0, self.span/2.0)
        self._depth = None  # lazily computed via the ``depth`` property
        self.first_call = True
        self.raw_depth_map = None   # The raw depth map (float values)
        self.depth_map = None       # The discretized depth map (ints)
        self._buffer = None         # The activity buffer
        self.__current_state_stack=[]

    def view_depth_map(self, mode='both'):
        """
        Visualize the depth map using holoviews, including
        distribution histograms.

        Mode may be one of 'discrete', 'raw' or 'both':

        * The 'discrete' mode presents the depth map used by
          TemporalScatter, showing the latency at which
          TemporalScatter will propagate the input i.e. discrete,
          positive latencies in milliseconds.

        * The 'raw' mode shows the continuous distribution before
          discretization. This is typically a zero-mean, zero-centered
          distribution i.e a continuous, zero-centered latency
          distribution.

        * Both presents both of the above types together (default).
        """
        views = []
        if mode in ['raw', 'both']:
            views.append(Image(self.raw_depth_map, group='Pattern',
                               label='Raw Depth map').hist())
        if mode in ['discrete', 'both']:
            # Scale bin indices back to milliseconds for display.
            scaled_map = (self.depth_map * self.timestep)
            discrete_sv = Image(scaled_map, group='Pattern',
                                label='Depth map')
            views.append(discrete_sv.hist(num_bins=self.depth,
                                          bin_range=(0, self.span)))
        return views[0] if len(views)==1 else views[0]+views[1]

    @property
    def depth(self):
        """
        The depth of the activity buffer.
        """
        if self._depth:
            return self._depth
        # The span must divide evenly into timesteps, otherwise the
        # buffer depth would be ambiguous.
        if not (self.span // self.timestep) or (self.span % self.timestep):
            raise Exception("The span of the specified limits must be"
                            " an exact, *positive*, multiple of timestep")
        self._depth = self.span // self.timestep
        return self._depth

    def _compute_depth_map(self, shape):
        # Sample the distribution once per unit and discretize the
        # samples into ``depth`` bins covering [min_lim, max_lim].
        (d1,d2) = shape
        (min_lim, max_lim) = self.limits
        self.raw_depth_map = self.distribution(name='ScatterDepth',
                                               xdensity=d1, ydensity=d2,
                                               bounds=BoundingBox(radius=0.5))

        bin_edges = list(np.linspace(min_lim, max_lim, self.depth))
        discretized = np.digitize(self.raw_depth_map.flatten(), bin_edges)
        # Out of bounds bins (to +inf) need to be pulled back in.
        discretized[discretized==len(bin_edges)]=len(bin_edges)-1
        return discretized.reshape(*shape)

    def __call__(self,x):
        (d1,d2) = x.shape
        if self.first_call is True:
            # The buffer is a 3D array containing a stack of activities
            self._buffer = np.zeros((d1,d2,self.depth))
            self.depth_map = self._compute_depth_map(x.shape)
            self.first_call = False

        # Roll the buffer and copy x to the top of the stack
        self._buffer =np.roll(self._buffer,1,axis=2)
        self._buffer[...,0] = x
        # Replace x in place with each unit's value taken from its
        # per-unit delay slot in the buffer.
        x.fill(0.0)
        x += self._buffer[np.arange(d1)[:, None],
                          np.arange(d2),
                          self.depth_map]
        return x

    def state_push(self):
        # Save the buffer and initialization flag, then clear the
        # buffer so pushed state starts from silence.
        self.__current_state_stack.append((copy.copy(self._buffer),
                                           copy.copy(self.first_call)))
        super(TemporalScatter,self).state_push()
        if self._buffer is not None:
            self._buffer *= 0.0

    def state_pop(self):
        # Restore the most recently pushed buffer and flag.
        self._buffer,self.first_call = self.__current_state_stack.pop()
        super(TemporalScatter,self).state_pop()
class AdaptiveThreshold(TransferFnWithState):
    """
    Adapts the parameters of a linear threshold function to maintain a
    constant desired average activity. Defined in:

    Jean-Luc R. Stevens, Judith S. Law, Jan Antolik, and James A. Bednar.
    Mechanisms for stable, robust, and adaptive development of orientation
    maps in the primary visual cortex.
    Journal of Neuroscience 33:15747-15766, 2013.
    http://dx.doi.org/10.1523/JNEUROSCI.1037-13.2013
    """

    t_init = param.Number(default=0.15,doc="""
        Initial value of the threshold value t.""")

    randomized_init = param.Boolean(False,doc="""
        Whether to randomize the initial t parameter.""")

    seed = param.Integer(default=42, doc="""
        Random seed used to control the initial randomized t values.""")

    target_activity = param.Number(default=0.024,doc="""
        The target average activity.""")

    linear_slope = param.Number(default=1.0,doc="""
        Slope of the linear portion above threshold.""")

    learning_rate = param.Number(default=0.01,doc="""
        Learning rate for homeostatic plasticity.""")

    smoothing = param.Number(default=0.991,doc="""
        Weighting of previous activity vs. current activity when
        calculating the average activity.""")

    noise_magnitude = param.Number(default=0.1,doc="""
        The magnitude of the additive noise to apply to the t_init
        parameter at initialization.""")

    period = param.Number(default=1.0, constant=True, doc="""
        How often the threshold should be adjusted.

        If the period is 0, the threshold is adjusted continuously, each
        time this TransferFn is called.

        For nonzero periods, adjustments occur only the first time
        this TransferFn is called after topo.sim.time() reaches an
        integer multiple of the period.

        For example, if period is 2.5 and the TransferFn is evaluated
        every 0.05 simulation time units, the threshold will be
        adjusted at times 2.55, 5.05, 7.55, etc.""")

    def __init__(self,**params):
        # BUGFIX: previously super(HomeostaticResponse, self), which relied
        # on the backwards-compatibility alias defined *after* this class;
        # reference the class by its own name so the alias can be removed.
        super(AdaptiveThreshold,self).__init__(**params)
        self.first_call = True
        self.__current_state_stack=[]
        self.t=None     # To allow state_push at init
        self.y_avg=None # To allow state_push at init
        next_timestamp = topo.sim.time() + self.period
        self._next_update_timestamp = topo.sim.convert_to_time_type(next_timestamp)
        # Activity and smoothed average from the *previous* call, consumed
        # by the periodic threshold update in __call__.
        self._y_avg_prev = None
        self._x_prev = None

    def _initialize(self,x):
        """Allocate per-unit state arrays matching the activity's shape/dtype."""
        self._x_prev = np.copy(x)
        self._y_avg_prev = np.ones(x.shape, x.dtype.char) * self.target_activity

        if self.randomized_init:
            # Seeded uniform noise in [-noise_magnitude, +noise_magnitude)
            # added per-unit to the initial threshold.
            self.t = np.ones(x.shape, x.dtype.char) * self.t_init + \
                (imagen.random.UniformRandom( \
                    random_generator=np.random.RandomState(seed=self.seed)) \
                 (xdensity=x.shape[0],ydensity=x.shape[1]) \
                 -0.5)*self.noise_magnitude*2
        else:
            self.t = np.ones(x.shape, x.dtype.char) * self.t_init

        self.y_avg = np.ones(x.shape, x.dtype.char) * self.target_activity

    def _apply_threshold(self,x):
        """Applies the piecewise-linear thresholding operation to the activity."""
        x -= self.t
        clip_lower(x,0)
        if self.linear_slope != 1.0:
            x *= self.linear_slope

    def _update_threshold(self, prev_t, x, prev_avg, smoothing, learning_rate, target_activity):
        """
        Applies exponential smoothing to the given current activity and previous
        smoothed value following the equations given in the report cited above.

        If plastic is set to False, the running exponential average
        values and thresholds are not updated.
        """
        y_avg = (1.0-smoothing)*x + smoothing*prev_avg
        t = prev_t + learning_rate * (y_avg - target_activity)
        return (y_avg, t) if self.plastic else (prev_avg, prev_t)

    def __call__(self,x):
        """Initialises on the first call and then applies homeostasis."""
        if self.first_call: self._initialize(x); self.first_call = False

        if (topo.sim.time() > self._next_update_timestamp):
            self._next_update_timestamp += self.period
            # Using activity matrix and smoothed activity from the *previous* call.
            (self.y_avg, self.t) = self._update_threshold(self.t, self._x_prev, self._y_avg_prev,
                                                          self.smoothing, self.learning_rate,
                                                          self.target_activity)
            self._y_avg_prev = self.y_avg    # Copy only if not in continuous mode
        self._apply_threshold(x)             # Apply the threshold only after it is updated
        self._x_prev[:] = x[:]               # Recording activity for the next periodic update

    def state_push(self):
        """Save the adaptive state so state_pop can later restore it."""
        self.__current_state_stack.append((copy.copy(self.t),
                                           copy.copy(self.y_avg),
                                           copy.copy(self.first_call),
                                           copy.copy(self._next_update_timestamp),
                                           copy.copy(self._y_avg_prev),
                                           copy.copy(self._x_prev)))
        # BUGFIX: was super(HomeostaticResponse, self); see __init__.
        super(AdaptiveThreshold, self).state_push()

    def state_pop(self):
        """Restore the most recently pushed adaptive state."""
        (self.t, self.y_avg, self.first_call, self._next_update_timestamp,
         self._y_avg_prev, self._x_prev) = self.__current_state_stack.pop()
        # BUGFIX: was super(HomeostaticResponse, self); see __init__.
        super(AdaptiveThreshold, self).state_pop()
# Backwards-compatibility alias: this class was previously named
# HomeostaticResponse; keep the old name importable for existing scripts.
HomeostaticResponse = AdaptiveThreshold
class AttributeTrackingTF(TransferFnWithState):
    """
    Keeps track of attributes of a specified Parameterized over time, for analysis or plotting.

    Useful objects to track include sheets (e.g. "topo.sim['V1']"),
    projections ("topo.sim['V1'].projections['LateralInhibitory']"),
    or an output_function.

    Any attribute whose value is a matrix the same size as the
    activity matrix can be tracked. Only specified units within this
    matrix will be tracked.

    If no object is specified, this function will keep track of the
    incoming activity over time.

    The results are stored in a dictionary named 'values', as (time,
    value) pairs indexed by the parameter name and unit. For
    instance, if the value of attribute 'x' is v for unit (0.0,0.0)
    at time t, values['x'][(0.0,0.0)]=(t,v).

    Updating of the tracked values can be disabled temporarily using
    the plastic parameter.
    """
    # ALERT: Need to make this read-only, because it can't be changed
    # after instantiation unless _object is also changed. Or else
    # need to make _object update whenever object is changed and
    # _object has already been set.
    object = param.Parameter(default=None, doc="""
        Parameterized instance whose parameters will be tracked.

        If this parameter's value is a string, it will be evaluated first
        (by calling Python's eval() function). This feature is designed to
        allow circular references, so that the TF can track the object that
        owns it, without causing problems for recursive traversal (as for
        script_repr()).""")
    # There may be some way to achieve the above without using eval(), which would be better.
    #JLALERT When using this function snapshots cannot be saved because of problem with eval()

    attrib_names = param.List(default=[], doc="""
        List of names of the function object's parameters that should be stored.""")

    units = param.List(default=[(0.0,0.0)], doc="""
        Sheet coordinates of the unit(s) for which parameter values will be stored.""")

    step = param.Number(default=1, doc="""
        How often to update the tracked values.

        For instance, step=1 means to update them every time this TF is
        called; step=2 means to update them every other time.""")

    coordframe = param.Parameter(default=None,doc="""
        SheetCoordinateSystem to use to convert the position into matrix coordinates.

        If this parameter's value is a string, it will be evaluated
        first(by calling Python's eval() function). This feature is
        designed to allow circular references, so that the TF can
        track the object that owns it, without causing problems for
        recursive traversal (as for script_repr()).""")

    def __init__(self,**params):
        super(AttributeTrackingTF,self).__init__(**params)
        self.values={}
        self.n_step = 0
        self._object=None
        self._coordframe=None
        # Pre-create one (time, value) list per tracked attribute and unit.
        for p in self.attrib_names:
            self.values[p]={}
            for u in self.units:
                self.values[p][u]=[]

    def __call__(self,x):
        # Resolve the tracked object lazily on first call (strings are
        # eval()ed to allow circular references; see 'object' parameter).
        # BUGFIX: use 'is None' rather than '== None' -- '==' would invoke
        # the tracked object's __eq__ (e.g. elementwise for arrays).
        if self._object is None:
            if isinstance(self.object,str):
                self._object=eval(self.object)
            else:
                self._object=self.object

        if self._coordframe is None:
            if isinstance(self.coordframe,str) and isinstance(self._object,SheetCoordinateSystem):
                # BUGFIX: added the missing space before 'is' in the message.
                raise ValueError(str(self._object)+" is already a coordframe, no need to specify coordframe")
            elif isinstance(self._object,SheetCoordinateSystem):
                self._coordframe=self._object
            elif isinstance(self.coordframe,str):
                self._coordframe=eval(self.coordframe)
            else:
                # BUGFIX: added the missing space before the object name.
                raise ValueError("A coordinate frame (e.g. coordframe=topo.sim['V1']) must be specified in order to track "+str(self._object))

        #collect values on each appropriate step
        self.n_step += 1
        if self.n_step == self.step:
            self.n_step = 0
            if self.plastic:
                for p in self.attrib_names:
                    if p=="x":
                        value_matrix=x
                    else:
                        value_matrix= getattr(self._object, p)
                    for u in self.units:
                        mat_u=self._coordframe.sheet2matrixidx(u[0],u[1])
                        self.values[p][u].append((topo.sim.time(),value_matrix[mat_u]))
# Public API: export every TransferFn subclass defined in this module,
# excluding the abstract base classes themselves.
__all__ = list(set([k for k,v in locals().items() if isinstance(v,type) and issubclass(v,TransferFn)]))
__all__.remove("TransferFn")
__all__.remove("TransferFnWithState")
| bsd-3-clause |
sephii/django | tests/template_tests/syntax_tests/test_ssi.py | 3 | 3103 | import os
from django.test import ignore_warnings, SimpleTestCase
from django.utils.deprecation import RemovedInDjango19Warning, RemovedInDjango20Warning
from ..utils import ROOT, setup
@ignore_warnings(category=RemovedInDjango20Warning)
class SsiTagTests(SimpleTestCase):
    """Tests for the deprecated {% ssi %} template tag."""

    # Test normal behavior
    @setup({'ssi01': '{%% ssi "%s" %%}' % os.path.join(
        ROOT, 'templates', 'ssi_include.html',
    )})
    def test_ssi01(self):
        output = self.engine.render_to_string('ssi01')
        self.assertEqual(output, 'This is for testing an ssi include. {{ test }}\n')

    @setup({'ssi02': '{%% ssi "%s" %%}' % os.path.join(
        ROOT, 'not_here',
    )})
    def test_ssi02(self):
        # A missing file renders as the empty string rather than an error.
        # BUGFIX: removed a stray trailing comma that made this statement a
        # one-element tuple expression.
        output = self.engine.render_to_string('ssi02')
        self.assertEqual(output, '')

    @setup({'ssi03': "{%% ssi '%s' %%}" % os.path.join(
        ROOT, 'not_here',
    )})
    def test_ssi03(self):
        # Same as test_ssi02, but with single quotes around the path.
        # BUGFIX: removed a stray trailing comma (see test_ssi02).
        output = self.engine.render_to_string('ssi03')
        self.assertEqual(output, '')

    # Test passing as a variable
    @ignore_warnings(category=RemovedInDjango19Warning)
    @setup({'ssi04': '{% load ssi from future %}{% ssi ssi_file %}'})
    def test_ssi04(self):
        output = self.engine.render_to_string('ssi04', {
            'ssi_file': os.path.join(ROOT, 'templates', 'ssi_include.html')
        })
        self.assertEqual(output, 'This is for testing an ssi include. {{ test }}\n')

    @ignore_warnings(category=RemovedInDjango19Warning)
    @setup({'ssi05': '{% load ssi from future %}{% ssi ssi_file %}'})
    def test_ssi05(self):
        output = self.engine.render_to_string('ssi05', {'ssi_file': 'no_file'})
        self.assertEqual(output, '')

    # Test parsed output
    @setup({'ssi06': '{%% ssi "%s" parsed %%}' % os.path.join(
        ROOT, 'templates', 'ssi_include.html',
    )})
    def test_ssi06(self):
        output = self.engine.render_to_string('ssi06', {'test': 'Look ma! It parsed!'})
        self.assertEqual(output, 'This is for testing an ssi include. '
                                 'Look ma! It parsed!\n')

    @setup({'ssi07': '{%% ssi "%s" parsed %%}' % os.path.join(
        ROOT, 'not_here',
    )})
    def test_ssi07(self):
        output = self.engine.render_to_string('ssi07', {'test': 'Look ma! It parsed!'})
        self.assertEqual(output, '')

    # Test space in file name
    @setup({'ssi08': '{%% ssi "%s" %%}' % os.path.join(
        ROOT, 'templates', 'ssi include with spaces.html',
    )})
    def test_ssi08(self):
        output = self.engine.render_to_string('ssi08')
        self.assertEqual(output, 'This is for testing an ssi include '
                                 'with spaces in its name. {{ test }}\n')

    @setup({'ssi09': '{%% ssi "%s" parsed %%}' % os.path.join(
        ROOT, 'templates', 'ssi include with spaces.html',
    )})
    def test_ssi09(self):
        output = self.engine.render_to_string('ssi09', {'test': 'Look ma! It parsed!'})
        self.assertEqual(output, 'This is for testing an ssi include '
                                 'with spaces in its name. Look ma! It parsed!\n')
| bsd-3-clause |
tersmitten/ansible | lib/ansible/modules/cloud/openstack/os_server_group.py | 52 | 4701 | #!/usr/bin/python
# Copyright (c) 2016 Catalyst IT Limited
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_server_group
short_description: Manage OpenStack server groups
extends_documentation_fragment: openstack
version_added: "2.2"
author: "Lingxian Kong (@kong)"
description:
- Add or remove server groups from OpenStack.
options:
state:
description:
- Indicate desired state of the resource. When I(state) is 'present',
then I(policies) is required.
choices: ['present', 'absent']
required: false
default: present
name:
description:
- Server group name.
required: true
policies:
description:
- A list of one or more policy names to associate with the server
group. The list must contain at least one policy name. The current
valid policy names are anti-affinity, affinity, soft-anti-affinity
and soft-affinity.
required: false
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements:
- "python >= 2.7"
- "openstacksdk"
'''
EXAMPLES = '''
# Create a server group with 'affinity' policy.
- os_server_group:
state: present
auth:
auth_url: https://identity.example.com
username: admin
password: admin
project_name: admin
name: my_server_group
policies:
- affinity
# Delete 'my_server_group' server group.
- os_server_group:
state: absent
auth:
auth_url: https://identity.example.com
username: admin
password: admin
project_name: admin
name: my_server_group
'''
RETURN = '''
id:
description: Unique UUID.
returned: success
type: str
name:
description: The name of the server group.
returned: success
type: str
policies:
description: A list of one or more policy names of the server group.
returned: success
type: list
members:
description: A list of members in the server group.
returned: success
type: list
metadata:
description: Metadata key and value pairs.
returned: success
type: dict
project_id:
description: The project ID who owns the server group.
returned: success
type: str
user_id:
description: The user ID who owns the server group.
returned: success
type: str
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _system_state_change(state, server_group):
    """
    Return True when the desired state differs from reality, i.e. the
    module would have to create ('present' without an existing group) or
    delete ('absent' with an existing group) the server group.
    """
    exists = bool(server_group)
    if state == 'present':
        return not exists
    if state == 'absent':
        return exists
    return False
def main():
    """
    Module entry point: reconcile the requested server-group state with
    the cloud, honouring check mode, and exit via exit_json/fail_json.
    """
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        policies=dict(required=False, type='list'),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module = AnsibleModule(
        argument_spec,
        supports_check_mode=True,
        **openstack_module_kwargs()
    )

    name = module.params['name']
    policies = module.params['policies']
    state = module.params['state']

    sdk, cloud = openstack_cloud_from_module(module)
    try:
        server_group = cloud.get_server_group(name)

        # In check mode, only report whether anything would change.
        if module.check_mode:
            module.exit_json(
                changed=_system_state_change(state, server_group)
            )

        changed = False
        if state == 'present':
            if not server_group:
                if not policies:
                    module.fail_json(
                        msg="Parameter 'policies' is required in Server Group "
                            "Create"
                    )
                server_group = cloud.create_server_group(name, policies)
                changed = True
            module.exit_json(
                changed=changed,
                id=server_group['id'],
                server_group=server_group
            )
        if state == 'absent':
            if server_group:
                cloud.delete_server_group(server_group['id'])
                changed = True
            module.exit_json(changed=changed)
    except sdk.exceptions.OpenStackCloudException as e:
        module.fail_json(msg=str(e), extra_data=e.extra_data)


if __name__ == '__main__':
    main()
| gpl-3.0 |
lokik/sfepy | tests/test_io.py | 5 | 2368 | from __future__ import absolute_import
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
import numpy as nm
import scipy.sparse as sp
import os.path as op
##
# 02.07.2007, c
class Test( TestCommon ):
    """Round-trip tests for sfepy's HDF5 I/O helpers."""

    ##
    # 02.07.2007, c
    @staticmethod
    def from_conf( conf, options ):
        """Construct the test instance from a problem description.

        Modernized: uses the @staticmethod decorator instead of the
        legacy ``from_conf = staticmethod(from_conf)`` rebinding.
        """
        return Test( conf = conf, options = options )

    ##
    # c: 02.07.2007, r: 12.06.2008
    def test_sparse_matrix_hdf5( self ):
        """Write a random CSR matrix to HDF5 and check it reads back equal."""
        from sfepy.base.ioutils import write_sparse_matrix_hdf5, read_sparse_matrix_hdf5
        from sfepy.base.ioutils import pt
        if pt is None:
            self.report( 'skipped (no pytables)' )
            return True

        filename = op.join( self.options.out_dir, 'mtx.h5' )

        aux = nm.random.rand( 5, 5 )
        # Zero a row/column pattern so the CSR structure is non-trivial.
        aux[1,:] = aux[:,2] = aux[3,:] = 0.0
        mtx = sp.csr_matrix( aux, dtype = nm.float64 )
#        self.report( 'sparse matrix:\n%s' % mtx )
        self.report( 'saving matrix into %s...' % filename )
        write_sparse_matrix_hdf5( filename, mtx )
        self.report( 'reading...' )
        mtx2 = read_sparse_matrix_hdf5( filename )
#        self.report( 'read matrix:\n%s' % mtx2 )
        self.report( 'difference:\n%s' % (mtx2 - mtx).__repr__() )

        # Shape, dtype, storage format and all CSR arrays must survive the
        # round trip unchanged.
        assert_( mtx.shape == mtx2.shape )
        assert_( mtx.dtype == mtx2.dtype )
        assert_( mtx.format == mtx2.format )
        assert_( nm.allclose( mtx.data, mtx2.data ) )
        assert_( nm.allclose( mtx.indices, mtx2.indices ) )
        assert_( nm.allclose( mtx.indptr, mtx2.indptr ) )

        return True

    ##
    # c: 09.07.2007, r: 12.06.2008
    def test_recursive_dict_hdf5( self ):
        """Write a nested dict to HDF5 and check it reads back equal."""
        from sfepy.base.ioutils import write_dict_hdf5, read_dict_hdf5
        from sfepy.base.ioutils import pt
        if pt is None:
            self.report( 'skipped (no pytables)' )
            return True

        filename = op.join( self.options.out_dir, 'dict.h5' )

        test = {'A' : 0, 'B' : {'C' : [0, 1],
                                'D' : {'E' : {'F' : {'G' : 2.0}}}}}
        self.report( '%s' % test )
        self.report( 'saving into %s...' % filename )
        write_dict_hdf5( filename, test )
        self.report( 'reading...' )
        test2 = read_dict_hdf5( filename )
        self.report( '%s' % test2 )

        assert_( test == test2 )

        return True
| bsd-3-clause |
SexualHealthInnovations/callisto-core | callisto_core/accounts/migrations/0004_encrypt_user_data.py | 2 | 1490 | # Generated by Django 2.0.4 on 2018-04-11 21:12
import logging
from hashlib import sha256
import bcrypt
from django.db import migrations, models
from callisto_core.accounts.auth import index
logger = logging.getLogger(__name__)
def _crypt_and_index(value):
    """Derive the stored (crypt, index) pair for a username or email.

    sha256 + bcrypt matches our current state of the art in other
    platforms; the index allows lookups without decrypting.
    """
    hashed = sha256(value.lower().encode("utf-8")).hexdigest()
    crypt = bcrypt.hashpw(hashed.encode("utf-8"), bcrypt.gensalt())
    return crypt.decode(), index(hashed)


def encrypt_user_data(apps, schema_editor):
    """Backfill encrypted username/email columns for every Account.

    Refactored: the previously duplicated hash/bcrypt/index logic for
    username and email now lives in _crypt_and_index.
    """
    Account = apps.get_model("accounts.Account")
    for account in Account.objects.all():
        username = account.user.username
        account.encrypted_username, account.username_index = _crypt_and_index(
            username)

        email = account.user.email
        if email:
            account.encrypted_email, account.email_index = _crypt_and_index(
                email)

        account.save()
class Migration(migrations.Migration):
    # Data migration only; depends on the schema migration that added the
    # encrypted_*/index columns. No reverse transformation is possible, so
    # the reverse is a no-op.
    dependencies = [("accounts", "0003_auto_20190607_1540")]

    operations = [
        migrations.RunPython(encrypt_user_data, reverse_code=migrations.RunPython.noop)
    ]
| agpl-3.0 |
tjanez/ansible | lib/ansible/inventory/__init__.py | 32 | 34639 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch
import os
import subprocess
import sys
import re
import itertools
from ansible.compat.six import string_types, iteritems
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.inventory.dir import InventoryDirectory, get_file_parser
from ansible.inventory.group import Group
from ansible.inventory.host import Host
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.utils.addresses import parse_address
from ansible.plugins import vars_loader
from ansible.utils.vars import combine_vars
from ansible.utils.path import unfrackpath
# Use the CLI's shared display object when running under the ansible
# executable; fall back to a private one when used as a library.
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()

# Module-level cache mapping pattern hashes to resolved host lists; shared
# across Inventory instances (see get_hosts below).
HOSTS_PATTERNS_CACHE = {}
class Inventory(object):
    """
    Host inventory for ansible: parses host sources (files, scripts,
    directories or literal lists), resolves host patterns, and exposes
    host and group variables.
    """
    def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST):
        """Set up caches and parse the given inventory source."""
        # the host file, or script path, or list of hosts
        # if a list, inventory data will NOT be loaded
        self.host_list = unfrackpath(host_list, follow=False)
        self._loader = loader
        self._variable_manager = variable_manager
        self.localhost = None

        # caching to avoid repeated calculations, particularly with
        # external inventory scripts.
        self._vars_per_host = {}
        self._vars_per_group = {}
        self._hosts_cache = {}
        self._pattern_cache = {}
        self._group_dict_cache = {}
        self._vars_plugins = []

        self._basedir = self.basedir()

        # Contains set of filenames under group_vars directories
        self._group_vars_files = self._find_group_vars_files(self._basedir)
        self._host_vars_files = self._find_host_vars_files(self._basedir)

        # to be set by calling set_playbook_basedir by playbook code
        self._playbook_basedir = None

        # the inventory object holds a list of groups
        self.groups = {}

        # a list of host(names) to contain current inquiries to
        self._restriction = None
        self._subset = None

        # clear the cache here, which is only useful if more than
        # one Inventory objects are created when using the API directly
        self.clear_pattern_cache()
        self.clear_group_dict_cache()

        self.parse_inventory(host_list)
def serialize(self):
data = dict()
return data
def deserialize(self, data):
pass
    def parse_inventory(self, host_list):
        """Populate self.groups (and self.parser) from *host_list*, which may
        be a comma-separated string, a list of host names, or a path to an
        inventory file/script/directory."""

        if isinstance(host_list, string_types):
            if "," in host_list:
                host_list = host_list.split(",")
                host_list = [ h for h in host_list if h and h.strip() ]

        self.parser = None

        # Always create the 'all' and 'ungrouped' groups, even if host_list is
        # empty: in this case we will subsequently add the implicit 'localhost' to it.

        ungrouped = Group('ungrouped')
        all = Group('all')
        all.add_child_group(ungrouped)

        self.groups = dict(all=all, ungrouped=ungrouped)

        if host_list is None:
            pass
        elif isinstance(host_list, list):
            for h in host_list:
                try:
                    (host, port) = parse_address(h, allow_ranges=False)
                except AnsibleError as e:
                    display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_text(e))
                    host = h
                    port = None

                new_host = Host(host, port)
                if h in C.LOCALHOST:
                    # set default localhost from inventory to avoid creating an implicit one. Last localhost defined 'wins'.
                    if self.localhost is not None:
                        display.warning("A duplicate localhost-like entry was found (%s). First found localhost was %s" % (h, self.localhost.name))
                    display.vvvv("Set default localhost to %s" % h)
                    self.localhost = new_host
                all.add_host(new_host)
        elif self._loader.path_exists(host_list):
            # TODO: switch this to a plugin loader and a 'condition' per plugin on which it should be tried, restoring 'inventory pllugins'
            if self.is_directory(host_list):
                # Ensure basedir is inside the directory
                host_list = os.path.join(self.host_list, "")
                self.parser = InventoryDirectory(loader=self._loader, groups=self.groups, filename=host_list)
            else:
                self.parser = get_file_parser(host_list, self.groups, self._loader)
                vars_loader.add_directory(self._basedir, with_subdir=True)

            if not self.parser:
                # should never happen, but JIC
                raise AnsibleError("Unable to parse %s as an inventory source" % host_list)
        else:
            display.warning("Host file not found: %s" % to_text(host_list))

        self._vars_plugins = [ x for x in vars_loader.all(self) ]

        # set group vars from group_vars/ files and vars plugins
        for g in self.groups:
            group = self.groups[g]
            group.vars = combine_vars(group.vars, self.get_group_variables(group.name))
            self.get_group_vars(group)

        # get host vars from host_vars/ files and vars plugins
        for host in self.get_hosts(ignore_limits=True, ignore_restrictions=True):
            host.vars = combine_vars(host.vars, self.get_host_variables(host.name))
            self.get_host_vars(host)
def _match(self, str, pattern_str):
try:
if pattern_str.startswith('~'):
return re.search(pattern_str[1:], str)
else:
return fnmatch.fnmatch(str, pattern_str)
except Exception:
raise AnsibleError('invalid host pattern: %s' % pattern_str)
def _match_list(self, items, item_attr, pattern_str):
results = []
try:
if not pattern_str.startswith('~'):
pattern = re.compile(fnmatch.translate(pattern_str))
else:
pattern = re.compile(pattern_str[1:])
except Exception:
raise AnsibleError('invalid host pattern: %s' % pattern_str)
for item in items:
if pattern.match(getattr(item, item_attr)):
results.append(item)
return results
    def get_hosts(self, pattern="all", ignore_limits=False, ignore_restrictions=False):
        """
        Takes a pattern or list of patterns and returns a list of matching
        inventory host names, taking into account any active restrictions
        or applied subsets
        """

        # Check if pattern already computed
        if isinstance(pattern, list):
            pattern_hash = u":".join(pattern)
        else:
            pattern_hash = pattern

        # Subset/restriction participate in the cache key, since they change
        # the result for the same pattern.
        if not ignore_limits and self._subset:
            pattern_hash += u":%s" % to_text(self._subset)

        if not ignore_restrictions and self._restriction:
            pattern_hash += u":%s" % to_text(self._restriction)

        if pattern_hash not in HOSTS_PATTERNS_CACHE:

            patterns = Inventory.split_host_pattern(pattern)
            hosts = self._evaluate_patterns(patterns)

            # mainly useful for hostvars[host] access
            if not ignore_limits and self._subset:
                # exclude hosts not in a subset, if defined
                subset = self._evaluate_patterns(self._subset)
                hosts = [ h for h in hosts if h in subset ]

            if not ignore_restrictions and self._restriction:
                # exclude hosts mentioned in any restriction (ex: failed hosts)
                hosts = [ h for h in hosts if h.name in self._restriction ]

            # Order-preserving dedup: seen.add returns None, so the 'and'
            # clause records the host while keeping the first occurrence.
            seen = set()
            HOSTS_PATTERNS_CACHE[pattern_hash] = [x for x in hosts if x not in seen and not seen.add(x)]

        # Return a shallow copy so callers cannot mutate the cache.
        return HOSTS_PATTERNS_CACHE[pattern_hash][:]
    @classmethod
    def split_host_pattern(cls, pattern):
        """
        Takes a string containing host patterns separated by commas (or a list
        thereof) and returns a list of single patterns (which may not contain
        commas). Whitespace is ignored.

        Also accepts ':' as a separator for backwards compatibility, but it is
        not recommended due to the conflict with IPv6 addresses and host ranges.

        Example: 'a,b[1], c[2:3] , d' -> ['a', 'b[1]', 'c[2:3]', 'd']
        """

        if isinstance(pattern, list):
            # Flatten: split each element recursively and chain the results.
            return list(itertools.chain(*map(cls.split_host_pattern, pattern)))

        # If it's got commas in it, we'll treat it as a straightforward
        # comma-separated list of patterns.
        elif ',' in pattern:
            patterns = re.split('\s*,\s*', pattern)

        # If it doesn't, it could still be a single pattern. This accounts for
        # non-separator uses of colons: IPv6 addresses and [x:y] host ranges.
        else:
            try:
                (base, port) = parse_address(pattern, allow_ranges=True)
                patterns = [pattern]
            except:
                # The only other case we accept is a ':'-separated list of patterns.
                # This mishandles IPv6 addresses, and is retained only for backwards
                # compatibility.
                patterns = re.findall(
                    r'''(?:     # We want to match something comprising:
                            [^\s:\[\]]    # (anything other than whitespace or ':[]'
                            |             # ...or...
                            \[[^\]]*\]    # a single complete bracketed expression)
                        )+                # occurring once or more
                    ''', pattern, re.X
                )

        return [p.strip() for p in patterns]
@classmethod
def order_patterns(cls, patterns):
# Host specifiers should be sorted to ensure consistent behavior
pattern_regular = []
pattern_intersection = []
pattern_exclude = []
for p in patterns:
if p.startswith("!"):
pattern_exclude.append(p)
elif p.startswith("&"):
pattern_intersection.append(p)
elif p:
pattern_regular.append(p)
# if no regular pattern was given, hence only exclude and/or intersection
# make that magically work
if pattern_regular == []:
pattern_regular = ['all']
# when applying the host selectors, run those without the "&" or "!"
# first, then the &s, then the !s.
return pattern_regular + pattern_intersection + pattern_exclude
def _evaluate_patterns(self, patterns):
"""
Takes a list of patterns and returns a list of matching host names,
taking into account any negative and intersection patterns.
"""
patterns = Inventory.order_patterns(patterns)
hosts = []
for p in patterns:
# avoid resolving a pattern that is a plain host
if p in self._hosts_cache:
hosts.append(self.get_host(p))
else:
that = self._match_one_pattern(p)
if p.startswith("!"):
hosts = [ h for h in hosts if h not in that ]
elif p.startswith("&"):
hosts = [ h for h in hosts if h in that ]
else:
to_append = [ h for h in that if h.name not in [ y.name for y in hosts ] ]
hosts.extend(to_append)
return hosts
    def _match_one_pattern(self, pattern):
        """
        Takes a single pattern and returns a list of matching host names.
        Ignores intersection (&) and exclusion (!) specifiers.

        The pattern may be:

            1. A regex starting with ~, e.g. '~[abc]*'
            2. A shell glob pattern with ?/*/[chars]/[!chars], e.g. 'foo*'
            3. An ordinary word that matches itself only, e.g. 'foo'

        The pattern is matched using the following rules:

            1. If it's 'all', it matches all hosts in all groups.
            2. Otherwise, for each known group name:
                (a) if it matches the group name, the results include all hosts
                    in the group or any of its children.
                (b) otherwise, if it matches any hosts in the group, the results
                    include the matching hosts.

        This means that 'foo*' may match one or more groups (thus including all
        hosts therein) but also hosts in other groups.

        The built-in groups 'all' and 'ungrouped' are special. No pattern can
        match these group names (though 'all' behaves as though it matches, as
        described above). The word 'ungrouped' can match a host of that name,
        and patterns like 'ungr*' and 'al*' can match either hosts or groups
        other than all and ungrouped.

        If the pattern matches one or more group names according to these rules,
        it may have an optional range suffix to select a subset of the results.
        This is allowed only if the pattern is not a regex, i.e. '~foo[1]' does
        not work (the [1] is interpreted as part of the regex), but 'foo*[1]'
        would work if 'foo*' matched the name of one or more groups.

        Duplicate matches are always eliminated from the results.
        """

        # '&'/'!' prefixes were already applied by _evaluate_patterns.
        if pattern.startswith("&") or pattern.startswith("!"):
            pattern = pattern[1:]

        if pattern not in self._pattern_cache:
            (expr, slice) = self._split_subscript(pattern)
            hosts = self._enumerate_matches(expr)
            try:
                # 'slice' may be None, in which case all hosts are kept.
                hosts = self._apply_subscript(hosts, slice)
            except IndexError:
                raise AnsibleError("No hosts matched the subscripted pattern '%s'" % pattern)
            self._pattern_cache[pattern] = hosts

        return self._pattern_cache[pattern]
    def _split_subscript(self, pattern):
        """
        Takes a pattern, checks if it has a subscript, and returns the pattern
        without the subscript and a (start,end) tuple representing the given
        subscript (or None if there is no subscript).

        Validates that the subscript is in the right syntax, but doesn't make
        sure the actual indices make sense in context.
        """

        # Do not parse regexes for enumeration info
        if pattern.startswith('~'):
            return (pattern, None)

        # We want a pattern followed by an integer or range subscript.
        # (We can't be more restrictive about the expression because the
        # fnmatch semantics permit [\[:\]] to occur.)

        pattern_with_subscript = re.compile(
            r'''^
                (.+)                    # A pattern expression ending with...
                \[(?:                   # A [subscript] expression comprising:
                    (-?[0-9]+)|         # A single positive or negative number
                    ([0-9]+)([:-])      # Or an x:y or x: range.
                    ([0-9]*)
                )\]
                $
            ''', re.X
        )

        subscript = None
        m = pattern_with_subscript.match(pattern)
        if m:
            (pattern, idx, start, sep, end) = m.groups()
            if idx:
                # Single index, e.g. 'group[3]' or 'group[-1]'.
                subscript = (int(idx), None)
            else:
                if not end:
                    # Open-ended range 'x:' selects through the last host.
                    end = -1
                subscript = (int(start), int(end))
                if sep == '-':
                    display.warning("Use [x:y] inclusive subscripts instead of [x-y] which has been removed")

        return (pattern, subscript)
def _apply_subscript(self, hosts, subscript):
"""
Takes a list of hosts and a (start,end) tuple and returns the subset of
hosts based on the subscript (which may be None to return all hosts).
"""
if not hosts or not subscript:
return hosts
(start, end) = subscript
if end:
if end == -1:
end = len(hosts)-1
return hosts[start:end+1]
else:
return [ hosts[start] ]
def _enumerate_matches(self, pattern):
    """
    Returns a list of host names matching the given pattern according to
    the rules explained above in _match_one_pattern.
    """
    results = []
    seen_names = set()

    def _collect(host):
        # De-duplicate by host name while preserving discovery order.
        if host.name not in seen_names:
            seen_names.add(host.name)
            results.append(host)

    for group in self.get_groups().values():
        if pattern == 'all':
            for host in group.get_hosts():
                if not host.implicit:
                    _collect(host)
        elif self._match(group.name, pattern) and group.name not in ('all', 'ungrouped'):
            # The pattern named this whole group; take its hosts.
            for host in group.get_hosts():
                if not host.implicit:
                    _collect(host)
        else:
            # Otherwise match the pattern against individual host names.
            for host in self._match_list(group.get_hosts(), 'name', pattern):
                _collect(host)

    # A localhost pattern with no explicit match gets an implicit host.
    if pattern in C.LOCALHOST and not results:
        results.append(self._create_implicit_localhost(pattern))

    return results
def _create_implicit_localhost(self, pattern):
    """
    Return (creating and registering on first use) the implicit localhost
    Host object used when no inventory entry matches a localhost pattern.
    The created host is cached on self.localhost and added to 'ungrouped'.
    """
    if self.localhost:
        # Already created earlier; reuse the cached host.
        new_host = self.localhost
    else:
        new_host = Host(pattern)
        new_host.address = "127.0.0.1"
        new_host.implicit = True
        # Pick up any on-disk host_vars before forcing the connection type.
        new_host.vars = self.get_host_vars(new_host)
        new_host.set_variable("ansible_connection", "local")
        if "ansible_python_interpreter" not in new_host.vars:
            py_interp = sys.executable
            if not py_interp:
                # sys.executable is not set in some cornercases. #13585
                display.warning('Unable to determine python interpreter from sys.executable. Using /usr/bin/python default.'
                                ' You can correct this by setting ansible_python_interpreter for localhost')
                py_interp = '/usr/bin/python'
            new_host.set_variable("ansible_python_interpreter", py_interp)
        self.get_group("ungrouped").add_host(new_host)
        self.localhost = new_host
    return new_host
def clear_pattern_cache(self):
    ''' called exclusively by the add_host plugin to allow patterns to be recalculated '''
    # Reset both the process-wide cache shared across Inventory instances
    # and this instance's own cache so stale matches are discarded.
    global HOSTS_PATTERNS_CACHE
    HOSTS_PATTERNS_CACHE = {}
    self._pattern_cache = {}
def clear_group_dict_cache(self):
    ''' called exclusively by the add_host and group_by plugins '''
    # Dropping the cache forces get_group_dict() to rebuild the
    # group-name -> hostname-list mapping on next access.
    self._group_dict_cache = {}
def groups_for_host(self, host):
    """Return the groups of a previously cached host name, or [] if the
    host has never been looked up."""
    try:
        return self._hosts_cache[host].get_groups()
    except KeyError:
        # Unknown host: it belongs to no groups.
        return []
def get_groups(self):
    """Return the mapping of group name -> Group for this inventory."""
    return self.groups
def get_host(self, hostname):
    """Look up a host by name, memoizing the result in _hosts_cache."""
    try:
        return self._hosts_cache[hostname]
    except KeyError:
        host = self._get_host(hostname)
        self._hosts_cache[hostname] = host
        return host
def _get_host(self, hostname):
    """
    Uncached host lookup. Localhost aliases (C.LOCALHOST) all resolve to
    a single (possibly implicit) localhost entry; other names are found
    by scanning every group. Populates _hosts_cache as a side effect.
    Returns None if nothing matches.
    """
    matching_host = None
    if hostname in C.LOCALHOST:
        if self.localhost:
            matching_host= self.localhost
        else:
            # Look for an explicitly inventoried localhost alias first.
            for host in self.get_group('all').get_hosts():
                if host.name in C.LOCALHOST:
                    matching_host = host
                    break
            if not matching_host:
                matching_host = self._create_implicit_localhost(hostname)
            # update caches
            self._hosts_cache[hostname] = matching_host
            # Every other localhost alias shares the same Host object.
            for host in C.LOCALHOST.difference((hostname,)):
                self._hosts_cache[host] = self._hosts_cache[hostname]
    else:
        for group in self.groups.values():
            for host in group.get_hosts():
                # Opportunistically warm the cache for every host we see.
                if host not in self._hosts_cache:
                    self._hosts_cache[host.name] = host
                if hostname == host.name:
                    matching_host = host
    return matching_host
def get_group(self, groupname):
    """Return the Group object for *groupname*, or None if unknown."""
    return self.groups.get(groupname)
def get_group_variables(self, groupname, update_cached=False, vault_password=None):
    """Return (and memoize) the variables for *groupname*; pass
    update_cached=True to force a refresh."""
    cache = self._vars_per_group
    if update_cached or groupname not in cache:
        cache[groupname] = self._get_group_variables(groupname, vault_password=vault_password)
    return cache[groupname]
def _get_group_variables(self, groupname, vault_password=None):
    """
    Assemble the variables for a single group by merging the output of
    every vars plugin that implements get_group_vars with the on-disk
    group_vars/ files (later sources win via combine_vars).

    :raises AnsibleError: if *groupname* is not in the inventory.
    """
    group = self.get_group(groupname)
    if group is None:
        # AnsibleError (not a bare Exception) for consistency with
        # _get_host_variables, so callers can handle both uniformly.
        raise AnsibleError("group not found: %s" % groupname)

    gvars = {}  # renamed from 'vars' to avoid shadowing the builtin

    # plugin.get_group_vars retrieves just vars for specific group
    vars_results = [plugin.get_group_vars(group, vault_password=vault_password)
                    for plugin in self._vars_plugins if hasattr(plugin, 'get_group_vars')]
    for updated in vars_results:
        if updated is not None:
            gvars = combine_vars(gvars, updated)

    # Read group_vars/ files
    gvars = combine_vars(gvars, self.get_group_vars(group))

    return gvars
def get_group_dict(self):
    """
    In get_vars() we merge a 'magic' dictionary 'groups' with group name
    keys and hostname list values into every host variable set.
    Cache the creation of this structure here
    """
    if not self._group_dict_cache:
        # Populate the existing cache dict in place (other code may hold
        # a reference to it).
        self._group_dict_cache.update(
            (group_name, [host.name for host in group.get_hosts()])
            for (group_name, group) in iteritems(self.groups)
        )
    return self._group_dict_cache
def get_vars(self, hostname, update_cached=False, vault_password=None):
    """Return the merged variables attached to *hostname*'s Host object."""
    host = self.get_host(hostname)
    if host:
        return host.get_vars()
    raise AnsibleError("no vars as host is not in inventory: %s" % hostname)
def get_host_variables(self, hostname, update_cached=False, vault_password=None):
    """Return (and memoize) the plugin-provided variables for *hostname*;
    pass update_cached=True to force a refresh."""
    cache = self._vars_per_host
    if update_cached or hostname not in cache:
        cache[hostname] = self._get_host_variables(hostname, vault_password=vault_password)
    return cache[hostname]
def _get_host_variables(self, hostname, vault_password=None):
    """
    Build the full variable dictionary for one host by running every vars
    plugin and merging the results (later results win via combine_vars).

    :raises AnsibleError: if the host is not in the inventory.
    """
    host = self.get_host(hostname)
    if host is None:
        raise AnsibleError("no host vars as host is not in inventory: %s" % hostname)

    vars = {}

    # plugin.run retrieves all vars (also from groups) for host
    vars_results = [ plugin.run(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'run')]
    for updated in vars_results:
        if updated is not None:
            vars = combine_vars(vars, updated)

    # plugin.get_host_vars retrieves just vars for specific host
    vars_results = [ plugin.get_host_vars(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_host_vars')]
    for updated in vars_results:
        if updated is not None:
            vars = combine_vars(vars, updated)

    # still need to check InventoryParser per host vars
    # which actually means InventoryScript per host,
    # which is not performant
    if self.parser is not None:
        vars = combine_vars(vars, self.parser.get_host_variables(host))

    return vars
def add_group(self, group):
    """Add *group* to the inventory; a duplicate name is an error."""
    if group.name in self.groups:
        raise AnsibleError("group already in inventory: %s" % group.name)
    self.groups[group.name] = group
def list_hosts(self, pattern="all"):
    """ return a list of hostnames for a pattern """
    result = list(self.get_hosts(pattern))
    if not result and pattern in C.LOCALHOST:
        # Localhost aliases always "exist" even with no inventory entry.
        result = [pattern]
    return result
def list_groups(self):
    """Return all group names, sorted alphabetically."""
    # A plain sort over the dict's keys; the original passed the identity
    # function (key=lambda x: x), which is a no-op.
    return sorted(self.groups)
def restrict_to_hosts(self, restriction):
    """
    Restrict list operations to the hosts given in restriction. This is used
    to batch serial operations in main playbook code, don't use this for other
    reasons.
    """
    if restriction is None:
        return
    if not isinstance(restriction, list):
        restriction = [restriction]
    self._restriction = [host.name for host in restriction]
def subset(self, subset_pattern):
    """
    Limits inventory results to a subset of inventory that matches a given
    pattern, such as to select a given geographic or numeric slice amongst
    a previous 'hosts' selection that only select roles, or vice versa.
    Corresponds to --limit parameter to ansible-playbook
    """
    if subset_pattern is None:
        self._subset = None
        return

    results = []
    # allow Unix style @filename data
    for x in Inventory.split_host_pattern(subset_pattern):
        if x.startswith("@"):
            # Context manager guarantees the file is closed even when
            # read() raises (the original leaked the fd on error).
            with open(x[1:]) as fd:
                results.extend(fd.read().split("\n"))
        else:
            results.append(x)
    self._subset = results
def remove_restriction(self):
    """ Do not restrict list operations """
    # Clears any batch/serial restriction set via restrict_to_hosts().
    self._restriction = None
def is_file(self):
    """
    Did inventory come from a file? We don't use the equivalent loader
    methods in inventory, due to the fact that the loader does an implicit
    DWIM on the path, which may be incorrect for inventory paths relative
    to the playbook basedir.
    """
    if not isinstance(self.host_list, string_types):
        # Non-string host lists (e.g. API-supplied lists) are never files.
        return False
    return self.host_list == os.devnull or os.path.isfile(self.host_list)
def is_directory(self, path):
    """
    Is the inventory host list a directory? Same caveat for here as with
    the is_file() method above.
    """
    # Non-string host lists can never be directory-backed.
    return isinstance(self.host_list, string_types) and os.path.isdir(path)
def basedir(self):
    """ if inventory came from a file, what's the directory? """
    # (The original also dead-stored self.host_list into dname before this
    # chain; every branch overwrote it, so the assignment is dropped.)
    if self.is_directory(self.host_list):
        dname = self.host_list
    elif not self.is_file():
        # Not file-backed at all (e.g. an API-supplied list).
        dname = None
    else:
        dname = os.path.dirname(self.host_list)

    # Fall back to the current working directory for empty/relative cases.
    if dname is None or dname == '' or dname == '.':
        dname = os.getcwd()
    if dname:
        dname = os.path.abspath(dname)
    return dname
def src(self):
    """ if inventory came from a file, what's the directory and file name? """
    # Only meaningful for file-backed inventories; returns None otherwise.
    if not self.is_file():
        return None
    return self.host_list
def playbook_basedir(self):
    """ returns the directory of the current playbook """
    # Set via set_playbook_basedir(); may be None before a playbook runs.
    return self._playbook_basedir
def set_playbook_basedir(self, dir_name):
    """
    sets the base directory of the playbook so inventory can use it as a
    basedir for host_ and group_vars, and other things.
    """
    # Only update things if dir is a different playbook basedir
    if dir_name != self._playbook_basedir:
        # we're changing the playbook basedir, so if we had set one previously
        # clear the host/group vars entries from the VariableManager so they're
        # not incorrectly used by playbooks from different directories
        if self._playbook_basedir:
            self._variable_manager.clear_playbook_hostgroup_vars_files(self._playbook_basedir)
        self._playbook_basedir = dir_name
        # get group vars from group_vars/ files
        # TODO: excluding the new_pb_basedir directory may result in group_vars
        # files loading more than they should, however with the file caching
        # we do this shouldn't be too much of an issue. Still, this should
        # be fixed at some point to allow a "first load" to touch all of the
        # directories, then later runs only touch the new basedir specified
        found_group_vars = self._find_group_vars_files(self._playbook_basedir)
        if found_group_vars:
            self._group_vars_files = self._group_vars_files.union(found_group_vars)
            # Re-read group_vars/ for every group under the new basedir.
            for group in self.groups.values():
                self.get_group_vars(group)
        found_host_vars = self._find_host_vars_files(self._playbook_basedir)
        if found_host_vars:
            self._host_vars_files = self._host_vars_files.union(found_host_vars)
            # get host vars from host_vars/ files
            for host in self.get_hosts():
                self.get_host_vars(host)
        # invalidate cache
        self._vars_per_host = {}
        self._vars_per_group = {}
def get_host_vars(self, host, new_pb_basedir=False, return_results=False):
    """ Read host_vars/ files """
    # Thin wrapper over _get_hostgroup_vars with only the host set.
    return self._get_hostgroup_vars(host=host, group=None, new_pb_basedir=new_pb_basedir, return_results=return_results)
def get_group_vars(self, group, new_pb_basedir=False, return_results=False):
    """ Read group_vars/ files """
    # Thin wrapper over _get_hostgroup_vars with only the group set.
    return self._get_hostgroup_vars(host=None, group=group, new_pb_basedir=new_pb_basedir, return_results=return_results)
def _find_group_vars_files(self, basedir):
    """ Find group_vars/ files """
    if basedir in ('', None):
        basedir = './'
    path = os.path.realpath(os.path.join(basedir, 'group_vars'))

    found_vars = set()
    if os.path.isdir(path):
        found_vars = set(os.listdir(to_text(path)))
    elif os.path.exists(path):
        # Exists but is a plain file; warn and return nothing.
        display.warning("Found group_vars that is not a directory, skipping: %s" % path)
    return found_vars
def _find_host_vars_files(self, basedir):
    """
    Find host_vars/ files.

    Mirrors _find_group_vars_files, including skipping (with a warning) a
    host_vars path that exists but is not a directory -- previously this
    called os.listdir() unconditionally and raised OSError in that case.
    """
    if basedir in ('', None):
        basedir = './'
    path = os.path.realpath(os.path.join(basedir, 'host_vars'))
    found_vars = set()
    if os.path.exists(path):
        if os.path.isdir(path):
            found_vars = set(os.listdir(to_text(path)))
        else:
            display.warning("Found host_vars that is not a directory, skipping: %s" % path)
    return found_vars
def _get_hostgroup_vars(self, host=None, group=None, new_pb_basedir=False, return_results=False):
    """
    Loads variables from group_vars/<groupname> and host_vars/<hostname> in directories parallel
    to the inventory base directory or in the same directory as the playbook. Variables in the playbook
    dir will win over the inventory dir if files are in both.

    Exactly one of *host* and *group* is expected to be set.
    """
    results = {}
    scan_pass = 0
    _basedir = self._basedir
    _playbook_basedir = self._playbook_basedir

    # look in both the inventory base directory and the playbook base directory
    # unless we do an update for a new playbook base dir
    if not new_pb_basedir and _playbook_basedir:
        basedirs = [_basedir, _playbook_basedir]
    else:
        basedirs = [_basedir]

    for basedir in basedirs:
        # this can happen from particular API usages, particularly if not run
        # from /usr/bin/ansible-playbook
        if basedir in ('', None):
            basedir = './'

        scan_pass = scan_pass + 1

        # it's not an error if the directory does not exist, keep moving
        if not os.path.exists(basedir):
            continue

        # save work of second scan if the directories are the same
        if _basedir == _playbook_basedir and scan_pass != 1:
            continue

        # Before trying to load vars from file, check that the directory contains relevant file names
        if host is None and any(map(lambda ext: group.name + ext in self._group_vars_files, C.YAML_FILENAME_EXTENSIONS)):
            # load vars in dir/group_vars/name_of_group
            base_path = to_text(os.path.abspath(os.path.join(to_bytes(basedir), b"group_vars/" + to_bytes(group.name))), errors='surrogate_or_strict')
            host_results = self._variable_manager.add_group_vars_file(base_path, self._loader)
            if return_results:
                results = combine_vars(results, host_results)
        elif group is None and any(map(lambda ext: host.name + ext in self._host_vars_files, C.YAML_FILENAME_EXTENSIONS)):
            # same for hostvars in dir/host_vars/name_of_host
            base_path = to_text(os.path.abspath(os.path.join(to_bytes(basedir), b"host_vars/" + to_bytes(host.name))), errors='surrogate_or_strict')
            group_results = self._variable_manager.add_host_vars_file(base_path, self._loader)
            if return_results:
                results = combine_vars(results, group_results)

    # all done, results is a dictionary of variables for this particular host.
    return results
def refresh_inventory(self):
    """Drop every cache and re-parse the inventory source from scratch."""
    self.clear_pattern_cache()
    self.clear_group_dict_cache()
    self._hosts_cache = {}
    self._vars_per_host = {}
    self._vars_per_group = {}
    self.groups = {}
    self.parse_inventory(self.host_list)
| gpl-3.0 |
DirtyUnicorns/android_external_chromium-org | build/android/gyp/proguard.py | 26 | 1813 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fnmatch
import optparse
import os
import sys
from util import build_utils
def DoProguard(options):
  """Runs proguard on options.input_path, writing options.output_path."""
  injars = options.input_path
  outjars = options.output_path
  classpath = build_utils.ParseGypList(options.classpath)

  # De-duplicate while preserving order so the -libraryjars argument (and
  # therefore proguard's class resolution order) is deterministic between
  # runs; the previous list(set(...)) produced an arbitrary ordering.
  seen = set()
  unique_classpath = []
  for jar in classpath:
    if jar not in seen:
      seen.add(jar)
      unique_classpath.append(jar)
  libraryjars = ':'.join(unique_classpath)

  # proguard does its own dependency checking, which can be avoided by deleting
  # the output.
  if os.path.exists(options.output_path):
    os.remove(options.output_path)

  proguard_cmd = [options.proguard_path,
                  '-injars', injars,
                  '-outjars', outjars,
                  '-libraryjars', libraryjars,
                  '@' + options.proguard_config]
  build_utils.CheckOutput(proguard_cmd, print_stdout=True)
def main(argv):
  """Parses the command line and runs proguard; returns the exit status."""
  parser = optparse.OptionParser()
  parser.add_option('--proguard-path',
                    help='Path to the proguard executable.')
  parser.add_option('--input-path',
                    help='Path to the .jar file proguard should run on.')
  parser.add_option('--output-path', help='Path to the generated .jar file.')
  parser.add_option('--proguard-config',
                    help='Path to the proguard configuration file.')
  parser.add_option('--classpath', help="Classpath for proguard.")
  parser.add_option('--stamp', help='Path to touch on success.')
  # TODO(newt): remove this once http://crbug.com/177552 is fixed in ninja.
  parser.add_option('--ignore', help='Ignored.')

  options, _ = parser.parse_args()

  DoProguard(options)

  # Stamp file signals build success to the dependency checker.
  if options.stamp:
    build_utils.Touch(options.stamp)
# Script entry point; main()'s return value becomes the process exit code.
if __name__ == '__main__':
  sys.exit(main(sys.argv))
| bsd-3-clause |
sekikn/incubator-airflow | tests/providers/amazon/aws/operators/test_s3_delete_objects.py | 7 | 3919 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import unittest
import boto3
from moto import mock_s3
from airflow.providers.amazon.aws.operators.s3_delete_objects import S3DeleteObjectsOperator
class TestS3DeleteObjectsOperator(unittest.TestCase):
    """Exercises S3DeleteObjectsOperator against a moto-mocked S3 backend."""

    @mock_s3
    def test_s3_delete_single_object(self):
        bucket_name = "testbucket"
        object_key = "path/data.txt"

        client = boto3.client('s3')
        client.create_bucket(Bucket=bucket_name)
        client.upload_fileobj(Bucket=bucket_name, Key=object_key, Fileobj=io.BytesIO(b"input"))

        # The object should be detected before the DELETE action is taken
        listed = client.list_objects(Bucket=bucket_name, Prefix=object_key)
        self.assertEqual(len(listed['Contents']), 1)
        self.assertEqual(listed['Contents'][0]['Key'], object_key)

        operator = S3DeleteObjectsOperator(
            task_id="test_task_s3_delete_single_object", bucket=bucket_name, keys=object_key
        )
        operator.execute(None)

        # There should be no object found in the bucket created earlier
        self.assertNotIn('Contents', client.list_objects(Bucket=bucket_name, Prefix=object_key))

    @mock_s3
    def test_s3_delete_multiple_objects(self):
        bucket_name = "testbucket"
        key_prefix = "path/data"
        object_keys = [key_prefix + str(i) for i in range(3)]

        client = boto3.client('s3')
        client.create_bucket(Bucket=bucket_name)
        for key in object_keys:
            client.upload_fileobj(Bucket=bucket_name, Key=key, Fileobj=io.BytesIO(b"input"))

        # The objects should be detected before the DELETE action is taken
        listed = client.list_objects(Bucket=bucket_name, Prefix=key_prefix)
        self.assertEqual(len(listed['Contents']), len(object_keys))
        self.assertEqual(sorted(entry['Key'] for entry in listed['Contents']), sorted(object_keys))

        operator = S3DeleteObjectsOperator(
            task_id="test_task_s3_delete_multiple_objects", bucket=bucket_name, keys=object_keys
        )
        operator.execute(None)

        # There should be no object found in the bucket created earlier
        self.assertNotIn('Contents', client.list_objects(Bucket=bucket_name, Prefix=key_prefix))

    @mock_s3
    def test_s3_delete_prefix(self):
        bucket_name = "testbucket"
        key_prefix = "path/data"
        object_keys = [key_prefix + str(i) for i in range(3)]

        client = boto3.client('s3')
        client.create_bucket(Bucket=bucket_name)
        for key in object_keys:
            client.upload_fileobj(Bucket=bucket_name, Key=key, Fileobj=io.BytesIO(b"input"))

        # The objects should be detected before the DELETE action is taken
        listed = client.list_objects(Bucket=bucket_name, Prefix=key_prefix)
        self.assertEqual(len(listed['Contents']), len(object_keys))
        self.assertEqual(sorted(entry['Key'] for entry in listed['Contents']), sorted(object_keys))

        operator = S3DeleteObjectsOperator(
            task_id="test_task_s3_delete_prefix", bucket=bucket_name, prefix=key_prefix
        )
        operator.execute(None)

        # There should be no object found in the bucket created earlier
        self.assertNotIn('Contents', client.list_objects(Bucket=bucket_name, Prefix=key_prefix))
| apache-2.0 |
invisiblek/android_external_skia | tools/skp/page_sets/skia_googlespreadsheet_desktop.py | 33 | 1238 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class SkiaBuildbotDesktopPage(page_module.Page):
  """A desktop telemetry page replayed from the googlespreadsheet WPR set."""

  def __init__(self, url, page_set):
    super(SkiaBuildbotDesktopPage, self).__init__(
        url=url,
        page_set=page_set,
        credentials_path='data/credentials.json')
    # Replay with a desktop user agent from the recorded archive below.
    self.user_agent_type = 'desktop'
    self.archive_data_file = 'data/skia_googlespreadsheet_desktop.json'
class SkiaGooglespreadsheetDesktopPageSet(page_set_module.PageSet):
  """ Pages designed to represent the median, not highly optimized web """

  def __init__(self):
    super(SkiaGooglespreadsheetDesktopPageSet, self).__init__(
        user_agent_type='desktop',
        archive_data_file='data/skia_googlespreadsheet_desktop.json')

    urls_list = [
      # Why: from Tom W's list.
      ('https://docs.google.com/spreadsheets/d/'
       '1YnmSPu-p-1nj-lkWd8q_GRgzjiWzg_6A-HvFYqVoVxI/edit?usp=sharing'),
    ]

    # One user story per URL, all sharing this set's WPR archive.
    for url in urls_list:
      self.AddUserStory(SkiaBuildbotDesktopPage(url, self))
| bsd-3-clause |
CubicComet/exercism-python-solutions | rotational-cipher/rotational_cipher_test.py | 4 | 1544 | import unittest
import rotational_cipher
# test cases adapted from `x-common//canonical-data.json` @ version: 1.0.0
class RotationalCipher(unittest.TestCase):
    """Canonical exercism test cases for rotational_cipher.rotate."""

    def _check(self, text, key, expected):
        # Shared assertion: rotating `text` by `key` yields `expected`.
        self.assertEqual(rotational_cipher.rotate(text, key), expected)

    def test_rotate_a_by_1(self):
        self._check('a', 1, 'b')

    def test_rotate_a_by_26(self):
        self._check('a', 26, 'a')

    def test_rotate_a_by_0(self):
        self._check('a', 0, 'a')

    def test_rotate_m_by_13(self):
        self._check('m', 13, 'z')

    def test_rotate_n_by_13_with_wrap_around_alphabet(self):
        self._check('n', 13, 'a')

    def test_rotate_capital_letters(self):
        self._check('OMG', 5, 'TRL')

    def test_rotate_spaces(self):
        self._check('O M G', 5, 'T R L')

    def test_rotate_numbers(self):
        self._check('Testing 1 2 3 testing', 4, 'Xiwxmrk 1 2 3 xiwxmrk')

    def test_rotate_punctuation(self):
        self._check("Let's eat, Grandma!", 21, "Gzo'n zvo, Bmviyhv!")

    def test_rotate_all_letters(self):
        self._check("The quick brown fox jumps over the lazy dog.", 13,
                    "Gur dhvpx oebja sbk whzcf bire gur ynml qbt.")
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| agpl-3.0 |
paasmaker/paasmaker | paasmaker/common/stats/base.py | 2 | 1797 | #
# Paasmaker - Platform as a Service
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
import paasmaker
import tornado.testing
import colander
class BaseStatsConfigurationSchema(colander.MappingSchema):
    """Colander schema for BaseStats plugin options (none are defined)."""
    # No options defined.
    pass
class BaseStats(paasmaker.util.plugin.Plugin):
    """
    This plugin is used to fetch stats on the node, which are
    reported back to the master for informational purposes.
    Additionally, these stats are used by scoring plugins to
    calculate the "score" for a node. The score is used by
    the Pacemaker to rank nodes when determining where to place
    applications.

    These plugins are called each time the node reports back
    to the master node.
    """
    # Stats plugins run only in NODE_STATS mode; no per-mode schema needed.
    MODES = {
        paasmaker.util.plugin.MODE.NODE_STATS: None
    }
    # No configuration options are accepted (empty colander schema).
    OPTIONS_SCHEMA = BaseStatsConfigurationSchema()

    def stats(self, existing_stats, callback):
        """
        Alter or insert into the provided existing stats array. Call the callback
        with the dictionary once completed.

        For example::

            def stats(self, existing_stats, callback):
                existing_stats['my_stat'] = 1.0
                callback(existing_stats)

        :arg dict existing_stats: The existing stats. Insert your stats into
            this dictionary.
        :arg callable callback: The callback to call once done.
        """
        raise NotImplementedError("You must implement stats().")
class BaseStatsTest(tornado.testing.AsyncTestCase):
def setUp(self):
super(BaseStatsTest, self).setUp()
self.configuration = paasmaker.common.configuration.ConfigurationStub(0, ['pacemaker'], io_loop=self.io_loop)
def tearDown(self):
self.configuration.cleanup(self.stop, self.stop)
self.wait()
super(BaseStatsTest, self).tearDown() | mpl-2.0 |
swt30/beets | test/test_vfs.py | 3 | 1740 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the virtual filesystem builder.."""
from __future__ import division, absolute_import, print_function
from test import _common
from test._common import unittest
from beets import library
from beets import vfs
class VFSTest(_common.TestCase):
    """Checks that vfs.libtree() places singleton and album items at the
    expected virtual paths."""
    def setUp(self):
        super(VFSTest, self).setUp()
        # Two path formats: albums by default, a separate tree for singletons.
        self.lib = library.Library(':memory:', path_formats=[
            (u'default', u'albums/$album/$title'),
            (u'singleton:true', u'tracks/$artist/$title'),
        ])
        self.lib.add(_common.item())          # singleton -> tracks/... (id 1)
        self.lib.add_album([_common.item()])  # album item -> albums/... (id 2)
        self.tree = vfs.libtree(self.lib)

    def test_singleton_item(self):
        self.assertEqual(self.tree.dirs['tracks'].dirs['the artist'].
                         files['the title'], 1)

    def test_album_item(self):
        self.assertEqual(self.tree.dirs['albums'].dirs['the album'].
                         files['the title'], 2)
def suite():
    """Build the test suite for runners that call suite() directly."""
    return unittest.TestLoader().loadTestsFromName(__name__)
# Compare against a str literal: under Python 3, __name__ is str, so the
# original b'__main__' comparison was always False and the suite never ran
# when this file was executed directly. The str form also works on Python 2.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| mit |
linked67/p2pool-jug | SOAPpy/Server.py | 289 | 27143 | from __future__ import nested_scopes
"""
################################################################################
#
# SOAPpy - Cayce Ullman (cayce@actzero.com)
# Brian Matthews (blm@actzero.com)
# Gregory Warnes (Gregory.R.Warnes@Pfizer.com)
# Christopher Blunck (blunck@gst.com)
#
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: Server.py 1468 2008-05-24 01:55:33Z warnes $'
from version import __version__
#import xml.sax
import socket
import sys
import SocketServer
from types import *
import BaseHTTPServer
import thread
# SOAPpy modules
from Parser import parseSOAPRPC
from Config import Config
from Types import faultType, voidType, simplify
from NS import NS
from SOAPBuilder import buildSOAP
from Utilities import debugHeader, debugFooter
try: from M2Crypto import SSL
except: pass
ident = '$Id: Server.py 1468 2008-05-24 01:55:33Z warnes $'
from version import __version__
################################################################################
# Call context dictionary
################################################################################
# Per-thread registry of in-flight SOAP call contexts, keyed by thread id.
_contexts = dict()

def GetSOAPContext():
    """Return the SOAPContext registered for the calling thread.

    Raises KeyError if the current thread is not serving a SOAP request.
    """
    # _contexts is only read here, so the 'global' declaration the
    # original carried was unnecessary and has been dropped.
    return _contexts[thread.get_ident()]
################################################################################
# Server
################################################################################
# Method Signature class for adding extra info to registered funcs, right now
# used just to indicate it should be called with keywords, instead of ordered
# params.
# Method Signature class for adding extra info to registered funcs, right now
# used just to indicate it should be called with keywords, instead of ordered
# params.
class MethodSig:
    """
    Callable wrapper attaching dispatch metadata to a registered function.

    keywords -- when true, the server invokes the function with keyword
                arguments instead of ordered (positional) parameters.
    context  -- when true, the function also receives the call context.
    """
    def __init__(self, func, keywords=0, context=0):
        self.func = func
        self.keywords = keywords
        self.context = context
        self.__name__ = func.__name__

    def __call__(self, *args, **kw):
        # Direct argument unpacking replaces the long-deprecated apply()
        # builtin (removed in Python 3); semantics are identical.
        return self.func(*args, **kw)
class SOAPContext:
    """
    Bundles everything known about one in-flight SOAP request so that
    context-aware handlers (MethodSig(..., context=1)) can inspect it.
    """
    def __init__(self, header, body, attrs, xmldata, connection, httpheaders,
                 soapaction):
        self.header = header          # parsed SOAP header structure
        self.body = body              # parsed SOAP body structure
        self.attrs = attrs            # per-element attribute map
        self.xmldata = xmldata        # raw request XML text
        self.connection = connection  # underlying client socket
        self.httpheaders= httpheaders # HTTP headers of the request
        self.soapaction = soapaction  # SOAPAction HTTP header value
# A class to describe how header messages are handled
class HeaderHandler:
# Initially fail out if there are any problems.
def __init__(self, header, attrs):
for i in header.__dict__.keys():
if i[0] == "_":
continue
d = getattr(header, i)
try:
fault = int(attrs[id(d)][(NS.ENV, 'mustUnderstand')])
except:
fault = 0
if fault:
raise faultType, ("%s:MustUnderstand" % NS.ENV_T,
"Required Header Misunderstood",
"%s" % i)
################################################################################
# SOAP Server
################################################################################
class SOAPServerBase:
    """
    Mixin shared by the SOAP server classes: SSL-aware connection
    acceptance plus registration of objects and functions under SOAP
    namespaces. Subclasses provide self.namespace, self.objmap,
    self.funcmap and self.ssl_context.
    """

    def get_request(self):
        """Accept a connection, wrapping the socket in SSL when configured."""
        sock, addr = SocketServer.TCPServer.get_request(self)

        if self.ssl_context:
            sock = SSL.Connection(self.ssl_context, sock)
            sock._setup_ssl(addr)
            if sock.accept_ssl() != 1:
                # raise-with-call form is valid on both Python 2 and 3.
                raise socket.error("Couldn't accept SSL connection")

        return sock, addr

    def _resolve_namespace(self, namespace, path):
        """Return the effective namespace for a registration call.

        Falls back to the server default when neither namespace nor path
        is given; otherwise derives a namespace from the URL path by
        replacing '/' with ':' and dropping a leading ':'. This logic was
        previously copy-pasted into every register* method below.
        """
        if namespace == '' and path == '':
            namespace = self.namespace
        if namespace == '' and path != '':
            namespace = path.replace("/", ":")
            if namespace[0] == ":":
                namespace = namespace[1:]
        return namespace

    def registerObject(self, object, namespace = '', path = ''):
        """Serve all methods of 'object' under the given namespace."""
        namespace = self._resolve_namespace(namespace, path)
        self.objmap[namespace] = object

    def registerFunction(self, function, namespace = '', funcName = None,
                         path = ''):
        """Serve a single function, optionally under a different name."""
        if not funcName:
            funcName = function.__name__
        namespace = self._resolve_namespace(namespace, path)
        # 'in' replaces dict.has_key(), which does not exist on Python 3.
        if namespace in self.funcmap:
            self.funcmap[namespace][funcName] = function
        else:
            self.funcmap[namespace] = {funcName: function}

    def registerKWObject(self, object, namespace = '', path = ''):
        """Like registerObject, but methods are called with keyword args."""
        namespace = self._resolve_namespace(namespace, path)
        for i in dir(object.__class__):
            if i[0] != "_" and callable(getattr(object, i)):
                self.registerKWFunction(getattr(object, i), namespace)

    # convenience - wraps your func for you.
    def registerKWFunction(self, function, namespace = '', funcName = None,
                           path = ''):
        """Like registerFunction, but the function is called with keyword args."""
        namespace = self._resolve_namespace(namespace, path)
        self.registerFunction(MethodSig(function, keywords=1), namespace,
                              funcName)

    def unregisterObject(self, object, namespace = '', path = ''):
        """Stop serving the object registered under namespace/path."""
        namespace = self._resolve_namespace(namespace, path)
        del self.objmap[namespace]
class SOAPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """HTTP handler that dispatches SOAP-RPC requests.

    POST bodies are parsed as SOAP envelopes and dispatched to callables
    registered on the owning server (self.server.funcmap / objmap).
    GET is honoured only to return the output of a registered ``wsdl``
    method.  Written for Python 2 (print statements, has_key, apply).
    """

    def version_string(self):
        # Advertised in the "Server:" header; embeds an HTML link plus the
        # SOAPpy and Python version numbers.
        return '<a href="http://pywebsvcs.sf.net">' + \
            'SOAPpy ' + __version__ + '</a> (Python ' + \
            sys.version.split()[0] + ')'

    def date_time_string(self):
        # Cache the value the base class produced so do_POST can echo the
        # same "Date:" header when dumping outgoing headers for debugging.
        self.__last_date_time_string = \
            BaseHTTPServer.BaseHTTPRequestHandler.\
            date_time_string(self)
        return self.__last_date_time_string

    def do_POST(self):
        """Parse a SOAP request, invoke the target, write the SOAP reply."""
        global _contexts

        # Assume failure until the call completes successfully.
        status = 500

        try:
            if self.server.config.dumpHeadersIn:
                s = 'Incoming HTTP headers'
                debugHeader(s)
                print self.raw_requestline.strip()
                print "\n".join(map (lambda x: x.strip(),
                    self.headers.headers))
                debugFooter(s)

            data = self.rfile.read(int(self.headers["Content-length"]))

            if self.server.config.dumpSOAPIn:
                s = 'Incoming SOAP'
                debugHeader(s)
                print data,
                if data[-1] != '\n':
                    print
                debugFooter(s)

            (r, header, body, attrs) = \
                parseSOAPRPC(data, header = 1, body = 1, attrs = 1)

            method = r._name
            args = r._aslist()
            kw = r._asdict()

            if Config.simplify_objects:
                args = simplify(args)
                kw = simplify(kw)

            # Handle mixed named and unnamed arguments by assuming
            # that all arguments with names of the form "v[0-9]+"
            # are unnamed and should be passed in numeric order,
            # other arguments are named and should be passed using
            # this name.
            # This is a non-standard exension to the SOAP protocol,
            # but is supported by Apache AXIS.
            # It is enabled by default. To disable, set
            # Config.specialArgs to False.

            ordered_args = {}
            named_args = {}

            if Config.specialArgs:
                for (k,v) in kw.items():
                    if k[0]=="v":
                        try:
                            i = int(k[1:])
                            ordered_args[i] = v
                        except ValueError:
                            named_args[str(k)] = v
                    else:
                        named_args[str(k)] = v

            # We have to decide namespace precedence
            # I'm happy with the following scenario
            # if r._ns is specified use it, if not check for
            # a path, if it's specified convert it and use it as the
            # namespace. If both are specified, use r._ns.

            ns = r._ns

            if len(self.path) > 1 and not ns:
                ns = self.path.replace("/", ":")
                if ns[0] == ":": ns = ns[1:]

            # authorization method
            a = None

            keylist = ordered_args.keys()
            keylist.sort()

            # create list in proper order w/o names
            tmp = map( lambda x: ordered_args[x], keylist)
            ordered_args = tmp

            #print '<-> Argument Matching Yielded:'
            #print '<-> Ordered Arguments:' + str(ordered_args)
            #print '<-> Named Arguments :' + str(named_args)

            resp = ""

            # For fault messages
            if ns:
                nsmethod = "%s:%s" % (ns, method)
            else:
                nsmethod = method

            try:
                # First look for registered functions
                if self.server.funcmap.has_key(ns) and \
                    self.server.funcmap[ns].has_key(method):
                    f = self.server.funcmap[ns][method]

                    # look for the authorization method
                    if self.server.config.authMethod != None:
                        authmethod = self.server.config.authMethod
                        if self.server.funcmap.has_key(ns) and \
                            self.server.funcmap[ns].has_key(authmethod):
                            a = self.server.funcmap[ns][authmethod]
                else:
                    # Now look at registered objects
                    # Check for nested attributes. This works even if
                    # there are none, because the split will return
                    # [method]
                    f = self.server.objmap[ns]

                    # Look for the authorization method
                    if self.server.config.authMethod != None:
                        authmethod = self.server.config.authMethod
                        if hasattr(f, authmethod):
                            a = getattr(f, authmethod)

                    # then continue looking for the method
                    l = method.split(".")
                    for i in l:
                        f = getattr(f, i)
            except:
                # Bare except is deliberate here: any failure to resolve the
                # target (missing namespace, attribute, etc.) is reported to
                # the client as a SOAP "Method Not Found" fault.
                info = sys.exc_info()
                try:
                    resp = buildSOAP(faultType("%s:Client" % NS.ENV_T,
                                               "Method Not Found",
                                               "%s : %s %s %s" % (nsmethod,
                                                                  info[0],
                                                                  info[1],
                                                                  info[2])),
                                     encoding = self.server.encoding,
                                     config = self.server.config)
                finally:
                    # Drop the traceback reference to avoid a reference cycle.
                    del info
                status = 500
            else:
                try:
                    if header:
                        x = HeaderHandler(header, attrs)

                    fr = 1

                    # call context book keeping
                    # We're stuffing the method into the soapaction if there
                    # isn't one, someday, we'll set that on the client
                    # and it won't be necessary here
                    # for now we're doing both
                    # NOTE(review): the membership test uses the literal
                    # lowercase key "soapaction" against self.headers.keys();
                    # this presumably relies on mimetools' case-insensitive
                    # header access -- confirm against the client side.

                    if "SOAPAction".lower() not in self.headers.keys() or \
                       self.headers["SOAPAction"] == "\"\"":
                        self.headers["SOAPAction"] = method

                    thread_id = thread.get_ident()
                    _contexts[thread_id] = SOAPContext(header, body,
                                                       attrs, data,
                                                       self.connection,
                                                       self.headers,
                                                       self.headers["SOAPAction"])

                    # Do an authorization check
                    if a != None:
                        if not apply(a, (), {"_SOAPContext" :
                                             _contexts[thread_id] }):
                            raise faultType("%s:Server" % NS.ENV_T,
                                            "Authorization failed.",
                                            "%s" % nsmethod)

                    # If it's wrapped, some special action may be needed
                    if isinstance(f, MethodSig):
                        c = None

                        if f.context:  # retrieve context object
                            c = _contexts[thread_id]

                        if Config.specialArgs:
                            if c:
                                named_args["_SOAPContext"] = c
                            fr = apply(f, ordered_args, named_args)
                        elif f.keywords:
                            # This is lame, but have to de-unicode
                            # keywords
                            strkw = {}
                            for (k, v) in kw.items():
                                strkw[str(k)] = v
                            if c:
                                strkw["_SOAPContext"] = c
                            fr = apply(f, (), strkw)
                        elif c:
                            fr = apply(f, args, {'_SOAPContext':c})
                        else:
                            fr = apply(f, args, {})
                    else:
                        if Config.specialArgs:
                            fr = apply(f, ordered_args, named_args)
                        else:
                            fr = apply(f, args, {})

                    # voidType results are serialized bare; everything else is
                    # wrapped in a {'Result': ...} structure.
                    if type(fr) == type(self) and \
                        isinstance(fr, voidType):
                        resp = buildSOAP(kw = {'%sResponse' % method: fr},
                                         encoding = self.server.encoding,
                                         config = self.server.config)
                    else:
                        resp = buildSOAP(kw =
                                         {'%sResponse' % method: {'Result': fr}},
                                         encoding = self.server.encoding,
                                         config = self.server.config)

                    # Clean up _contexts
                    if _contexts.has_key(thread_id):
                        del _contexts[thread_id]

                except Exception, e:
                    import traceback
                    info = sys.exc_info()
                    try:
                        if self.server.config.dumpFaultInfo:
                            s = 'Method %s exception' % nsmethod
                            debugHeader(s)
                            traceback.print_exception(info[0], info[1],
                                                      info[2])
                            debugFooter(s)

                        if isinstance(e, faultType):
                            f = e
                        else:
                            f = faultType("%s:Server" % NS.ENV_T,
                                          "Method Failed",
                                          "%s" % nsmethod)

                        if self.server.config.returnFaultInfo:
                            f._setDetail("".join(traceback.format_exception(
                                info[0], info[1], info[2])))
                        elif not hasattr(f, 'detail'):
                            f._setDetail("%s %s" % (info[0], info[1]))
                    finally:
                        del info

                    resp = buildSOAP(f, encoding = self.server.encoding,
                                     config = self.server.config)
                    status = 500
                else:
                    status = 200
        except faultType, e:
            # A fault raised outside the method call itself (e.g. by the
            # parser or the authorization hook).
            import traceback
            info = sys.exc_info()
            try:
                if self.server.config.dumpFaultInfo:
                    s = 'Received fault exception'
                    debugHeader(s)
                    traceback.print_exception(info[0], info[1],
                                              info[2])
                    debugFooter(s)

                if self.server.config.returnFaultInfo:
                    e._setDetail("".join(traceback.format_exception(
                        info[0], info[1], info[2])))
                elif not hasattr(e, 'detail'):
                    e._setDetail("%s %s" % (info[0], info[1]))
            finally:
                del info

            resp = buildSOAP(e, encoding = self.server.encoding,
                             config = self.server.config)
            status = 500
        except Exception, e:
            # internal error, report as HTTP server error
            if self.server.config.dumpFaultInfo:
                s = 'Internal exception %s' % e
                import traceback
                debugHeader(s)
                info = sys.exc_info()
                try:
                    traceback.print_exception(info[0], info[1], info[2])
                finally:
                    del info
                debugFooter(s)

            self.send_response(500)
            self.end_headers()

            if self.server.config.dumpHeadersOut and \
                self.request_version != 'HTTP/0.9':
                s = 'Outgoing HTTP headers'
                debugHeader(s)
                if self.responses.has_key(status):
                    s = ' ' + self.responses[status][0]
                else:
                    s = ''
                print "%s %d%s" % (self.protocol_version, 500, s)
                print "Server:", self.version_string()
                print "Date:", self.__last_date_time_string
                debugFooter(s)
        else:
            # got a valid SOAP response
            self.send_response(status)

            t = 'text/xml';
            if self.server.encoding != None:
                t += '; charset=%s' % self.server.encoding
            self.send_header("Content-type", t)
            self.send_header("Content-length", str(len(resp)))
            self.end_headers()

            if self.server.config.dumpHeadersOut and \
                self.request_version != 'HTTP/0.9':
                s = 'Outgoing HTTP headers'
                debugHeader(s)
                if self.responses.has_key(status):
                    s = ' ' + self.responses[status][0]
                else:
                    s = ''
                print "%s %d%s" % (self.protocol_version, status, s)
                print "Server:", self.version_string()
                print "Date:", self.__last_date_time_string
                print "Content-type:", t
                print "Content-length:", len(resp)
                debugFooter(s)

            if self.server.config.dumpSOAPOut:
                s = 'Outgoing SOAP'
                debugHeader(s)
                print resp,
                if resp[-1] != '\n':
                    print
                debugFooter(s)

            self.wfile.write(resp)
            self.wfile.flush()

            # We should be able to shut down both a regular and an SSL
            # connection, but under Python 2.1, calling shutdown on an
            # SSL connections drops the output, so this work-around.
            # This should be investigated more someday.

            if self.server.config.SSLserver and \
                isinstance(self.connection, SSL.Connection):
                self.connection.set_shutdown(SSL.SSL_SENT_SHUTDOWN |
                                             SSL.SSL_RECEIVED_SHUTDOWN)
            else:
                self.connection.shutdown(1)

    def do_GET(self):
        """Serve GET only for URLs ending in 'wsdl', via a registered
        ``wsdl`` method in the default (None) namespace; anything else gets
        an HTML error page."""

        #print 'command ', self.command
        #print 'path ', self.path
        #print 'request_version', self.request_version
        #print 'headers'
        #print ' type ', self.headers.type
        #print ' maintype', self.headers.maintype
        #print ' subtype ', self.headers.subtype
        #print ' params ', self.headers.plist

        path = self.path.lower()
        if path.endswith('wsdl'):
            method = 'wsdl'
            function = namespace = None
            if self.server.funcmap.has_key(namespace) \
                and self.server.funcmap[namespace].has_key(method):
                function = self.server.funcmap[namespace][method]
            else:
                if namespace in self.server.objmap.keys():
                    function = self.server.objmap[namespace]
                    l = method.split(".")
                    for i in l:
                        function = getattr(function, i)

            if function:
                self.send_response(200)
                self.send_header("Content-type", 'text/plain')
                self.end_headers()
                response = apply(function, ())
                self.wfile.write(str(response))
                return

        # return error
        self.send_response(200)
        self.send_header("Content-type", 'text/html')
        self.end_headers()
        self.wfile.write('''\
<title>
<head>Error!</head>
</title>

<body>
<h1>Oops!</h1>

<p>
  This server supports HTTP GET requests only for the the purpose of
  obtaining Web Services Description Language (WSDL) for a specific
  service.

  Either you requested an URL that does not end in "wsdl" or this
  server does not implement a wsdl method.
</p>
</body>''')

    def log_message(self, format, *args):
        # Suppress per-request logging unless the server was created with
        # log enabled.
        if self.server.log:
            BaseHTTPServer.BaseHTTPRequestHandler.\
            log_message (self, format, *args)
class SOAPServer(SOAPServerBase, SocketServer.TCPServer):
    """Single-threaded SOAP server built on SocketServer.TCPServer."""

    def __init__(self, addr = ('localhost', 8000),
        RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8',
        config = Config, namespace = None, ssl_context = None):

        # Probe the codec registry up front so an unknown encoding
        # fails fast with a LookupError.
        if encoding != None:
            ''.encode(encoding)

        if ssl_context != None and not config.SSLserver:
            raise AttributeError(
                "SSL server not supported by this Python installation")

        self.namespace   = namespace
        self.objmap      = {}
        self.funcmap     = {}
        self.ssl_context = ssl_context
        self.encoding    = encoding
        self.config      = config
        self.log         = log

        self.allow_reuse_address = 1

        SocketServer.TCPServer.__init__(self, addr, RequestHandler)
class ThreadingSOAPServer(SOAPServerBase, SocketServer.ThreadingTCPServer):
    """Thread-per-request SOAP server built on ThreadingTCPServer."""

    def __init__(self, addr = ('localhost', 8000),
        RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8',
        config = Config, namespace = None, ssl_context = None):

        # Probe the codec registry up front so an unknown encoding
        # fails fast with a LookupError.
        if encoding != None:
            ''.encode(encoding)

        if ssl_context != None and not config.SSLserver:
            raise AttributeError(
                "SSL server not supported by this Python installation")

        self.namespace   = namespace
        self.objmap      = {}
        self.funcmap     = {}
        self.ssl_context = ssl_context
        self.encoding    = encoding
        self.config      = config
        self.log         = log

        self.allow_reuse_address = 1

        SocketServer.ThreadingTCPServer.__init__(self, addr, RequestHandler)
# only define class if Unix domain sockets are available
if hasattr(socket, "AF_UNIX"):

    class SOAPUnixSocketServer(SOAPServerBase, SocketServer.UnixStreamServer):
        """SOAP server listening on a Unix domain socket."""

        def __init__(self, addr = 8000,
            RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8',
            config = Config, namespace = None, ssl_context = None):

            # Probe the codec registry up front so an unknown encoding
            # fails fast with a LookupError.
            if encoding != None:
                ''.encode(encoding)

            if ssl_context != None and not config.SSLserver:
                raise AttributeError(
                    "SSL server not supported by this Python installation")

            self.namespace   = namespace
            self.objmap      = {}
            self.funcmap     = {}
            self.ssl_context = ssl_context
            self.encoding    = encoding
            self.config      = config
            self.log         = log

            self.allow_reuse_address = 1

            SocketServer.UnixStreamServer.__init__(self, str(addr), RequestHandler)
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.