repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
civalin/cmdlr | src/cmdlr/loopctrl/chore.py | 1 | 2447 | """The choreographer for awaiting books."""
import asyncio
from math import ceil
from itertools import chain
class Choreographer:
    """Choreograph concurrent execution of book-awaiting runners.

    A fixed total budget of concurrent "channels" is split evenly across
    analyzers; each analyzer's channels are kept filled with its pending
    runners until every runner has completed.
    """

    def __init__(self, config, loop, aname_to_runners):
        """Prepare this choreographer.

        Args:
            config: object providing ``book_concurrent`` — the total
                number of tasks allowed to run at the same time.
            loop: asyncio event loop used to schedule the runners.
            aname_to_runners: mapping of analyzer name to a list of
                awaitable runners; the lists are consumed via ``pop``.
        """
        self.loop = loop
        self.total_channel = config.book_concurrent
        self.pending_aname_to_runners = aname_to_runners
        self.running_aname_to_tasks = {}

    def __get_analyzer_count(self):
        return len(self.pending_aname_to_runners)

    def __get_analyzer_channel_count(self):
        # Spread the channel budget evenly over analyzers, rounding up so
        # every analyzer gets at least one channel.
        return ceil(self.total_channel / self.__get_analyzer_count())

    def __get_analyzer_channel_idle_count(self, aname):
        analyzer_channel_count = self.__get_analyzer_channel_count()
        analyzer_running_tasks = self.running_aname_to_tasks.get(aname, [])
        return analyzer_channel_count - len(analyzer_running_tasks)

    def __clearup_running_tasks(self):
        # Drop finished tasks so their channels become available again.
        # (Unconditional assignment replaces the old `if tasks:` guard,
        # which only worked by accident for empty lists.)
        for aname, tasks in self.running_aname_to_tasks.items():
            self.running_aname_to_tasks[aname] = [
                task for task in tasks
                if not task.done()]

    def __runup_new_tasks(self):
        # Fill every analyzer's idle channels from its pending runners.
        for aname, runners in self.pending_aname_to_runners.items():
            idle_count = self.__get_analyzer_channel_idle_count(aname)
            for _ in range(idle_count):
                if not runners:
                    break
                task = self.loop.create_task(runners.pop())
                self.running_aname_to_tasks.setdefault(aname, []).append(task)

    def __concat_running_tasks(self):
        return list(chain.from_iterable(
            tasks for tasks in self.running_aname_to_tasks.values()
        ))

    async def run(self):
        """Run until every pending runner has been scheduled and finished."""
        while True:
            self.__clearup_running_tasks()
            self.__runup_new_tasks()
            running_tasks = self.__concat_running_tasks()

            if not running_tasks:
                return

            # The `loop` keyword argument was deprecated in Python 3.8 and
            # removed in 3.10; asyncio.wait uses the running loop, which is
            # self.loop because this coroutine is driven by it.
            await asyncio.wait(running_tasks,
                               return_when=asyncio.FIRST_COMPLETED)
| mit |
mrquim/repository.mrquim | plugin.video.LiveTV/resources/lib/AADecoder.py | 14 | 8517 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector for openload.io
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# by DrZ3r0
# ------------------------------------------------------------
# Modified by Shani
import re
class AADecoder(object):
    """Decoder for "aaencode"-obfuscated JavaScript (kaomoji encoding).

    The obfuscation represents each character of the original script as an
    arithmetic expression built from a fixed alphabet of emoticon tokens
    (``self.b``).  Decoding maps tokens back to digit values, evaluates the
    arithmetic with ``eval``, and reassembles characters from the resulting
    octal (or hex, for wide chars) digit strings.

    NOTE(review): this is Python 2 code (print statements, str/bytes mix)
    and relies on ``eval`` of attacker-controlled input — inherent to the
    approach, but worth keeping sandboxed.
    """

    def __init__(self, aa_encoded_data):
        # Strip the decorative comment the encoder sprinkles through the
        # payload before any parsing.
        self.encoded_str = aa_encoded_data.replace('/*´∇`*/', '')

        # Token alphabet: self.b[i] is the emoticon that stands for digit i.
        self.b = ["(c^_^o)", "(゚Θ゚)", "((o^_^o) - (゚Θ゚))", "(o^_^o)",
                  "(゚ー゚)", "((゚ー゚) + (゚Θ゚))", "((o^_^o) +(o^_^o))", "((゚ー゚) + (o^_^o))",
                  "((゚ー゚) + (゚ー゚))", "((゚ー゚) + (゚ー゚) + (゚Θ゚))", "(゚Д゚) .゚ω゚ノ", "(゚Д゚) .゚Θ゚ノ",
                  "(゚Д゚) ['c']", "(゚Д゚) .゚ー゚ノ", "(゚Д゚) .゚Д゚ノ", "(゚Д゚) [゚Θ゚]"]

    def is_aaencoded(self):
        # Heuristic: look for the characteristic aaencode prologue, then the
        # terminator sequence somewhere after it.
        idx = self.encoded_str.find("゚ω゚ノ= /`m´)ノ ~┻━┻ //*´∇`*/ ['_']; o=(゚ー゚) =_=3; c=(゚Θ゚) =(゚ー゚)-(゚ー゚); ")
        if idx == -1:
            return False

        is_encoded = self.encoded_str.find("(゚Д゚)[゚o゚]) (゚Θ゚)) ('_');", idx) != -1

        return is_encoded

    def base_repr(self, number, base=2, padding=0):
        """Render ``number`` as a string in ``base`` (2..36, clamped).

        NOTE(review): when ``padding`` is nonzero the zeros end up *before*
        the digits after the final reversal — confirm that is the intent.
        """
        digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        if base > len(digits):
            base = len(digits)
        num = abs(number)
        res = []
        while num:
            res.append(digits[num % base])
            num //= base
        if padding:
            res.append('0' * padding)
        if number < 0:
            res.append('-')
        # `res or '0'` makes base_repr(0) return '0' instead of ''.
        return ''.join(reversed(res or '0'))

    def decode_char(self, enc_char, radix):
        """Decode one encoded character payload into a digit string.

        Each parenthesized group in ``enc_char`` is an arithmetic
        expression yielding one digit of the character code in ``radix``.
        Returns "" when the payload cannot be decoded.
        """
        end_char = "+ "
        str_char = ""
        while enc_char != '':
            found = False
            # for i in range(len(self.b)):
            # print self.b[i], enc_char.find(self.b[i])
            # if enc_char.find(self.b[i]) == 0:
            # str_char += self.base_repr(i, radix)
            # enc_char = enc_char[len(self.b[i]):]
            # found = True
            # break
            # print 'found', found, enc_char
            if not found:
                # Replace every emoticon token with its digit value, then
                # split the remaining text into balanced "(...)" groups.
                for i in range(len(self.b)):
                    enc_char = enc_char.replace(self.b[i], str(i))
                # enc_char = enc_char.replace('(゚Θ゚)', '1').replace('(゚ー゚)', '4').replace('(c^_^o)', '0').replace('(o^_^o)', '3')
                # print 'enc_char', enc_char
                startpos = 0
                findClose = True
                balance = 1
                result = []
                if enc_char.startswith('('):
                    l = 0
                    # Scan for top-level balanced parenthesized groups.
                    for t in enc_char[1:]:
                        l += 1
                        # print 'looping', findClose, startpos, t, balance
                        if findClose and t == ')':
                            balance -= 1
                            if balance == 0:
                                result += [enc_char[startpos:l + 1]]
                                findClose = False
                                continue
                        elif not findClose and t == '(':
                            startpos = l
                            findClose = True
                            balance = 1
                            continue
                        elif t == '(':
                            balance += 1
                    if result is None or len(result) == 0:
                        return ""
                    else:
                        # Each group evaluates to one digit of the char code.
                        for r in result:
                            value = self.decode_digit(r, radix)
                            # print 'va', value
                            str_char += value
                            if value == "":
                                return ""
                        return str_char
            enc_char = enc_char[len(end_char):]
        return str_char

    def parseJSString(self, s):
        """Evaluate a JS-style numeric expression built from ![]/[] tricks.

        Returns None (implicitly) when evaluation fails.
        """
        try:
            # print s
            # offset = 1 if s[0] == '+' else 0
            tmp = (s.replace('!+[]', '1').replace('!![]', '1').replace('[]', '0'))  # .replace('(','str(')[offset:])
            val = int(eval(tmp))
            return val
        except:
            pass

    def decode_digit(self, enc_int, radix):
        """Evaluate one digit expression; returns "" / partial on failure."""
        # enc_int = enc_int.replace('(゚Θ゚)', '1').replace('(゚ー゚)', '4').replace('(c^_^o)', '0').replace('(o^_^o)', '3')
        # print 'enc_int before', enc_int
        # for i in range(len(self.b)):
        # print self.b[i], enc_char.find(self.b[i])
        # if enc_char.find(self.b[i]) > 0:
        # str_char += self.base_repr(i, radix)
        # enc_char = enc_char[len(self.b[i]):]
        # found = True
        # break
        # enc_int=enc_int.replace(self.b[i], str(i))
        # print 'enc_int before', enc_int

        # Fast path: the whole expression is already valid Python arithmetic.
        try:
            return str(eval(enc_int))
        except: pass

        # Fallback: split on '))+' boundaries, rebalance parentheses of each
        # fragment, and evaluate the pieces individually.
        rr = '(\(.+?\)\))\+'
        rerr = enc_int.split('))+')  # re.findall(rr, enc_int)
        v = ""
        # print rerr
        for c in rerr:
            if len(c) > 0:
                # print 'v', c
                if c.strip().endswith('+'):
                    c = c.strip()[:-1]
                # print 'v', c
                startbrackets = len(c) - len(c.replace('(', ''))
                endbrackets = len(c) - len(c.replace(')', ''))
                if startbrackets > endbrackets:
                    c += ')' * (startbrackets - endbrackets)
                if '[' in c:
                    v += str(self.parseJSString(c))
                else:
                    # print c
                    v += str(eval(c))
        return v

        # unreachable code
        # mode 0=+, 1=-
        # mode = 0
        # value = 0
        # while enc_int != '':
        # found = False
        # for i in range(len(self.b)):
        # if enc_int.find(self.b[i]) == 0:
        # if mode == 0:
        # value += i
        # else:
        # value -= i
        # enc_int = enc_int[len(self.b[i]):]
        # found = True
        # break
        # if not found:
        # return ""
        # enc_int = re.sub('^\s+|\s+$', '', enc_int)
        # if enc_int.find("+") == 0:
        # mode = 0
        # else:
        # mode = 1
        # enc_int = enc_int[1:]
        # enc_int = re.sub('^\s+|\s+$', '', enc_int)
        # return self.base_repr(value, radix)

    def decode(self):
        """Decode the payload; return the plain text or False on failure."""
        self.encoded_str = re.sub('^\s+|\s+$', '', self.encoded_str)

        # get data: the encoded character stream sits between the fixed
        # aaencode prologue and epilogue markers.
        pattern = (r"\(゚Д゚\)\[゚o゚\]\+ (.+?)\(゚Д゚\)\[゚o゚\]\)")
        result = re.search(pattern, self.encoded_str, re.DOTALL)
        if result is None:
            print "AADecoder: data not found"
            return False

        data = result.group(1)

        # hex decode string
        begin_char = "(゚Д゚)[゚ε゚]+"
        alt_char = "(o゚ー゚o)+ "

        out = ''
        # print data
        while data != '':
            # Check new char: every character payload starts with begin_char.
            if data.find(begin_char) != 0:
                print "AADecoder: data not found"
                return False
            data = data[len(begin_char):]

            # Find encoded char: everything up to the next begin_char.
            enc_char = ""
            if data.find(begin_char) == -1:
                enc_char = data
                data = ""
            else:
                enc_char = data[:data.find(begin_char)]
                data = data[len(enc_char):]

            radix = 8
            # Detect radix 16 for utf8 char
            if enc_char.find(alt_char) == 0:
                enc_char = enc_char[len(alt_char):]
                radix = 16

            # print repr(enc_char), radix
            # print enc_char.replace('(゚Θ゚)', '1').replace('(゚ー゚)', '4').replace('(c^_^o)', '0').replace('(o^_^o)', '3')
            # print 'The CHAR', enc_char, radix

            str_char = self.decode_char(enc_char, radix)

            if str_char == "":
                print "no match : "
                print data + "\nout = " + out + "\n"
                return False

            # print 'sofar', str_char, radix,out

            out += chr(int(str_char, radix))

            # print 'sfar', chr(int(str_char, radix)), out

        if out == "":
            print "no match : " + data
            return False

        return out
| gpl-2.0 |
alexpilotti/cloudbase-init | cloudbaseinit/conf/cloudstack.py | 3 | 2239 | # Copyright 2016 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Config options available for the CloudStack metadata service."""
from oslo_config import cfg
from cloudbaseinit.conf import base as conf_base
class CloudStackOptions(conf_base.Options):
    """Config options available for the CloudStack metadata service."""

    def __init__(self, config):
        """Declare the CloudStack options under the "cloudstack" group."""
        super(CloudStackOptions, self).__init__(config, group="cloudstack")
        self._options = [
            # Base URL of the CloudStack metadata service; the old
            # DEFAULT/cloudstack_metadata_ip option is still honoured.
            cfg.StrOpt(
                "metadata_base_url", default="http://10.1.1.1/",
                help="The base URL where the service looks for metadata",
                deprecated_name="cloudstack_metadata_ip",
                deprecated_group="DEFAULT"),
            cfg.IntOpt(
                "password_server_port", default=8080,
                help="The port number used by the Password Server."
            ),
            # TLS behaviour when talking to the metadata endpoints.
            cfg.BoolOpt(
                "https_allow_insecure", default=False,
                help="Whether to disable the validation of HTTPS "
                     "certificates."),
            cfg.StrOpt(
                "https_ca_bundle", default=None,
                help="The path to a CA_BUNDLE file or directory with "
                     "certificates of trusted CAs."),
        ]

    def register(self):
        """Register the current options to the global ConfigOpts object."""
        group = cfg.OptGroup(self.group_name, title='CloudStack Options')
        self._config.register_group(group)
        self._config.register_opts(self._options, group=group)

    def list(self):
        """Return a list which contains all the available options."""
        return self._options
| apache-2.0 |
subeax/grab | grab/djangoui/grabstat/migrations/0002_auto__del_taskresult__add_task.py | 2 | 4288 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # South schema migration (auto-generated): replaces the old TaskResult
    # model with the new Task model.  Do not hand-edit the column
    # definitions — they must mirror the models at generation time.

    def forwards(self, orm):
        # Deleting model 'TaskResult'
        db.delete_table(u'grabstat_taskresult')

        # Adding model 'Task'
        db.create_table(u'grabstat_task', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('record_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
            ('start_time', self.gf('django.db.models.fields.DateTimeField')(db_index=True, null=True, blank=True)),
            ('stop_time', self.gf('django.db.models.fields.DateTimeField')(db_index=True, null=True, blank=True)),
            ('status', self.gf('django.db.models.fields.CharField')(default='new', max_length=10, db_index=True, blank=True)),
            ('error_traceback', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('spider_stats', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('spider_timing', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('work_time', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('pid', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('task_name', self.gf('django.db.models.fields.CharField')(max_length=40, blank=True)),
        ))
        db.send_create_signal(u'grabstat', ['Task'])

    def backwards(self, orm):
        # Adding model 'TaskResult' (re-created with its original columns)
        db.create_table(u'grabstat_taskresult', (
            ('status', self.gf('django.db.models.fields.CharField')(default='new', max_length=10, blank=True, db_index=True)),
            ('error_traceback', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('spider_timing', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('start_time', self.gf('django.db.models.fields.DateTimeField')(blank=True, null=True, db_index=True)),
            ('pid', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('work_time', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('spider_stats', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('stop_time', self.gf('django.db.models.fields.DateTimeField')(blank=True, null=True, db_index=True)),
            ('task_name', self.gf('django.db.models.fields.CharField')(max_length=40, blank=True)),
            ('record_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True, db_index=True)),
        ))
        db.send_create_signal(u'grabstat', ['TaskResult'])

        # Deleting model 'Task'
        db.delete_table(u'grabstat_task')

    # Frozen ORM snapshot used by South to reconstruct model state.
    models = {
        u'grabstat.task': {
            'Meta': {'object_name': 'Task'},
            'error_traceback': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'pid': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'record_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'spider_stats': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'spider_timing': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '10', 'db_index': 'True', 'blank': 'True'}),
            'stop_time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'task_name': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
            'work_time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['grabstat']
ctb/cvxpy | examples/penalty_comp_cvx.py | 12 | 2035 | from __future__ import division
import sys
import cvxopt
import numpy as np
from pylab import *
import math
from cvxpy import *
# Taken from CVX website http://cvxr.com/cvx/examples/
# Figure 6.2: Penalty function approximation
# Ported from cvx matlab to cvxpy by Misrab Faizullah-Khan
# Original comments below
# Section 6.1.2
# Boyd & Vandenberghe "Convex Optimization"
# Original by Lieven Vandenberghe
# Adapted for CVX Argyris Zymnis - 10/2005
#
# Comparison of the ell1, ell2, deadzone-linear and log-barrier
# penalty functions for the approximation problem:
# minimize phi(A*x-b),
#
# where phi(x) is the penalty function
# Generate input data
# (m observations, n unknowns; A and b are random Gaussian problem data)
m, n = 100, 30
A = cvxopt.normal(m,n) #np.random.randn(m,n)
b = cvxopt.normal(m,1) #np.random.randn(m,1)

# l-1 approximation: minimize ||A*x1 - b||_1
x1 = Variable(n)
objective1 = Minimize( norm(A*x1-b, 1) )
p1 = Problem(objective1, [])
#p1 = Problem(Minimize( norm(A*x1-b, 1), []))

# l-2 approximation: minimize ||A*x2 - b||_2
x2 = Variable(n)
objective2 = Minimize( norm(A*x2-b, 2) )
p2 = Problem(objective2, [])

# deadzone approximation
# minimize sum(deadzone(Ax+b,0.5))
# deadzone(y,z) = max(abs(y)-z,0)
def deadzone(y,z):
    # Deadzone penalty: zero inside [-z, z], linear outside.
    return pos(abs(y)-z)

dz = 0.5
xdz = Variable(n)
# NOTE(review): this problem uses A*xdz+b while the l-1/l-2 problems use
# A*x-b — confirm the sign difference is intended.
objective3 = Minimize( sum_entries( deadzone(A*xdz+b, dz) ) )
p3 = Problem(objective3, [])

# Solve the problems
p1.solve()
p2.solve()
p3.solve()

# Plot histogram of residuals for each penalty, overlaid with the
# (scaled) penalty function itself.
range_max=2.0
#rr = np.arange(-range_max, range_max, 1e-2)
rr = np.linspace(-2, 3, 20)

# l-1 plot  (note: `n` is rebound here to hist()'s bin counts)
subplot(3, 1, 1)
n, bins, patches = hist(A*x1.value-b, 50, range=[-2, 2])
plot(bins, np.abs(bins)*35/3, '-') # multiply by scaling factor for plot
ylabel('l-1 norm')
title('Penalty function approximation')

# l-2 plot
subplot(3, 1, 2)
n, bins, patches = hist(A*x2.value-b, 50, range=[-2, 2])
plot(bins, np.power(bins, 2)*2, '-')
ylabel('l-2 norm')

# deadzone plot
subplot(3, 1, 3)
n, bins, patches = hist(A*xdz.value+b, 50, range=[-2, 2])
zeros = np.array([0 for x in bins])
plot(bins, np.maximum((np.abs(bins)-dz)*35/3, zeros), '-')
ylabel('deadzone')

show()
chouseknecht/ansible | lib/ansible/modules/network/panos/_panos_mgtconfig.py | 41 | 5776 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_mgtconfig
short_description: configure management settings of device
description:
- Configure management settings of device
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
deprecated:
alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead.
removed_in: "2.12"
why: Consolidating code base.
options:
dns_server_primary:
description:
- address of primary DNS server
dns_server_secondary:
description:
- address of secondary DNS server
panorama_primary:
description:
- address of primary Panorama server
panorama_secondary:
description:
- address of secondary Panorama server
commit:
description:
- commit if changed
type: bool
default: 'yes'
extends_documentation_fragment: panos
'''
EXAMPLES = '''
- name: set dns and panorama
panos_mgtconfig:
ip_address: "192.168.1.1"
password: "admin"
dns_server_primary: "1.1.1.1"
dns_server_secondary: "1.1.1.2"
panorama_primary: "1.1.1.3"
panorama_secondary: "1.1.1.4"
'''
RETURN = '''
# Default return values
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
try:
    import pan.xapi
    from pan.xapi import PanXapiError
    HAS_LIB = True
except ImportError:
    # pan-python is optional at import time; main() reports a clear
    # failure message when it is missing.
    HAS_LIB = False
# XPaths (within the firewall candidate configuration) of the elements
# managed by the setters below.
_XPATH_DNS_SERVERS = "/config/devices/entry[@name='localhost.localdomain']" +\
                     "/deviceconfig/system/dns-setting/servers"
_XPATH_PANORAMA_SERVERS = "/config" +\
                          "/devices/entry[@name='localhost.localdomain']" +\
                          "/deviceconfig/system"


def _update_server_element(xapi, base_xpath, tag, new_value):
    """Set <tag> under base_xpath to new_value unless it already matches.

    Returns True when the device configuration was modified, False when
    the current value already equals new_value.
    """
    xpath = base_xpath + "/" + tag

    # Read the current element value (if the element exists at all).
    xapi.get(xpath)
    current = xapi.element_root.find(".//" + tag)
    if current is not None and current.text == new_value:
        return False

    element = "<%(tag)s>%(value)s</%(tag)s>" %\
        dict(tag=tag, value=new_value)
    xapi.edit(xpath, element)
    return True


def set_dns_server(xapi, new_dns_server, primary=True):
    """Configure the primary/secondary DNS server; return True if changed."""
    tag = "primary" if primary else "secondary"
    return _update_server_element(xapi, _XPATH_DNS_SERVERS, tag,
                                  new_dns_server)


def set_panorama_server(xapi, new_panorama_server, primary=True):
    """Configure the primary/secondary Panorama server; return True if changed."""
    tag = "panorama-server" if primary else "panorama-server-2"
    return _update_server_element(xapi, _XPATH_PANORAMA_SERVERS, tag,
                                  new_panorama_server)
def main():
    """Module entry point: apply DNS/Panorama settings and optionally commit."""
    argument_spec = dict(
        ip_address=dict(required=True),
        password=dict(required=True, no_log=True),
        username=dict(default='admin'),
        dns_server_primary=dict(),
        dns_server_secondary=dict(),
        panorama_primary=dict(),
        panorama_secondary=dict(),
        commit=dict(type='bool', default=True)
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    if not HAS_LIB:
        module.fail_json(msg='pan-python is required for this module')

    ip_address = module.params["ip_address"]
    password = module.params["password"]
    username = module.params['username']
    dns_server_primary = module.params['dns_server_primary']
    dns_server_secondary = module.params['dns_server_secondary']
    panorama_primary = module.params['panorama_primary']
    panorama_secondary = module.params['panorama_secondary']
    commit = module.params['commit']

    xapi = pan.xapi.PanXapi(
        hostname=ip_address,
        api_username=username,
        api_password=password
    )

    # Each setter returns True only when it changed device config, so
    # `changed` accurately reflects whether anything was modified.
    changed = False
    try:
        if dns_server_primary is not None:
            changed |= set_dns_server(xapi, dns_server_primary, primary=True)
        if dns_server_secondary is not None:
            changed |= set_dns_server(xapi, dns_server_secondary, primary=False)
        if panorama_primary is not None:
            changed |= set_panorama_server(xapi, panorama_primary, primary=True)
        if panorama_secondary is not None:
            changed |= set_panorama_server(xapi, panorama_secondary, primary=False)

        # Only commit when something actually changed (sync: wait for job).
        if changed and commit:
            xapi.commit(cmd="<commit></commit>", sync=True, interval=1)
    except PanXapiError as exc:
        module.fail_json(msg=to_native(exc))

    module.exit_json(changed=changed, msg="okey dokey")


if __name__ == '__main__':
    main()
| gpl-3.0 |
UWave/uwave-eas | gr-same/docs/doxygen/doxyxml/__init__.py | 333 | 2474 | #
# Copyright 2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Python interface to contents of doxygen xml documentation.
Example use:
See the contents of the example folder for the C++ and
doxygen-generated xml used in this example.
>>> # Parse the doxygen docs.
>>> import os
>>> this_dir = os.path.dirname(globals()['__file__'])
>>> xml_path = this_dir + "/example/xml/"
>>> di = DoxyIndex(xml_path)
Get a list of all top-level objects.
>>> print([mem.name() for mem in di.members()])
[u'Aadvark', u'aadvarky_enough', u'main']
Get all functions.
>>> print([mem.name() for mem in di.in_category(DoxyFunction)])
[u'aadvarky_enough', u'main']
Check if an object is present.
>>> di.has_member(u'Aadvark')
True
>>> di.has_member(u'Fish')
False
Get an item by name and check its properties.
>>> aad = di.get_member(u'Aadvark')
>>> print(aad.brief_description)
Models the mammal Aadvark.
>>> print(aad.detailed_description)
Sadly the model is incomplete and cannot capture all aspects of an aadvark yet.
<BLANKLINE>
This line is uninformative and is only to test line breaks in the comments.
>>> [mem.name() for mem in aad.members()]
[u'aadvarkness', u'print', u'Aadvark', u'get_aadvarkness']
>>> aad.get_member(u'print').brief_description
u'Outputs the vital aadvark statistics.'
"""
from doxyindex import DoxyIndex, DoxyFunction, DoxyParam, DoxyClass, DoxyFile, DoxyNamespace, DoxyGroup, DoxyFriend, DoxyOther
def _test():
    """Smoke-test DoxyIndex on the bundled example, then run the doctests."""
    import doctest
    import os

    this_dir = os.path.dirname(globals()['__file__'])
    index = DoxyIndex(this_dir + "/example/xml/")

    # Touch the Aadvark class documentation to make sure parsing works.
    member = index.get_member('Aadvark')
    member.brief_description

    return doctest.testmod()


if __name__ == "__main__":
    _test()
| gpl-2.0 |
mith1979/ansible_automation | applied_python/applied_python/lib/python2.7/site-packages/django/core/management/commands/test.py | 267 | 3933 | import logging
import os
import sys
from django.conf import settings
from django.core.management.base import BaseCommand
from django.test.utils import get_runner
class Command(BaseCommand):
    help = 'Discover and run tests in the specified modules or the current directory.'

    # The test runner handles its own setup; skip Django's system checks.
    requires_system_checks = False

    def __init__(self):
        # Populated by run_from_argv() before normal option parsing, so the
        # chosen runner class can contribute its own arguments.
        self.test_runner = None
        super(Command, self).__init__()

    def run_from_argv(self, argv):
        """
        Pre-parse the command line to extract the value of the --testrunner
        option. This allows a test runner to define additional command line
        arguments.
        """
        option = '--testrunner='
        for arg in argv[2:]:
            if arg.startswith(option):
                self.test_runner = arg[len(option):]
                break
        super(Command, self).run_from_argv(argv)

    def add_arguments(self, parser):
        """Add the command's own arguments plus any the runner defines."""
        # NOTE(review): the trailing commas after several add_argument()
        # calls below create throwaway tuples; kept byte-for-byte as-is.
        parser.add_argument('args', metavar='test_label', nargs='*',
            help='Module paths to test; can be modulename, modulename.TestCase or modulename.TestCase.test_method')
        parser.add_argument('--noinput',
            action='store_false', dest='interactive', default=True,
            help='Tells Django to NOT prompt the user for input of any kind.'),
        parser.add_argument('--failfast',
            action='store_true', dest='failfast', default=False,
            help='Tells Django to stop running the test suite after first '
                 'failed test.'),
        parser.add_argument('--testrunner',
            action='store', dest='testrunner',
            help='Tells Django to use specified test runner class instead of '
                 'the one specified by the TEST_RUNNER setting.'),
        parser.add_argument('--liveserver',
            action='store', dest='liveserver', default=None,
            help='Overrides the default address where the live server (used '
                 'with LiveServerTestCase) is expected to run from. The '
                 'default value is localhost:8081.'),

        test_runner_class = get_runner(settings, self.test_runner)

        if hasattr(test_runner_class, 'option_list'):
            # Keeping compatibility with both optparse and argparse at this level
            # would be too heavy for a non-critical item
            raise RuntimeError(
                "The method to extend accepted command-line arguments by the "
                "test management command has changed in Django 1.8. Please "
                "create an add_arguments class method to achieve this.")

        if hasattr(test_runner_class, 'add_arguments'):
            test_runner_class.add_arguments(parser)

    def execute(self, *args, **options):
        if options['verbosity'] > 0:
            # ensure that deprecation warnings are displayed during testing
            # the following state is assumed:
            # logging.capturewarnings is true
            # a "default" level warnings filter has been added for
            # DeprecationWarning. See django.conf.LazySettings._configure_logging
            logger = logging.getLogger('py.warnings')
            handler = logging.StreamHandler()
            logger.addHandler(handler)
        super(Command, self).execute(*args, **options)
        if options['verbosity'] > 0:
            # remove the testing-specific handler
            logger.removeHandler(handler)

    def handle(self, *test_labels, **options):
        from django.conf import settings
        from django.test.utils import get_runner

        TestRunner = get_runner(settings, options.get('testrunner'))

        if options.get('liveserver') is not None:
            os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = options['liveserver']
            del options['liveserver']

        test_runner = TestRunner(**options)
        failures = test_runner.run_tests(test_labels)

        # Exit non-zero so CI notices failures.
        if failures:
            sys.exit(bool(failures))
| apache-2.0 |
pyfisch/servo | tests/wpt/web-platform-tests/tools/taskcluster/verify.py | 12 | 1029 | import argparse
import json
import os
import jsone
import yaml
# Locations of this tool and of the repository root (two levels up).
here = os.path.dirname(__file__)
root = os.path.abspath(os.path.join(here, "..", ".."))


def create_parser():
    """Return the (argument-free) command line parser for this tool."""
    parser = argparse.ArgumentParser()
    return parser
def run(venv, **kwargs):
    """Render .taskcluster.yml against recorded GitHub events and print the
    resulting task definitions."""
    with open(os.path.join(root, ".taskcluster.yml")) as f:
        template = yaml.safe_load(f)

    sample_events = [
        ("pr_event.json", "github-pull-request", "Pull Request"),
        ("master_push_event.json", "github-push", "Push to master"),
    ]

    for filename, tasks_for, title in sample_events:
        with open(os.path.join(here, "testdata", filename)) as f:
            event = json.load(f)

        context = {"tasks_for": tasks_for,
                   "event": event,
                   # Identity slugid keeps the rendered output readable.
                   "as_slugid": lambda x: x}
        rendered = jsone.render(template, context)

        heading = "Got %s tasks for %s" % (len(rendered["tasks"]), title)
        print(heading)
        print("=" * len(heading))
        for task in rendered["tasks"]:
            print(json.dumps(task, indent=2))
        print("")
| mpl-2.0 |
hectoruelo/scrapy | scrapy/__init__.py | 89 | 1434 | """
Scrapy - a web crawling and web scraping framework written for Python
"""
# Public API of the top-level scrapy package.
__all__ = ['__version__', 'version_info', 'optional_features', 'twisted_version',
           'Spider', 'Request', 'FormRequest', 'Selector', 'Item', 'Field']

# Scrapy version (read from the bundled VERSION file)
import pkgutil
__version__ = pkgutil.get_data(__package__, 'VERSION').decode('ascii').strip()
version_info = tuple(int(v) if v.isdigit() else v
                     for v in __version__.split('.'))
del pkgutil

# Check minimum required Python version
import sys
if sys.version_info < (2, 7):
    print("Scrapy %s requires Python 2.7" % __version__)
    sys.exit(1)

# Ignore noisy twisted deprecation warnings
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning, module='twisted')
del warnings

# Apply monkey patches to fix issues in external libraries
# (must happen before the shortcut imports below)
from . import _monkeypatches
del _monkeypatches

# WARNING: optional_features set is deprecated and will be removed soon. Do not use.
optional_features = set()
# TODO: backwards compatibility, remove for Scrapy 0.20
optional_features.add('ssl')
from twisted import version as _txv
twisted_version = (_txv.major, _txv.minor, _txv.micro)
if twisted_version >= (11, 1, 0):
    optional_features.add('http11')

# Declare top-level shortcuts
from scrapy.spiders import Spider
from scrapy.http import Request, FormRequest
from scrapy.selector import Selector
from scrapy.item import Item, Field

# Keep the package namespace clean of helper modules.
del sys
| bsd-3-clause |
tmerrick1/spack | var/spack/repos/builtin/packages/perl-math-matrixreal/package.py | 5 | 1698 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PerlMathMatrixreal(PerlPackage):
    """Implements the data type "matrix of real numbers" (and consequently
    also "vector of real numbers")."""

    homepage = "http://search.cpan.org/~leto/Math-MatrixReal/lib/Math/MatrixReal.pm"
    url = "http://search.cpan.org/CPAN/authors/id/L/LE/LETO/Math-MatrixReal-2.13.tar.gz"

    # Second argument is the md5 checksum of the release tarball.
    version('2.13', 'cf9d6ff71f2df075559ea752104ca199')

    # Module::Build is only needed while building/installing the dist.
    depends_on('perl-module-build', type='build')
| lgpl-2.1 |
Princeton-Quadcopter/PQ-Software | Ardupilot/Tools/scripts/add_git_hashes.py | 216 | 1614 | #!/usr/bin/env python
'''
Add git hashes to .px4 file for PX4/Pixhawk build
Written by Jon Challinger January 2015
'''
import json
import sys
import os
import subprocess
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('input_file')
parser.add_argument('output_file')
parser.add_argument('--ardupilot')
parser.add_argument('--px4')
parser.add_argument('--nuttx')
parser.add_argument('--uavcan')
args = parser.parse_args()


def _git_head(repo_path):
    """Return the HEAD commit hash of the git repository at repo_path.

    Propagates subprocess/OS errors; the caller decides how to report.
    """
    git_dir = os.path.join(repo_path, ".git")
    output = subprocess.check_output(
        ["git", "--git-dir", git_dir, "rev-parse", "HEAD"])
    return output.strip().decode('ascii')


# Read the existing firmware manifest.
with open(args.input_file, 'r') as f:
    fw_json = json.load(f)

# (CLI value, manifest key, short name used in the failure message) —
# replaces four copy-pasted try/except blocks.
_REPOS = [
    (args.ardupilot, "ardupilot_git_hash", "apm"),
    (args.px4, "px4_git_hash", "px4"),
    (args.nuttx, "nuttx_git_hash", "nuttx"),
    (args.uavcan, "uavcan_git_hash", "uavcan"),
]

for repo_path, json_key, label in _REPOS:
    if repo_path is None:
        continue
    try:
        fw_json[json_key] = _git_head(repo_path)
    except Exception:
        # Best-effort, matching the original behaviour: a missing or
        # broken repository only produces a warning.
        print("Failed to get %s hash" % label)

# Write the augmented manifest back out ('w' mode already truncates, so
# the original explicit truncate() was redundant).
with open(args.output_file, 'w') as f:
    json.dump(fw_json, f, indent=4)
| gpl-3.0 |
ting-yuan/web-page-replay | third_party/ipaddr/ipaddr.py | 92 | 60528 | #!/usr/bin/python
#
# Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
This library is used to create/poke/manipulate IPv4 and IPv6 addresses
and networks.
"""
__version__ = '2.1.10'
import struct
IPV4LENGTH = 32
IPV6LENGTH = 128
class AddressValueError(ValueError):
    """Raised when an IP address string or integer is malformed."""
class NetmaskValueError(ValueError):
    """Raised when a netmask (prefix or dotted-quad) is invalid."""
def IPAddress(address, version=None):
    """Take an IP string/int and return an object of the correct type.

    Args:
        address: A string or integer, the IP address.  Either IPv4 or
          IPv6 addresses may be supplied; integers less than 2**32 will
          be considered to be IPv4 by default.
        version: An Integer, 4 or 6. If set, don't try to automatically
          determine what the IP address type is. important for things
          like IPAddress(1), which could be IPv4, '0.0.0.1', or IPv6,
          '::1'.

    Returns:
        An IPv4Address or IPv6Address object.

    Raises:
        ValueError: if the string passed isn't either a v4 or a v6
          address.

    """
    # An explicit version short-circuits the guessing below; any other
    # truthy version value falls through to auto-detection.
    if version == 4:
        return IPv4Address(address)
    if version == 6:
        return IPv6Address(address)

    # Try v4 first (so small integers become IPv4), then v6.
    for address_class in (IPv4Address, IPv6Address):
        try:
            return address_class(address)
        except (AddressValueError, NetmaskValueError):
            pass

    raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
                     address)
def IPNetwork(address, version=None, strict=False):
    """Take an IP string/int and return an object of the correct type.

    Args:
        address: A string or integer, the IP address.  Either IPv4 or
          IPv6 addresses may be supplied; integers less than 2**32 will
          be considered to be IPv4 by default.
        version: An Integer, if set, don't try to automatically
          determine what the IP address type is. important for things
          like IPNetwork(1), which could be IPv4, '0.0.0.1/32', or IPv6,
          '::1/128'.
        strict: A boolean; if True the address must be a true network
          address (no host bits set).

    Returns:
        An IPv4Network or IPv6Network object.

    Raises:
        ValueError: if the string passed isn't either a v4 or a v6
          address. Or if a strict network was requested and a strict
          network wasn't given.

    """
    # An explicit version short-circuits the guessing below; any other
    # truthy version value falls through to auto-detection.
    if version == 4:
        return IPv4Network(address, strict)
    if version == 6:
        return IPv6Network(address, strict)

    # Try v4 first (so small integers become IPv4), then v6.
    for network_class in (IPv4Network, IPv6Network):
        try:
            return network_class(address, strict)
        except (AddressValueError, NetmaskValueError):
            pass

    raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
                     address)
def v4_int_to_packed(address):
    """Return the packed (4 network-order bytes) form of an IPv4 address.

    Args:
        address: An integer representation of an IPv4 IP address.

    Returns:
        The binary representation of this address.

    Raises:
        ValueError: If the integer is too large to be an IPv4 IP
          address.

    """
    max_v4 = _BaseV4._ALL_ONES
    if address > max_v4:
        raise ValueError('Address too large for IPv4')
    return Bytes(struct.pack('!I', address))
def v6_int_to_packed(address):
    """Return the packed (16 network-order bytes) form of an IPv6 address.

    Args:
        address: An integer representation of an IPv6 IP address.

    Returns:
        The binary representation of this address.

    """
    high = address >> 64
    low = address & (2**64 - 1)
    return Bytes(struct.pack('!QQ', high, low))
def _find_address_range(addresses):
"""Find a sequence of addresses.
Args:
addresses: a list of IPv4 or IPv6 addresses.
Returns:
A tuple containing the first and last IP addresses in the sequence.
"""
first = last = addresses[0]
for ip in addresses[1:]:
if ip._ip == last._ip + 1:
last = ip
else:
break
return (first, last)
def _get_prefix_length(number1, number2, bits):
"""Get the number of leading bits that are same for two numbers.
Args:
number1: an integer.
number2: another integer.
bits: the maximum number of bits to compare.
Returns:
The number of leading bits that are the same for two numbers.
"""
for i in range(bits):
if number1 >> i == number2 >> i:
return bits - i
return 0
def _count_righthand_zero_bits(number, bits):
"""Count the number of zero bits on the right hand side.
Args:
number: an integer.
bits: maximum number of bits to count.
Returns:
The number of zero bits on the right hand side of the number.
"""
if number == 0:
return bits
for i in range(bits):
if (number >> i) % 2:
return i
def summarize_address_range(first, last):
    """Summarize a network range given the first and last IP addresses.

    Example:
        >>> summarize_address_range(IPv4Address('1.1.1.0'),
            IPv4Address('1.1.1.130'))
        [IPv4Network('1.1.1.0/25'), IPv4Network('1.1.1.128/31'),
        IPv4Network('1.1.1.130/32')]

    Args:
        first: the first IPv4Address or IPv6Address in the range.
        last: the last IPv4Address or IPv6Address in the range.

    Returns:
        The address range collapsed to a list of IPv4Network's or
        IPv6Network's.

    Raise:
        TypeError:
            If the first and last objects are not IP addresses.
            If the first and last objects are not the same version.
        ValueError:
            If the last object is not greater than the first.
            If the version is not 4 or 6.

    """
    if not (isinstance(first, _BaseIP) and isinstance(last, _BaseIP)):
        raise TypeError('first and last must be IP addresses, not networks')
    if first.version != last.version:
        raise TypeError("%s and %s are not of the same version" % (
            str(first), str(last)))
    if first > last:
        raise ValueError('last IP address must be greater than first')
    networks = []
    # Pick the concrete network class for the version at hand.
    if first.version == 4:
        ip = IPv4Network
    elif first.version == 6:
        ip = IPv6Network
    else:
        raise ValueError('unknown IP version')
    ip_bits = first._max_prefixlen
    first_int = first._ip
    last_int = last._ip
    # Greedily emit the largest aligned block that starts at first_int and
    # does not overshoot last_int, then continue after it.
    while first_int <= last_int:
        # The widest possible block starting here is limited by the
        # alignment of first_int (its trailing zero bits).
        nbits = _count_righthand_zero_bits(first_int, ip_bits)
        current = None
        # Shrink the candidate block until its top address fits in range.
        while nbits >= 0:
            addend = 2**nbits - 1
            current = first_int + addend
            nbits -= 1
            if current <= last_int:
                break
        prefix = _get_prefix_length(first_int, current, ip_bits)
        net = ip('%s/%d' % (str(first), prefix))
        networks.append(net)
        if current == ip._ALL_ONES:
            # Reached the top of the address space; avoid wrapping around.
            break
        first_int = current + 1
        first = IPAddress(first_int, version=first._version)
    return networks
def _collapse_address_list_recursive(addresses):
    """Loops through the addresses, collapsing concurrent netblocks.

    Example:

        ip1 = IPv4Network('1.1.0.0/24')
        ip2 = IPv4Network('1.1.1.0/24')
        ip3 = IPv4Network('1.1.2.0/24')
        ip4 = IPv4Network('1.1.3.0/24')
        ip5 = IPv4Network('1.1.4.0/24')
        ip6 = IPv4Network('1.1.0.1/22')

        _collapse_address_list_recursive([ip1, ip2, ip3, ip4, ip5, ip6]) ->
          [IPv4Network('1.1.0.0/22'), IPv4Network('1.1.4.0/24')]

    This shouldn't be called directly; it is called via
      collapse_address_list([]).

    Args:
        addresses: A list of IPv4Network's or IPv6Network's

    Returns:
        A list of IPv4Network's or IPv6Network's depending on what we were
        passed.

    """
    collapsed = []
    changed = False
    for net in addresses:
        if not collapsed:
            collapsed.append(net)
        elif net in collapsed[-1]:
            # Already covered by the previous (larger) netblock.
            changed = True
        elif net == collapsed[-1].supernet().subnet()[1]:
            # net is the upper sibling of the previous block: merge the
            # two into their common supernet.
            collapsed[-1] = collapsed[-1].supernet()
            changed = True
        else:
            collapsed.append(net)

    # A merge may have created a new mergeable pair; recurse until stable.
    if changed:
        return _collapse_address_list_recursive(collapsed)
    return collapsed
def collapse_address_list(addresses):
    """Collapse a list of IP objects.

    Example:
        collapse_address_list([IPv4('1.1.0.0/24'), IPv4('1.1.1.0/24')]) ->
          [IPv4('1.1.0.0/23')]

    Args:
        addresses: A list of IPv4Network or IPv6Network objects.

    Returns:
        A list of IPv4Network or IPv6Network objects depending on what we
        were passed.

    Raises:
        TypeError: If passed a list of mixed version objects.

    """
    i = 0
    addrs = []
    ips = []
    nets = []

    # split IP addresses and networks
    for ip in addresses:
        if isinstance(ip, _BaseIP):
            if ips and ips[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                    str(ip), str(ips[-1])))
            ips.append(ip)
        elif ip._prefixlen == ip._max_prefixlen:
            # A single-address network (/32 or /128) is treated as its
            # bare address.
            if ips and ips[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                    str(ip), str(ips[-1])))
            ips.append(ip.ip)
        else:
            if nets and nets[-1]._version != ip._version:
                # Bug fix: the mismatching element is nets[-1], not
                # ips[-1] (which may not even exist here, giving an
                # IndexError instead of the intended TypeError).
                raise TypeError("%s and %s are not of the same version" % (
                    str(ip), str(nets[-1])))
            nets.append(ip)

    # sort and dedup
    ips = sorted(set(ips))
    nets = sorted(set(nets))

    # Turn each maximal run of consecutive addresses into networks.
    while i < len(ips):
        (first, last) = _find_address_range(ips[i:])
        i = ips.index(last) + 1
        addrs.extend(summarize_address_range(first, last))

    return _collapse_address_list_recursive(sorted(
        addrs + nets, key=_BaseNet._get_networks_key))
# backwards compatibility
CollapseAddrList = collapse_address_list  # legacy CamelCase alias kept for old callers
# We need to distinguish between the string and packed-bytes representations
# of an IP address. For example, b'0::1' is the IPv4 address 48.58.58.49,
# while '0::1' is an IPv6 address.
#
# In Python 3, the native 'bytes' type already provides this functionality,
# so we use it directly. For earlier implementations where bytes is not a
# distinct type, we create a subclass of str to serve as a tag.
#
# Usage example (Python 2):
#   ip = ipaddr.IPAddress(ipaddr.Bytes('xxxx'))
#
# Usage example (Python 3):
#   ip = ipaddr.IPAddress(b'xxxx')
try:
    if bytes is str:
        # Python 2: 'bytes' is just an alias of str, so it cannot act as a
        # distinct tag type; fall through to the subclass below.
        raise TypeError("bytes is not a distinct type")
    Bytes = bytes
except (NameError, TypeError):
    # NameError covers very old Pythons with no 'bytes' builtin at all.
    class Bytes(str):
        def __repr__(self):
            return 'Bytes(%s)' % str.__repr__(self)
def get_mixed_type_key(obj):
    """Return a key suitable for sorting between networks and addresses.

    Address and Network objects are not directly comparable: the
    expression IPv4Address('1.1.1.1') <= IPv4Network('1.1.1.1/24') makes
    no sense on its own.  When a mixed collection must be sorted anyway,
    pass this function as the key= argument to sorted().

    Args:
        obj: either a Network or Address object.

    Returns:
        appropriate key.

    """
    if isinstance(obj, _BaseNet):
        return obj._get_networks_key()
    if isinstance(obj, _BaseIP):
        return obj._get_address_key()
    return NotImplemented
class _IPAddrBase(object):
"""The mother class."""
def __index__(self):
return self._ip
def __int__(self):
return self._ip
def __hex__(self):
return hex(self._ip)
@property
def exploded(self):
"""Return the longhand version of the IP address as a string."""
return self._explode_shorthand_ip_string()
@property
def compressed(self):
"""Return the shorthand version of the IP address as a string."""
return str(self)
class _BaseIP(_IPAddrBase):

    """A generic IP object.

    Version-independent behaviour shared by single IP addresses.
    """

    def __eq__(self, other):
        try:
            return (self._ip == other._ip
                    and self._version == other._version)
        except AttributeError:
            return NotImplemented

    def __ne__(self, other):
        equal = self.__eq__(other)
        if equal is NotImplemented:
            return NotImplemented
        return not equal

    def __lt__(self, other):
        # The version check deliberately precedes the type check; the two
        # raise different messages and historical behaviour is preserved.
        if self._version != other._version:
            raise TypeError('%s and %s are not of the same version' % (
                str(self), str(other)))
        if not isinstance(other, _BaseIP):
            raise TypeError('%s and %s are not of the same type' % (
                str(self), str(other)))
        return self._ip < other._ip

    def __gt__(self, other):
        if self._version != other._version:
            raise TypeError('%s and %s are not of the same version' % (
                str(self), str(other)))
        if not isinstance(other, _BaseIP):
            raise TypeError('%s and %s are not of the same type' % (
                str(self), str(other)))
        return self._ip > other._ip

    def __le__(self, other):
        greater = self.__gt__(other)
        if greater is NotImplemented:
            return NotImplemented
        return not greater

    def __ge__(self, other):
        lesser = self.__lt__(other)
        if lesser is NotImplemented:
            return NotImplemented
        return not lesser

    # Integer offsets only: adding or subtracting two addresses is
    # deliberately unsupported.
    def __add__(self, other):
        if not isinstance(other, int):
            return NotImplemented
        return IPAddress(int(self) + other, version=self._version)

    def __sub__(self, other):
        if not isinstance(other, int):
            return NotImplemented
        return IPAddress(int(self) - other, version=self._version)

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, str(self))

    def __str__(self):
        return self._string_from_ip_int(self._ip)

    def __hash__(self):
        return hash(hex(long(self._ip)))

    def _get_address_key(self):
        return (self._version, self)

    @property
    def version(self):
        raise NotImplementedError('BaseIP has no version')
class _BaseNet(_IPAddrBase):

    """A generic IP object.

    This IP class contains the version independent methods which are
    used by networks.
    """

    def __init__(self, address):
        # Memo-cache for the derived network/broadcast/hostmask
        # properties below.
        self._cache = {}

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, str(self))

    def iterhosts(self):
        """Generate Iterator over usable hosts in a network.

        This is like __iter__ except it doesn't return the network
        or broadcast addresses.

        """
        cur = int(self.network) + 1
        bcast = int(self.broadcast) - 1
        while cur <= bcast:
            cur += 1
            yield IPAddress(cur - 1, version=self._version)

    def __iter__(self):
        # Yields every address, including network and broadcast.
        cur = int(self.network)
        bcast = int(self.broadcast)
        while cur <= bcast:
            cur += 1
            yield IPAddress(cur - 1, version=self._version)

    def __getitem__(self, n):
        # Index into the network's addresses; negative indices count
        # back from the broadcast address (n == -1 is the broadcast).
        network = int(self.network)
        broadcast = int(self.broadcast)
        if n >= 0:
            if network + n > broadcast:
                raise IndexError
            return IPAddress(network + n, version=self._version)
        else:
            n += 1
            if broadcast + n < network:
                raise IndexError
            return IPAddress(broadcast + n, version=self._version)

    def __lt__(self, other):
        # The version check deliberately precedes the type check; they
        # raise different messages.
        if self._version != other._version:
            raise TypeError('%s and %s are not of the same version' % (
                str(self), str(other)))
        if not isinstance(other, _BaseNet):
            raise TypeError('%s and %s are not of the same type' % (
                str(self), str(other)))
        if self.network != other.network:
            return self.network < other.network
        if self.netmask != other.netmask:
            return self.netmask < other.netmask
        return False

    def __gt__(self, other):
        if self._version != other._version:
            raise TypeError('%s and %s are not of the same version' % (
                str(self), str(other)))
        if not isinstance(other, _BaseNet):
            raise TypeError('%s and %s are not of the same type' % (
                str(self), str(other)))
        if self.network != other.network:
            return self.network > other.network
        if self.netmask != other.netmask:
            return self.netmask > other.netmask
        return False

    def __le__(self, other):
        gt = self.__gt__(other)
        if gt is NotImplemented:
            return NotImplemented
        return not gt

    def __ge__(self, other):
        lt = self.__lt__(other)
        if lt is NotImplemented:
            return NotImplemented
        return not lt

    def __eq__(self, other):
        try:
            return (self._version == other._version
                    and self.network == other.network
                    and int(self.netmask) == int(other.netmask))
        except AttributeError:
            # A bare address equals a network only if it matches this
            # network's base address.
            if isinstance(other, _BaseIP):
                return (self._version == other._version
                        and self._ip == other._ip)
        # NOTE(review): falls through to an implicit None (falsy) for
        # objects that are neither networks nor addresses -- confirm
        # callers only rely on truthiness here.

    def __ne__(self, other):
        eq = self.__eq__(other)
        if eq is NotImplemented:
            return NotImplemented
        return not eq

    def __str__(self):
        return '%s/%s' % (str(self.ip),
                          str(self._prefixlen))

    def __hash__(self):
        return hash(int(self.network) ^ int(self.netmask))

    def __contains__(self, other):
        # always false if one is v4 and the other is v6.
        if self._version != other._version:
            return False
        # dealing with another network.
        if isinstance(other, _BaseNet):
            return (self.network <= other.network and
                    self.broadcast >= other.broadcast)
        # dealing with another address
        else:
            return (int(self.network) <= int(other._ip) <=
                    int(self.broadcast))

    def overlaps(self, other):
        """Tell if self is partly contained in other."""
        return self.network in other or self.broadcast in other or (
            other.network in self or other.broadcast in self)

    @property
    def network(self):
        # Network address (host bits zeroed); computed once and cached.
        x = self._cache.get('network')
        if x is None:
            x = IPAddress(self._ip & int(self.netmask), version=self._version)
            self._cache['network'] = x
        return x

    @property
    def broadcast(self):
        # Broadcast address (host bits set); computed once and cached.
        x = self._cache.get('broadcast')
        if x is None:
            x = IPAddress(self._ip | int(self.hostmask), version=self._version)
            self._cache['broadcast'] = x
        return x

    @property
    def hostmask(self):
        # Bitwise complement of the netmask; computed once and cached.
        x = self._cache.get('hostmask')
        if x is None:
            x = IPAddress(int(self.netmask) ^ self._ALL_ONES,
                          version=self._version)
            self._cache['hostmask'] = x
        return x

    @property
    def with_prefixlen(self):
        # E.g. '1.2.3.4/27'.
        return '%s/%d' % (str(self.ip), self._prefixlen)

    @property
    def with_netmask(self):
        # E.g. '1.2.3.4/255.255.255.224'.
        return '%s/%s' % (str(self.ip), str(self.netmask))

    @property
    def with_hostmask(self):
        # E.g. '1.2.3.4/0.0.0.31'.
        return '%s/%s' % (str(self.ip), str(self.hostmask))

    @property
    def numhosts(self):
        """Number of hosts in the current subnet."""
        return int(self.broadcast) - int(self.network) + 1

    @property
    def version(self):
        raise NotImplementedError('BaseNet has no version')

    @property
    def prefixlen(self):
        return self._prefixlen

    def address_exclude(self, other):
        """Remove an address from a larger block.

        For example:

            addr1 = IPNetwork('10.1.1.0/24')
            addr2 = IPNetwork('10.1.1.0/26')
            addr1.address_exclude(addr2) =
                [IPNetwork('10.1.1.64/26'), IPNetwork('10.1.1.128/25')]

        or IPv6:

            addr1 = IPNetwork('::1/32')
            addr2 = IPNetwork('::1/128')
            addr1.address_exclude(addr2) = [IPNetwork('::0/128'),
                IPNetwork('::2/127'),
                IPNetwork('::4/126'),
                IPNetwork('::8/125'),
                ...
                IPNetwork('0:0:8000::/33')]

        Args:
            other: An IPvXNetwork object of the same type.

        Returns:
            A sorted list of IPvXNetwork objects addresses which is self
            minus other.

        Raises:
            TypeError: If self and other are of difffering address
              versions, or if other is not a network object.
            ValueError: If other is not completely contained by self.

        """
        if not self._version == other._version:
            raise TypeError("%s and %s are not of the same version" % (
                str(self), str(other)))

        if not isinstance(other, _BaseNet):
            raise TypeError("%s is not a network object" % str(other))

        if other not in self:
            raise ValueError('%s not contained in %s' % (str(other),
                                                         str(self)))
        if other == self:
            return []

        ret_addrs = []

        # Make sure we're comparing the network of other.
        other = IPNetwork('%s/%s' % (str(other.network), str(other.prefixlen)),
                          version=other._version)

        # Repeatedly split self in half: keep the half that does not
        # contain other, and descend into the half that does, until one
        # half *is* other.
        s1, s2 = self.subnet()
        while s1 != other and s2 != other:
            if other in s1:
                ret_addrs.append(s2)
                s1, s2 = s1.subnet()
            elif other in s2:
                ret_addrs.append(s1)
                s1, s2 = s2.subnet()
            else:
                # If we got here, there's a bug somewhere.
                assert True == False, ('Error performing exclusion: '
                                      's1: %s s2: %s other: %s' %
                                      (str(s1), str(s2), str(other)))
        if s1 == other:
            ret_addrs.append(s2)
        elif s2 == other:
            ret_addrs.append(s1)
        else:
            # If we got here, there's a bug somewhere.
            assert True == False, ('Error performing exclusion: '
                                  's1: %s s2: %s other: %s' %
                                  (str(s1), str(s2), str(other)))

        return sorted(ret_addrs, key=_BaseNet._get_networks_key)

    def compare_networks(self, other):
        """Compare two IP objects.

        This is only concerned about the comparison of the integer
        representation of the network addresses.  This means that the
        host bits aren't considered at all in this method.  If you want
        to compare host bits, you can easily enough do a
        'HostA._ip < HostB._ip'

        Args:
            other: An IP object.

        Returns:
            If the IP versions of self and other are the same, returns:

            -1 if self < other:
              eg: IPv4('1.1.1.0/24') < IPv4('1.1.2.0/24')
              IPv6('1080::200C:417A') < IPv6('1080::200B:417B')
            0 if self == other
              eg: IPv4('1.1.1.1/24') == IPv4('1.1.1.2/24')
              IPv6('1080::200C:417A/96') == IPv6('1080::200C:417B/96')
            1 if self > other
              eg: IPv4('1.1.1.0/24') > IPv4('1.1.0.0/24')
              IPv6('1080::1:200C:417A/112') >
              IPv6('1080::0:200C:417A/112')

            If the IP versions of self and other are different, returns:

            -1 if self._version < other._version
              eg: IPv4('10.0.0.1/24') < IPv6('::1/128')
            1 if self._version > other._version
              eg: IPv6('::1/128') > IPv4('255.255.255.0/24')

        """
        if self._version < other._version:
            return -1
        if self._version > other._version:
            return 1
        # self._version == other._version below here:
        if self.network < other.network:
            return -1
        if self.network > other.network:
            return 1
        # self.network == other.network below here:
        if self.netmask < other.netmask:
            return -1
        if self.netmask > other.netmask:
            return 1
        # self.network == other.network and self.netmask == other.netmask
        return 0

    def _get_networks_key(self):
        """Network-only key function.

        Returns an object that identifies this address' network and
        netmask. This function is a suitable "key" argument for sorted()
        and list.sort().

        """
        return (self._version, self.network, self.netmask)

    def _ip_int_from_prefix(self, prefixlen=None):
        """Turn the prefix length netmask into a int for comparison.

        Args:
            prefixlen: An integer, the prefix length.

        Returns:
            An integer.

        """
        # None (but not 0) means "use this network's own prefix length".
        if not prefixlen and prefixlen != 0:
            prefixlen = self._prefixlen
        return self._ALL_ONES ^ (self._ALL_ONES >> prefixlen)

    def _prefix_from_ip_int(self, ip_int, mask=32):
        """Return prefix length from the decimal netmask.

        Args:
            ip_int: An integer, the IP address.
            mask: The netmask.  Defaults to 32.

        Returns:
            An integer, the prefix length.

        """
        # Counts trailing zero bits and subtracts them from the mask
        # width.  NOTE(review): assumes a contiguous netmask; the high
        # bits are not verified to be ones -- confirm callers validate.
        while mask:
            if ip_int & 1 == 1:
                break
            ip_int >>= 1
            mask -= 1
        return mask

    def _ip_string_from_prefix(self, prefixlen=None):
        """Turn a prefix length into a dotted decimal string.

        Args:
            prefixlen: An integer, the netmask prefix length.

        Returns:
            A string, the dotted decimal netmask string.

        """
        # NOTE(review): unlike _ip_int_from_prefix, a prefixlen of 0 is
        # treated as "unset" here and replaced by self._prefixlen --
        # confirm this asymmetry is intended.
        if not prefixlen:
            prefixlen = self._prefixlen
        return self._string_from_ip_int(self._ip_int_from_prefix(prefixlen))

    def iter_subnets(self, prefixlen_diff=1, new_prefix=None):
        """The subnets which join to make the current subnet.

        In the case that self contains only one IP
        (self._prefixlen == 32 for IPv4 or self._prefixlen == 128
        for IPv6), return a list with just ourself.

        Args:
            prefixlen_diff: An integer, the amount the prefix length
              should be increased by. This should not be set if
              new_prefix is also set.
            new_prefix: The desired new prefix length. This must be a
              larger number (smaller prefix) than the existing prefix.
              This should not be set if prefixlen_diff is also set.

        Returns:
            An iterator of IPv(4|6) objects.

        Raises:
            ValueError: The prefixlen_diff is too small or too large.
                OR
            prefixlen_diff and new_prefix are both set or new_prefix
              is a smaller number than the current prefix (smaller
              number means a larger network)

        """
        if self._prefixlen == self._max_prefixlen:
            yield self
            return

        # new_prefix is an alternative spelling of prefixlen_diff; the
        # two are mutually exclusive (prefixlen_diff must stay at its
        # default of 1 when new_prefix is given).
        if new_prefix is not None:
            if new_prefix < self._prefixlen:
                raise ValueError('new prefix must be longer')
            if prefixlen_diff != 1:
                raise ValueError('cannot set prefixlen_diff and new_prefix')
            prefixlen_diff = new_prefix - self._prefixlen

        if prefixlen_diff < 0:
            raise ValueError('prefix length diff must be > 0')
        new_prefixlen = self._prefixlen + prefixlen_diff

        if not self._is_valid_netmask(str(new_prefixlen)):
            raise ValueError(
                'prefix length diff %d is invalid for netblock %s' % (
                    new_prefixlen, str(self)))

        # Walk the address space from our network address upwards, one
        # subnet-sized stride at a time.
        first = IPNetwork('%s/%s' % (str(self.network),
                                     str(self._prefixlen + prefixlen_diff)),
                         version=self._version)
        yield first
        current = first
        while True:
            broadcast = current.broadcast
            if broadcast == self.broadcast:
                return
            new_addr = IPAddress(int(broadcast) + 1, version=self._version)
            current = IPNetwork('%s/%s' % (str(new_addr), str(new_prefixlen)),
                                version=self._version)
            yield current

    def masked(self):
        """Return the network object with the host bits masked out."""
        return IPNetwork('%s/%d' % (self.network, self._prefixlen),
                         version=self._version)

    def subnet(self, prefixlen_diff=1, new_prefix=None):
        """Return a list of subnets, rather than an iterator."""
        return list(self.iter_subnets(prefixlen_diff, new_prefix))

    def supernet(self, prefixlen_diff=1, new_prefix=None):
        """The supernet containing the current network.

        Args:
            prefixlen_diff: An integer, the amount the prefix length of
              the network should be decreased by.  For example, given a
              /24 network and a prefixlen_diff of 3, a supernet with a
              /21 netmask is returned.

        Returns:
            An IPv4 network object.

        Raises:
            ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have a
              negative prefix length.
                OR
            If prefixlen_diff and new_prefix are both set or new_prefix is a
              larger number than the current prefix (larger number means a
              smaller network)

        """
        if self._prefixlen == 0:
            return self

        # new_prefix is an alternative spelling of prefixlen_diff; the
        # two are mutually exclusive.
        if new_prefix is not None:
            if new_prefix > self._prefixlen:
                raise ValueError('new prefix must be shorter')
            if prefixlen_diff != 1:
                raise ValueError('cannot set prefixlen_diff and new_prefix')
            prefixlen_diff = self._prefixlen - new_prefix

        if self.prefixlen - prefixlen_diff < 0:
            raise ValueError(
                'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
                (self.prefixlen, prefixlen_diff))
        return IPNetwork('%s/%s' % (str(self.network),
                                    str(self.prefixlen - prefixlen_diff)),
                         version=self._version)

    # backwards compatibility
    Subnet = subnet
    Supernet = supernet
    AddressExclude = address_exclude
    CompareNetworks = compare_networks
    Contains = __contains__
class _BaseV4(object):

    """Base IPv4 object.

    Version-4-specific helpers shared by IPv4 addresses and networks.
    """

    # Equivalent to 255.255.255.255 or 32 bits of 1's.
    _ALL_ONES = (2**IPV4LENGTH) - 1
    _DECIMAL_DIGITS = frozenset('0123456789')

    def __init__(self, address):
        self._version = 4
        self._max_prefixlen = IPV4LENGTH

    def _explode_shorthand_ip_string(self):
        # IPv4 has no shorthand form, so the exploded form is just str().
        return str(self)

    def _ip_int_from_string(self, ip_str):
        """Convert a dotted-quad string to its integer value.

        Args:
            ip_str: A string, the IP ip_str.

        Returns:
            The IP ip_str as an integer.

        Raises:
            AddressValueError: if ip_str isn't a valid IPv4 Address.

        """
        parts = ip_str.split('.')
        if len(parts) != 4:
            raise AddressValueError(ip_str)
        try:
            value = 0
            for part in parts:
                value = (value << 8) | self._parse_octet(part)
        except ValueError:
            raise AddressValueError(ip_str)
        return value

    def _parse_octet(self, octet_str):
        """Convert one decimal octet into an integer.

        Args:
            octet_str: A string, the number to parse.

        Returns:
            The octet as an integer.

        Raises:
            ValueError: if the octet isn't strictly a decimal [0..255].

        """
        # Whitelist the characters, since int() allows a lot of bizarre
        # stuff (e.g. '+1', whitespace).
        if not self._DECIMAL_DIGITS.issuperset(octet_str):
            raise ValueError
        octet_int = int(octet_str, 10)
        if octet_int > 255:
            raise ValueError
        # Disallow leading zeroes, because no clear standard exists on
        # whether these should be interpreted as decimal or octal.
        if len(octet_str) > 1 and octet_str[0] == '0':
            raise ValueError
        return octet_int

    def _string_from_ip_int(self, ip_int):
        """Turns a 32-bit integer into dotted decimal notation.

        Args:
            ip_int: An integer, the IP address.

        Returns:
            The IP address as a string in dotted decimal notation.

        """
        # Peel off the low byte four times, then reverse to get
        # most-significant-first ordering.
        octets = []
        for _ in xrange(4):
            octets.append(str(ip_int & 0xFF))
            ip_int >>= 8
        return '.'.join(reversed(octets))

    @property
    def max_prefixlen(self):
        return self._max_prefixlen

    @property
    def packed(self):
        """The binary representation of this address."""
        return v4_int_to_packed(self._ip)

    @property
    def version(self):
        return self._version

    @property
    def is_reserved(self):
        """True if the address is within the IETF-reserved 240.0.0.0/4."""
        return self in IPv4Network('240.0.0.0/4')

    @property
    def is_private(self):
        """True if the address is in an RFC 1918 private range."""
        return (self in IPv4Network('10.0.0.0/8') or
                self in IPv4Network('172.16.0.0/12') or
                self in IPv4Network('192.168.0.0/16'))

    @property
    def is_multicast(self):
        """True if the address is in 224.0.0.0/4 (multicast, RFC 3171)."""
        return self in IPv4Network('224.0.0.0/4')

    @property
    def is_unspecified(self):
        """True if this is the unspecified address (RFC 5735 3)."""
        return self in IPv4Network('0.0.0.0')

    @property
    def is_loopback(self):
        """True if the address is in 127.0.0.0/8 (loopback, RFC 3330)."""
        return self in IPv4Network('127.0.0.0/8')

    @property
    def is_link_local(self):
        """True if the address is in 169.254.0.0/16 (RFC 3927)."""
        return self in IPv4Network('169.254.0.0/16')
class IPv4Address(_BaseV4, _BaseIP):

    """Represent and manipulate single IPv4 Addresses."""

    def __init__(self, address):
        """
        Args:
            address: A string or integer representing the IP
              '192.168.1.1'

              Additionally, an integer can be passed, so
              IPv4Address('192.168.1.1') == IPv4Address(3232235777).
              or, more generally
              IPv4Address(int(IPv4Address('192.168.1.1'))) ==
                IPv4Address('192.168.1.1')

        Raises:
            AddressValueError: If ipaddr isn't a valid IPv4 address.

        """
        _BaseV4.__init__(self, address)

        # Integer input: validate the 32-bit range and store directly.
        if isinstance(address, (int, long)):
            self._ip = address
            if address < 0 or address > self._ALL_ONES:
                raise AddressValueError(address)
            return

        # Packed binary input (4 network-order bytes).
        if isinstance(address, Bytes):
            try:
                self._ip, = struct.unpack('!I', address)
            except struct.error:
                raise AddressValueError(address)  # Wrong length.
            return

        # Anything else is treated as (convertible to) a dotted-quad
        # string.
        self._ip = self._ip_int_from_string(str(address))
class IPv4Network(_BaseV4, _BaseNet):

    """This class represents and manipulates 32-bit IPv4 networks.

    Attributes: [examples for IPv4Network('1.2.3.4/27')]
        ._ip: 16909060
        .ip: IPv4Address('1.2.3.4')
        .network: IPv4Address('1.2.3.0')
        .hostmask: IPv4Address('0.0.0.31')
        .broadcast: IPv4Address('1.2.3.31')
        .netmask: IPv4Address('255.255.255.224')
        .prefixlen: 27

    """

    # the valid octets for host and netmasks. only useful for IPv4.
    _valid_mask_octets = set((255, 254, 252, 248, 240, 224, 192, 128, 0))

    def __init__(self, address, strict=False):
        """Instantiate a new IPv4 network object.

        Args:
            address: A string or integer representing the IP [& network].
              '192.168.1.1/24'
              '192.168.1.1/255.255.255.0'
              '192.168.1.1/0.0.0.255'
              are all functionally the same in IPv4. Similarly,
              '192.168.1.1'
              '192.168.1.1/255.255.255.255'
              '192.168.1.1/32'
              are also functionally equivalent. That is to say, failing to
              provide a subnetmask will create an object with a mask of /32.

              If the mask (portion after the / in the argument) is given in
              dotted quad form, it is treated as a netmask if it starts with a
              non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
              starts with a zero field (e.g. 0.255.255.255 == /8), with the
              single exception of an all-zero mask which is treated as a
              netmask == /0. If no mask is given, a default of /32 is used.

              Additionally, an integer can be passed, so
              IPv4Network('192.168.1.1') == IPv4Network(3232235777).
              or, more generally
              IPv4Network(int(IPv4Network('192.168.1.1'))) ==
                IPv4Network('192.168.1.1')

            strict: A boolean. If true, ensure that we have been passed
              A true network address, eg, 192.168.1.0/24 and not an
              IP address on a network, eg, 192.168.1.1/24.

        Raises:
            AddressValueError: If ipaddr isn't a valid IPv4 address.
            NetmaskValueError: If the netmask isn't valid for
              an IPv4 address.
            ValueError: If strict was True and a network address was not
              supplied.

        """
        _BaseNet.__init__(self, address)
        _BaseV4.__init__(self, address)

        # Constructing from an integer or packed bytes: always a /32.
        if isinstance(address, (int, long, Bytes)):
            self.ip = IPv4Address(address)
            self._ip = self.ip._ip
            self._prefixlen = self._max_prefixlen
            self.netmask = IPv4Address(self._ALL_ONES)
            return

        # Assume input argument to be string or any object representation
        # which converts into a formatted IP prefix string.
        addr = str(address).split('/')

        if len(addr) > 2:
            raise AddressValueError(address)

        self._ip = self._ip_int_from_string(addr[0])
        self.ip = IPv4Address(self._ip)

        if len(addr) == 2:
            mask = addr[1].split('.')
            if len(mask) == 4:
                # We have dotted decimal netmask.
                if self._is_valid_netmask(addr[1]):
                    self.netmask = IPv4Address(self._ip_int_from_string(
                        addr[1]))
                elif self._is_hostmask(addr[1]):
                    # A hostmask (e.g. 0.0.0.255) is stored as its
                    # complementary netmask.
                    self.netmask = IPv4Address(
                        self._ip_int_from_string(addr[1]) ^ self._ALL_ONES)
                else:
                    raise NetmaskValueError('%s is not a valid netmask'
                                                     % addr[1])
                self._prefixlen = self._prefix_from_ip_int(int(self.netmask))
            else:
                # We have a netmask in prefix length form.
                if not self._is_valid_netmask(addr[1]):
                    raise NetmaskValueError(addr[1])
                self._prefixlen = int(addr[1])
                self.netmask = IPv4Address(self._ip_int_from_prefix(
                    self._prefixlen))
        else:
            # No mask given: default to a single-address /32 network.
            self._prefixlen = self._max_prefixlen
            self.netmask = IPv4Address(self._ip_int_from_prefix(
                self._prefixlen))
        if strict:
            if self.ip != self.network:
                raise ValueError('%s has host bits set' %
                                 self.ip)
        if self._prefixlen == (self._max_prefixlen - 1):
            # /31 networks: iterate both addresses as hosts (presumably
            # per RFC 3021 point-to-point links -- confirm).
            self.iterhosts = self.__iter__

    def _is_hostmask(self, ip_str):
        """Test if the IP string is a hostmask (rather than a netmask).

        Args:
            ip_str: A string, the potential hostmask.

        Returns:
            A boolean, True if the IP string is a hostmask.

        """
        bits = ip_str.split('.')
        try:
            # Keep only octets that are valid mask values; any other
            # value disqualifies the string below.
            parts = [int(x) for x in bits if int(x) in self._valid_mask_octets]
        except ValueError:
            return False
        if len(parts) != len(bits):
            return False
        # Hostmask octets ascend (e.g. 0.0.0.255); netmask octets do not.
        if parts[0] < parts[-1]:
            return True
        return False

    def _is_valid_netmask(self, netmask):
        """Verify that the netmask is valid.

        Args:
            netmask: A string, either a prefix or dotted decimal
              netmask.

        Returns:
            A boolean, True if the prefix represents a valid IPv4
            netmask.

        """
        mask = netmask.split('.')
        if len(mask) == 4:
            if [x for x in mask if int(x) not in self._valid_mask_octets]:
                return False
            # NOTE(review): the octets are compared as *strings* here;
            # this matches numeric order only for the already-validated
            # mask octet values -- confirm before touching this check.
            if [y for idx, y in enumerate(mask) if idx > 0 and
                y > mask[idx - 1]]:
                return False
            return True
        # Otherwise treat the argument as a prefix length.
        try:
            netmask = int(netmask)
        except ValueError:
            return False
        return 0 <= netmask <= self._max_prefixlen

    # backwards compatibility
    IsRFC1918 = lambda self: self.is_private
    IsMulticast = lambda self: self.is_multicast
    IsLoopback = lambda self: self.is_loopback
    IsLinkLocal = lambda self: self.is_link_local
class _BaseV6(object):
    """Base IPv6 object.
    The following methods are used by IPv6 objects in both single IP
    addresses and networks.
    """
    # 128 bits of all ones -- the largest representable address value.
    _ALL_ONES = (2**IPV6LENGTH) - 1
    # Number of 16-bit groups ("hextets") in a full IPv6 address.
    _HEXTET_COUNT = 8
    _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
    def __init__(self, address):
        self._version = 6
        self._max_prefixlen = IPV6LENGTH
    def _ip_int_from_string(self, ip_str):
        """Turn an IPv6 ip_str into an integer.
        Args:
            ip_str: A string, the IPv6 ip_str.
        Returns:
            A long, the IPv6 ip_str.
        Raises:
            AddressValueError: if ip_str isn't a valid IPv6 Address.
        """
        parts = ip_str.split(':')
        # An IPv6 address needs at least 2 colons (3 parts).
        if len(parts) < 3:
            raise AddressValueError(ip_str)
        # If the address has an IPv4-style suffix, convert it to hexadecimal.
        if '.' in parts[-1]:
            ipv4_int = IPv4Address(parts.pop())._ip
            parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
            parts.append('%x' % (ipv4_int & 0xFFFF))
        # An IPv6 address can't have more than 8 colons (9 parts).
        if len(parts) > self._HEXTET_COUNT + 1:
            raise AddressValueError(ip_str)
        # Disregarding the endpoints, find '::' with nothing in between.
        # This indicates that a run of zeroes has been skipped.
        try:
            # Tuple unpacking raises ValueError when more than one inner
            # part is empty, i.e. the string contains two '::' runs.
            skip_index, = (
                [i for i in xrange(1, len(parts) - 1) if not parts[i]] or
                [None])
        except ValueError:
            # Can't have more than one '::'
            raise AddressValueError(ip_str)
        # parts_hi is the number of parts to copy from above/before the '::'
        # parts_lo is the number of parts to copy from below/after the '::'
        if skip_index is not None:
            # If we found a '::', then check if it also covers the endpoints.
            parts_hi = skip_index
            parts_lo = len(parts) - skip_index - 1
            if not parts[0]:
                parts_hi -= 1
                if parts_hi:
                    raise AddressValueError(ip_str)  # ^: requires ^::
            if not parts[-1]:
                parts_lo -= 1
                if parts_lo:
                    raise AddressValueError(ip_str)  # :$ requires ::$
            parts_skipped = self._HEXTET_COUNT - (parts_hi + parts_lo)
            if parts_skipped < 1:
                raise AddressValueError(ip_str)
        else:
            # Otherwise, allocate the entire address to parts_hi. The endpoints
            # could still be empty, but _parse_hextet() will check for that.
            if len(parts) != self._HEXTET_COUNT:
                raise AddressValueError(ip_str)
            parts_hi = len(parts)
            parts_lo = 0
            parts_skipped = 0
        try:
            # Now, parse the hextets into a 128-bit integer.
            ip_int = 0L
            for i in xrange(parts_hi):
                ip_int <<= 16
                ip_int |= self._parse_hextet(parts[i])
            ip_int <<= 16 * parts_skipped
            for i in xrange(-parts_lo, 0):
                ip_int <<= 16
                ip_int |= self._parse_hextet(parts[i])
            return ip_int
        except ValueError:
            raise AddressValueError(ip_str)
    def _parse_hextet(self, hextet_str):
        """Convert an IPv6 hextet string into an integer.
        Args:
            hextet_str: A string, the number to parse.
        Returns:
            The hextet as an integer.
        Raises:
            ValueError: if the input isn't strictly a hex number from [0..FFFF].
        """
        # Whitelist the characters, since int() allows a lot of bizarre stuff.
        if not self._HEX_DIGITS.issuperset(hextet_str):
            raise ValueError
        hextet_int = int(hextet_str, 16)
        if hextet_int > 0xFFFF:
            raise ValueError
        return hextet_int
    def _compress_hextets(self, hextets):
        """Compresses a list of hextets.
        Compresses a list of strings, replacing the longest continuous
        sequence of "0" in the list with "" and adding empty strings at
        the beginning or at the end of the string such that subsequently
        calling ":".join(hextets) will produce the compressed version of
        the IPv6 address.
        Args:
            hextets: A list of strings, the hextets to compress.
        Returns:
            A list of strings.
        """
        best_doublecolon_start = -1
        best_doublecolon_len = 0
        doublecolon_start = -1
        doublecolon_len = 0
        # Single pass: track the current run of "0" hextets and remember
        # the longest one seen so far.
        for index in range(len(hextets)):
            if hextets[index] == '0':
                doublecolon_len += 1
                if doublecolon_start == -1:
                    # Start of a sequence of zeros.
                    doublecolon_start = index
                if doublecolon_len > best_doublecolon_len:
                    # This is the longest sequence of zeros so far.
                    best_doublecolon_len = doublecolon_len
                    best_doublecolon_start = doublecolon_start
            else:
                doublecolon_len = 0
                doublecolon_start = -1
        # Only compress runs of two or more zeros (a single zero is
        # shorter written out than as '::').
        if best_doublecolon_len > 1:
            best_doublecolon_end = (best_doublecolon_start +
                                    best_doublecolon_len)
            # For zeros at the end of the address.
            if best_doublecolon_end == len(hextets):
                hextets += ['']
            hextets[best_doublecolon_start:best_doublecolon_end] = ['']
            # For zeros at the beginning of the address.
            if best_doublecolon_start == 0:
                hextets = [''] + hextets
        return hextets
    def _string_from_ip_int(self, ip_int=None):
        """Turns a 128-bit integer into hexadecimal notation.
        Args:
            ip_int: An integer, the IP address.
        Returns:
            A string, the hexadecimal representation of the address.
        Raises:
            ValueError: The address is bigger than 128 bits of all ones.
        """
        # A falsy ip_int other than 0 (i.e. None) means "use self._ip".
        if not ip_int and ip_int != 0:
            ip_int = int(self._ip)
        if ip_int > self._ALL_ONES:
            raise ValueError('IPv6 address is too large')
        hex_str = '%032x' % ip_int
        hextets = []
        # Slice the zero-padded hex string into eight 4-digit hextets,
        # dropping each hextet's leading zeros via the int round-trip.
        for x in range(0, 32, 4):
            hextets.append('%x' % int(hex_str[x:x+4], 16))
        hextets = self._compress_hextets(hextets)
        return ':'.join(hextets)
    def _explode_shorthand_ip_string(self):
        """Expand a shortened IPv6 address.
        Args:
            ip_str: A string, the IPv6 address.
        Returns:
            A string, the expanded IPv6 address.
        """
        if isinstance(self, _BaseNet):
            ip_str = str(self.ip)
        else:
            ip_str = str(self)
        ip_int = self._ip_int_from_string(ip_str)
        parts = []
        # Emit all eight hextets zero-padded, least significant first,
        # then reverse into network order.
        for i in xrange(self._HEXTET_COUNT):
            parts.append('%04x' % (ip_int & 0xFFFF))
            ip_int >>= 16
        parts.reverse()
        if isinstance(self, _BaseNet):
            return '%s/%d' % (':'.join(parts), self.prefixlen)
        return ':'.join(parts)
    @property
    def max_prefixlen(self):
        return self._max_prefixlen
    @property
    def packed(self):
        """The binary representation of this address."""
        return v6_int_to_packed(self._ip)
    @property
    def version(self):
        return self._version
    @property
    def is_multicast(self):
        """Test if the address is reserved for multicast use.
        Returns:
            A boolean, True if the address is a multicast address.
        See RFC 2373 2.7 for details.
        """
        return self in IPv6Network('ff00::/8')
    @property
    def is_reserved(self):
        """Test if the address is otherwise IETF reserved.
        Returns:
            A boolean, True if the address is within one of the
            reserved IPv6 Network ranges.
        """
        return (self in IPv6Network('::/8') or
                self in IPv6Network('100::/8') or
                self in IPv6Network('200::/7') or
                self in IPv6Network('400::/6') or
                self in IPv6Network('800::/5') or
                self in IPv6Network('1000::/4') or
                self in IPv6Network('4000::/3') or
                self in IPv6Network('6000::/3') or
                self in IPv6Network('8000::/3') or
                self in IPv6Network('A000::/3') or
                self in IPv6Network('C000::/3') or
                self in IPv6Network('E000::/4') or
                self in IPv6Network('F000::/5') or
                self in IPv6Network('F800::/6') or
                self in IPv6Network('FE00::/9'))
    @property
    def is_unspecified(self):
        """Test if the address is unspecified.
        Returns:
            A boolean, True if this is the unspecified address as defined in
            RFC 2373 2.5.2.
        """
        # For network objects the prefix must also be /128; plain
        # addresses have no _prefixlen, hence the getattr default.
        return self._ip == 0 and getattr(self, '_prefixlen', 128) == 128
    @property
    def is_loopback(self):
        """Test if the address is a loopback address.
        Returns:
            A boolean, True if the address is a loopback address as defined in
            RFC 2373 2.5.3.
        """
        return self._ip == 1 and getattr(self, '_prefixlen', 128) == 128
    @property
    def is_link_local(self):
        """Test if the address is reserved for link-local.
        Returns:
            A boolean, True if the address is reserved per RFC 4291.
        """
        return self in IPv6Network('fe80::/10')
    @property
    def is_site_local(self):
        """Test if the address is reserved for site-local.
        Note that the site-local address space has been deprecated by RFC 3879.
        Use is_private to test if this address is in the space of unique local
        addresses as defined by RFC 4193.
        Returns:
            A boolean, True if the address is reserved per RFC 3513 2.5.6.
        """
        return self in IPv6Network('fec0::/10')
    @property
    def is_private(self):
        """Test if this address is allocated for private networks.
        Returns:
            A boolean, True if the address is reserved per RFC 4193.
        """
        return self in IPv6Network('fc00::/7')
    @property
    def ipv4_mapped(self):
        """Return the IPv4 mapped address.
        Returns:
            If the IPv6 address is a v4 mapped address, return the
            IPv4 mapped address. Return None otherwise.
        """
        # Mapped addresses have the form ::ffff:a.b.c.d.
        if (self._ip >> 32) != 0xFFFF:
            return None
        return IPv4Address(self._ip & 0xFFFFFFFF)
    @property
    def teredo(self):
        """Tuple of embedded teredo IPs.
        Returns:
            Tuple of the (server, client) IPs or None if the address
            doesn't appear to be a teredo address (doesn't start with
            2001::/32)
        """
        if (self._ip >> 96) != 0x20010000:
            return None
        # The client address is stored bit-inverted in the low 32 bits.
        return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
                IPv4Address(~self._ip & 0xFFFFFFFF))
    @property
    def sixtofour(self):
        """Return the IPv4 6to4 embedded address.
        Returns:
            The IPv4 6to4-embedded address if present or None if the
            address doesn't appear to contain a 6to4 embedded address.
        """
        if (self._ip >> 112) != 0x2002:
            return None
        return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
class IPv6Address(_BaseV6, _BaseIP):
    """Represent and manipulate single IPv6 Addresses.
    """
    def __init__(self, address):
        """Instantiate a new IPv6 address object.
        Args:
            address: A string or integer representing the IP
              Additionally, an integer can be passed, so
              IPv6Address('2001:4860::') ==
                IPv6Address(42541956101370907050197289607612071936L).
              or, more generally
              IPv6Address(IPv6Address('2001:4860::')._ip) ==
                IPv6Address('2001:4860::')
        Raises:
            AddressValueError: If address isn't a valid IPv6 address.
        """
        _BaseV6.__init__(self, address)
        # Efficient constructor from integer.
        if isinstance(address, (int, long)):
            self._ip = address
            if address < 0 or address > self._ALL_ONES:
                raise AddressValueError(address)
            return
        # Constructing from a packed address
        if isinstance(address, Bytes):
            try:
                # Two network-order 64-bit words make up the 128-bit value.
                hi, lo = struct.unpack('!QQ', address)
            except struct.error:
                raise AddressValueError(address)  # Wrong length.
            self._ip = (hi << 64) | lo
            return
        # Assume input argument to be string or any object representation
        # which converts into a formatted IP string.
        addr_str = str(address)
        if not addr_str:
            raise AddressValueError('')
        self._ip = self._ip_int_from_string(addr_str)
class IPv6Network(_BaseV6, _BaseNet):
    """This class represents and manipulates 128-bit IPv6 networks.
    Attributes: [examples for IPv6('2001:658:22A:CAFE:200::1/64')]
        .ip: IPv6Address('2001:658:22a:cafe:200::1')
        .network: IPv6Address('2001:658:22a:cafe::')
        .hostmask: IPv6Address('::ffff:ffff:ffff:ffff')
        .broadcast: IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff')
        .netmask: IPv6Address('ffff:ffff:ffff:ffff::')
        .prefixlen: 64
    """
    def __init__(self, address, strict=False):
        """Instantiate a new IPv6 Network object.
        Args:
            address: A string or integer representing the IPv6 network or the IP
              and prefix/netmask.
              '2001:4860::/128'
              '2001:4860:0000:0000:0000:0000:0000:0000/128'
              '2001:4860::'
              are all functionally the same in IPv6.  That is to say,
              failing to provide a subnetmask will create an object with
              a mask of /128.
              Additionally, an integer can be passed, so
              IPv6Network('2001:4860::') ==
                IPv6Network(42541956101370907050197289607612071936L).
              or, more generally
              IPv6Network(IPv6Network('2001:4860::')._ip) ==
                IPv6Network('2001:4860::')
            strict: A boolean. If true, ensure that we have been passed
              A true network address, eg, 192.168.1.0/24 and not an
              IP address on a network, eg, 192.168.1.1/24.
        Raises:
            AddressValueError: If address isn't a valid IPv6 address.
            NetmaskValueError: If the netmask isn't valid for
              an IPv6 address.
            ValueError: If strict was True and a network address was not
              supplied.
        """
        _BaseNet.__init__(self, address)
        _BaseV6.__init__(self, address)
        # Constructing from an integer or packed bytes.
        if isinstance(address, (int, long, Bytes)):
            self.ip = IPv6Address(address)
            self._ip = self.ip._ip
            # A bare integer/packed address describes a single host: /128.
            self._prefixlen = self._max_prefixlen
            self.netmask = IPv6Address(self._ALL_ONES)
            return
        # Assume input argument to be string or any object representation
        # which converts into a formatted IP prefix string.
        addr = str(address).split('/')
        if len(addr) > 2:
            raise AddressValueError(address)
        self._ip = self._ip_int_from_string(addr[0])
        self.ip = IPv6Address(self._ip)
        if len(addr) == 2:
            # IPv6 netmasks are only accepted in prefix-length form.
            if self._is_valid_netmask(addr[1]):
                self._prefixlen = int(addr[1])
            else:
                raise NetmaskValueError(addr[1])
        else:
            self._prefixlen = self._max_prefixlen
        self.netmask = IPv6Address(self._ip_int_from_prefix(self._prefixlen))
        if strict:
            if self.ip != self.network:
                raise ValueError('%s has host bits set' %
                                 self.ip)
        # A /127 has no distinct network/broadcast addresses, so every
        # address in it is a usable host.
        if self._prefixlen == (self._max_prefixlen - 1):
            self.iterhosts = self.__iter__
    def _is_valid_netmask(self, prefixlen):
        """Verify that the netmask/prefixlen is valid.
        Args:
            prefixlen: A string, the netmask in prefix length format.
        Returns:
            A boolean, True if the prefix represents a valid IPv6
            netmask.
        """
        try:
            prefixlen = int(prefixlen)
        except ValueError:
            return False
        return 0 <= prefixlen <= self._max_prefixlen
    @property
    def with_netmask(self):
        # IPv6 has no dotted netmask form; reuse the prefix notation.
        return self.with_prefixlen
| apache-2.0 |
FEniCS/fiat | FIAT/crouzeix_raviart.py | 1 | 2172 | # Copyright (C) 2010 Marie E. Rognes
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
#
# Written by Marie E. Rognes <meg@simula.no> based on original
# implementation by Robert C. Kirby.
#
# Last changed: 2010-01-28
from FIAT import finite_element, polynomial_set, dual_set, functional
def _initialize_entity_ids(topology):
entity_ids = {}
for (i, entity) in list(topology.items()):
entity_ids[i] = {}
for j in entity:
entity_ids[i][j] = []
return entity_ids
class CrouzeixRaviartDualSet(dual_set.DualSet):
    """Dual basis for Crouzeix-Raviart element (linears continuous at
    boundary midpoints)."""

    def __init__(self, cell, degree):
        dim = cell.get_spatial_dimension()
        topology = cell.get_topology()

        # One degree of freedom per facet (entity of codimension one).
        entity_ids = _initialize_entity_ids(topology)
        facets = topology[dim - 1]
        nodes = [None] * len(facets)

        # Degree of freedom i is point evaluation at the midpoint of
        # facet i.
        for facet in facets:
            midpoint = cell.make_points(dim - 1, facet, dim)[0]
            nodes[facet] = functional.PointEvaluation(cell, midpoint)
            entity_ids[dim - 1][facet].append(facet)

        super(CrouzeixRaviartDualSet, self).__init__(nodes, cell, entity_ids)
class CrouzeixRaviart(finite_element.CiarletElement):
    """The Crouzeix-Raviart finite element:

    K:                 Triangle/Tetrahedron
    Polynomial space:  P_1
    Dual basis:        Evaluation at facet midpoints
    """

    def __init__(self, cell, degree):
        # The Crouzeix-Raviart element is only defined for degree 1.
        if degree != 1:
            raise Exception("Crouzeix-Raviart only defined for degree 1")

        # Polynomial space P_1 plus the midpoint-evaluation dual basis
        # define the element.
        poly_space = polynomial_set.ONPolynomialSet(cell, 1)
        dual_basis = CrouzeixRaviartDualSet(cell, 1)
        super(CrouzeixRaviart, self).__init__(poly_space, dual_basis, 1)
| lgpl-3.0 |
bzennn/blog_flask | python/lib/python3.5/site-packages/jinja2/runtime.py | 73 | 26835 | # -*- coding: utf-8 -*-
"""
jinja2.runtime
~~~~~~~~~~~~~~
Runtime helpers.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD.
"""
import sys
from itertools import chain
from types import MethodType
from jinja2.nodes import EvalContext, _context_function_types
from jinja2.utils import Markup, soft_unicode, escape, missing, concat, \
internalcode, object_type_repr, evalcontextfunction
from jinja2.exceptions import UndefinedError, TemplateRuntimeError, \
TemplateNotFound
from jinja2._compat import imap, text_type, iteritems, \
implements_iterator, implements_to_string, string_types, PY2, \
with_metaclass
# these variables are exported to the template runtime
__all__ = ['LoopContext', 'TemplateReference', 'Macro', 'Markup',
           'TemplateRuntimeError', 'missing', 'concat', 'escape',
           'markup_join', 'unicode_join', 'to_string', 'identity',
           'TemplateNotFound']
#: the name of the function that is used to convert something into
#: a string. We can just use the text type here.
to_string = text_type
#: the identity function. Useful for certain things in the environment
identity = lambda x: x
#: sentinel marking the end of iteration; the loop helpers below compare
#: against it by identity (``is``).
_last_iteration = object()
def markup_join(seq):
    """Concatenation that escapes if necessary and converts to unicode."""
    buf = []
    iterator = imap(soft_unicode, seq)
    for arg in iterator:
        buf.append(arg)
        if hasattr(arg, '__html__'):
            # As soon as one markup-aware value shows up, switch to a
            # Markup join so all items get escaped properly.  ``iterator``
            # resumes where the loop stopped, so already-buffered items
            # are not consumed twice.
            return Markup(u'').join(chain(buf, iterator))
    return concat(buf)
def unicode_join(seq):
    """Coerce every item of *seq* to unicode and concatenate the results."""
    return concat([text_type(item) for item in seq])
def new_context(environment, template_name, blocks, vars=None,
                shared=None, globals=None, locals=None):
    """Internal helper for context creation."""
    if vars is None:
        vars = {}
    if shared:
        # A shared context reuses the caller's *vars* mapping directly,
        # so later modifications are visible to the caller.
        parent = vars
    else:
        # Otherwise layer the template vars over the globals in a fresh
        # dict so neither input mapping is mutated.
        parent = dict(globals or (), **vars)
    if locals:
        # if the parent is shared a copy should be created because
        # we don't want to modify the dict passed
        if shared:
            parent = dict(parent)
        for key, value in iteritems(locals):
            if value is not missing:
                parent[key] = value
    return environment.context_class(environment, parent, template_name,
                                     blocks)
class TemplateReference(object):
    """The `self` in templates."""

    def __init__(self, context):
        self.__context = context

    def __getitem__(self, name):
        # Resolve ``self.<name>`` to the topmost block of that name.
        block_stack = self.__context.blocks[name]
        return BlockReference(name, self.__context, block_stack, 0)

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, self.__context.name)
def _get_func(x):
return getattr(x, '__func__', x)
class ContextMeta(type):
    """Metaclass for :class:`Context` that inspects each subclass and
    records which resolve strategy it uses (legacy ``resolve`` override
    vs. the newer ``resolve_or_missing`` protocol).
    """

    def __new__(cls, name, bases, d):
        rv = type.__new__(cls, name, bases, d)
        if bases == ():
            # Context itself (the root of the hierarchy): nothing to detect.
            return rv

        resolve = _get_func(rv.resolve)
        default_resolve = _get_func(Context.resolve)
        resolve_or_missing = _get_func(rv.resolve_or_missing)
        default_resolve_or_missing = _get_func(Context.resolve_or_missing)

        # If we have a changed resolve but no changed default or missing
        # resolve we invert the call logic.
        if resolve is not default_resolve and \
           resolve_or_missing is default_resolve_or_missing:
            rv._legacy_resolve_mode = True
        elif resolve is default_resolve and \
             resolve_or_missing is default_resolve_or_missing:
            rv._fast_resolve_mode = True

        return rv
def resolve_or_missing(context, key, missing=missing):
    """Look up *key* in the context's own vars, then in its parent scope;
    return the *missing* sentinel when neither contains it.
    """
    for mapping in (context.vars, context.parent):
        if key in mapping:
            return mapping[key]
    return missing
class Context(with_metaclass(ContextMeta)):
    """The template context holds the variables of a template. It stores the
    values passed to the template and also the names the template exports.
    Creating instances is neither supported nor useful as it's created
    automatically at various stages of the template evaluation and should not
    be created by hand.
    The context is immutable. Modifications on :attr:`parent` **must not**
    happen and modifications on :attr:`vars` are allowed from generated
    template code only. Template filters and global functions marked as
    :func:`contextfunction`\\s get the active context passed as first argument
    and are allowed to access the context read-only.
    The template context supports read only dict operations (`get`,
    `keys`, `values`, `items`, `iterkeys`, `itervalues`, `iteritems`,
    `__getitem__`, `__contains__`). Additionally there is a :meth:`resolve`
    method that doesn't fail with a `KeyError` but returns an
    :class:`Undefined` object for missing variables.
    """
    # XXX: we want to eventually make this be a deprecation warning and
    # remove it.
    # Flags set by ContextMeta when a subclass overrides resolve() or
    # resolve_or_missing() respectively.
    _legacy_resolve_mode = False
    _fast_resolve_mode = False
    def __init__(self, environment, parent, name, blocks):
        self.parent = parent
        self.vars = {}
        self.environment = environment
        self.eval_ctx = EvalContext(self.environment, name)
        self.exported_vars = set()
        self.name = name
        # create the initial mapping of blocks. Whenever template inheritance
        # takes place the runtime will update this mapping with the new blocks
        # from the template.
        self.blocks = dict((k, [v]) for k, v in iteritems(blocks))
        # In case we detect the fast resolve mode we can set up an alias
        # here that bypasses the legacy code logic.
        if self._fast_resolve_mode:
            self.resolve_or_missing = MethodType(resolve_or_missing, self)
    def super(self, name, current):
        """Render a parent block."""
        try:
            blocks = self.blocks[name]
            index = blocks.index(current) + 1
            blocks[index]  # probe: IndexError (a LookupError) if no parent
        except LookupError:
            return self.environment.undefined('there is no parent block '
                                              'called %r.' % name,
                                              name='super')
        return BlockReference(name, self, blocks, index)
    def get(self, key, default=None):
        """Returns an item from the template context, if it doesn't exist
        `default` is returned.
        """
        try:
            return self[key]
        except KeyError:
            return default
    def resolve(self, key):
        """Looks up a variable like `__getitem__` or `get` but returns an
        :class:`Undefined` object with the name of the name looked up.
        """
        if self._legacy_resolve_mode:
            rv = resolve_or_missing(self, key)
        else:
            rv = self.resolve_or_missing(key)
        if rv is missing:
            return self.environment.undefined(name=key)
        return rv
    def resolve_or_missing(self, key):
        """Resolves a variable like :meth:`resolve` but returns the
        special `missing` value if it cannot be found.
        """
        if self._legacy_resolve_mode:
            rv = self.resolve(key)
            if isinstance(rv, Undefined):
                rv = missing
            return rv
        return resolve_or_missing(self, key)
    def get_exported(self):
        """Get a new dict with the exported variables."""
        return dict((k, self.vars[k]) for k in self.exported_vars)
    def get_all(self):
        """Return the complete context as dict including the exported
        variables. For optimizations reasons this might not return an
        actual copy so be careful with using it.
        """
        if not self.vars:
            return self.parent
        if not self.parent:
            return self.vars
        return dict(self.parent, **self.vars)
    @internalcode
    def call(__self, __obj, *args, **kwargs):
        """Call the callable with the arguments and keyword arguments
        provided but inject the active context or environment as first
        argument if the callable is a :func:`contextfunction` or
        :func:`environmentfunction`.
        """
        if __debug__:
            __traceback_hide__ = True  # noqa
        # Allow callable classes to take a context
        fn = __obj.__call__
        for fn_type in ('contextfunction',
                        'evalcontextfunction',
                        'environmentfunction'):
            if hasattr(fn, fn_type):
                __obj = fn
                break
        # Prepend the context / eval context / environment depending on
        # how the callable was marked.
        if isinstance(__obj, _context_function_types):
            if getattr(__obj, 'contextfunction', 0):
                args = (__self,) + args
            elif getattr(__obj, 'evalcontextfunction', 0):
                args = (__self.eval_ctx,) + args
            elif getattr(__obj, 'environmentfunction', 0):
                args = (__self.environment,) + args
        try:
            return __obj(*args, **kwargs)
        except StopIteration:
            return __self.environment.undefined('value was undefined because '
                                                'a callable raised a '
                                                'StopIteration exception')
    def derived(self, locals=None):
        """Internal helper function to create a derived context. This is
        used in situations where the system needs a new context in the same
        template that is independent.
        """
        context = new_context(self.environment, self.name, {},
                              self.get_all(), True, None, locals)
        context.eval_ctx = self.eval_ctx
        context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
        return context
    # Build proxies that forward the dict-style accessors to get_all().
    def _all(meth):
        proxy = lambda self: getattr(self.get_all(), meth)()
        proxy.__doc__ = getattr(dict, meth).__doc__
        proxy.__name__ = meth
        return proxy
    keys = _all('keys')
    values = _all('values')
    items = _all('items')
    # not available on python 3
    if PY2:
        iterkeys = _all('iterkeys')
        itervalues = _all('itervalues')
        iteritems = _all('iteritems')
    del _all
    def __contains__(self, name):
        return name in self.vars or name in self.parent
    def __getitem__(self, key):
        """Lookup a variable or raise `KeyError` if the variable is
        undefined.
        """
        item = self.resolve_or_missing(key)
        if item is missing:
            raise KeyError(key)
        return item
    def __repr__(self):
        return '<%s %s of %r>' % (
            self.__class__.__name__,
            repr(self.get_all()),
            self.name
        )
# Register Context as a virtual subclass of Mapping if possible, so
# isinstance(ctx, Mapping) checks succeed.  ``collections.Mapping`` moved
# to ``collections.abc`` in Python 3.3 and was removed from
# ``collections`` in Python 3.10, so prefer the new location; the old
# import remains as a Python 2 fallback.
try:
    from collections.abc import Mapping
    Mapping.register(Context)
except ImportError:
    try:
        from collections import Mapping
        Mapping.register(Context)
    except ImportError:
        pass
class BlockReference(object):
    """One block on a template reference."""

    def __init__(self, name, context, stack, depth):
        self.name = name
        self._context = context
        self._stack = stack
        self._depth = depth

    @property
    def super(self):
        """Super the block."""
        parent_depth = self._depth + 1
        # No deeper entry in the inheritance stack means no parent block.
        if parent_depth >= len(self._stack):
            return self._context.environment.undefined(
                'there is no parent block called %r.' % self.name,
                name='super')
        return BlockReference(self.name, self._context, self._stack,
                              parent_depth)

    @internalcode
    def __call__(self):
        rendered = concat(self._stack[self._depth](self._context))
        # Wrap in Markup when autoescaping so the result is not escaped
        # again by the caller.
        if self._context.eval_ctx.autoescape:
            rendered = Markup(rendered)
        return rendered
class LoopContextBase(object):
    """A loop context for dynamic iteration."""
    # Lookahead slot: holds the next element, or the sentinel once the
    # underlying iterator is exhausted (this is how ``last`` is detected).
    _after = _last_iteration
    _length = None
    def __init__(self, recurse=None, depth0=0):
        self._recurse = recurse
        self.index0 = -1  # zero-based; bumped before each item is produced
        self.depth0 = depth0
    def cycle(self, *args):
        """Cycles among the arguments with the current loop index."""
        if not args:
            raise TypeError('no items for cycling given')
        return args[self.index0 % len(args)]
    first = property(lambda x: x.index0 == 0)
    last = property(lambda x: x._after is _last_iteration)
    index = property(lambda x: x.index0 + 1)
    revindex = property(lambda x: x.length - x.index0)
    revindex0 = property(lambda x: x.length - x.index)
    depth = property(lambda x: x.depth0 + 1)
    def __len__(self):
        return self.length
    @internalcode
    def loop(self, iterable):
        if self._recurse is None:
            raise TypeError('Tried to call non recursive loop. Maybe you '
                            "forgot the 'recursive' modifier.")
        return self._recurse(iterable, self._recurse, self.depth0 + 1)
    # a nifty trick to enhance the error message if someone tried to call
    # the loop without or with too many arguments.
    __call__ = loop
    del loop
    def __repr__(self):
        return '<%s %r/%r>' % (
            self.__class__.__name__,
            self.index,
            self.length
        )
class LoopContext(LoopContextBase):
    """Loop context backed by an arbitrary iterable."""

    def __init__(self, iterable, recurse=None, depth0=0):
        LoopContextBase.__init__(self, recurse, depth0)
        self._iterator = iter(iterable)
        # try to get the length of the iterable early. This must be done
        # here because there are some broken iterators around where there
        # __len__ is the number of iterations left (i'm looking at your
        # listreverseiterator!).
        try:
            self._length = len(iterable)
        except (TypeError, AttributeError):
            self._length = None
        # Prime the one-element lookahead used to implement ``last``.
        self._after = self._safe_next()
    @property
    def length(self):
        if self._length is None:
            # if was not possible to get the length of the iterator when
            # the loop context was created (ie: iterating over a generator)
            # we have to convert the iterable into a sequence and use the
            # length of that + the number of iterations so far.
            iterable = tuple(self._iterator)
            self._iterator = iter(iterable)
            # index0 + 2 = items already yielded (index0 + 1) plus the one
            # element sitting in the ``_after`` lookahead slot.
            iterations_done = self.index0 + 2
            self._length = len(iterable) + iterations_done
        return self._length
    def __iter__(self):
        return LoopContextIterator(self)
    def _safe_next(self):
        # Advance the iterator, mapping exhaustion to the sentinel instead
        # of letting StopIteration escape.
        try:
            return next(self._iterator)
        except StopIteration:
            return _last_iteration
@implements_iterator
class LoopContextIterator(object):
    """The iterator for a loop context."""
    __slots__ = ('context',)

    def __init__(self, context):
        self.context = context

    def __iter__(self):
        return self

    def __next__(self):
        loop = self.context
        loop.index0 += 1
        # The lookahead slot holds the sentinel once the underlying
        # iterator is exhausted.
        if loop._after is _last_iteration:
            raise StopIteration()
        current = loop._after
        loop._after = loop._safe_next()
        return current, loop
class Macro(object):
    """Wraps a macro function."""
    def __init__(self, environment, func, name, arguments,
                 catch_kwargs, catch_varargs, caller,
                 default_autoescape=None):
        self._environment = environment
        self._func = func
        self._argument_count = len(arguments)
        self.name = name
        self.arguments = arguments
        self.catch_kwargs = catch_kwargs    # macro accepts **kwargs
        self.catch_varargs = catch_varargs  # macro accepts *varargs
        self.caller = caller                # macro used with {% call %}
        self.explicit_caller = 'caller' in arguments
        if default_autoescape is None:
            default_autoescape = environment.autoescape
        self._default_autoescape = default_autoescape
    @internalcode
    @evalcontextfunction
    def __call__(self, *args, **kwargs):
        """Invoke the macro, binding positional/keyword arguments to the
        declared parameter list in compiler-defined order.
        """
        # This requires a bit of explanation, In the past we used to
        # decide largely based on compile-time information if a macro is
        # safe or unsafe. While there was a volatile mode it was largely
        # unused for deciding on escaping. This turns out to be
        # problemtic for macros because if a macro is safe or not not so
        # much depends on the escape mode when it was defined but when it
        # was used.
        #
        # Because however we export macros from the module system and
        # there are historic callers that do not pass an eval context (and
        # will continue to not pass one), we need to perform an instance
        # check here.
        #
        # This is considered safe because an eval context is not a valid
        # argument to callables otherwise anwyays. Worst case here is
        # that if no eval context is passed we fall back to the compile
        # time autoescape flag.
        if args and isinstance(args[0], EvalContext):
            autoescape = args[0].autoescape
            args = args[1:]
        else:
            autoescape = self._default_autoescape
        # try to consume the positional arguments
        arguments = list(args[:self._argument_count])
        off = len(arguments)
        # For information why this is necessary refer to the handling
        # of caller in the `macro_body` handler in the compiler.
        found_caller = False
        # if the number of arguments consumed is not the number of
        # arguments expected we start filling in keyword arguments
        # and defaults.
        if off != self._argument_count:
            for idx, name in enumerate(self.arguments[len(arguments):]):
                try:
                    value = kwargs.pop(name)
                except KeyError:
                    value = missing
                if name == 'caller':
                    found_caller = True
                arguments.append(value)
        else:
            found_caller = self.explicit_caller
        # it's important that the order of these arguments does not change
        # if not also changed in the compiler's `function_scoping` method.
        # the order is caller, keyword arguments, positional arguments!
        if self.caller and not found_caller:
            caller = kwargs.pop('caller', None)
            if caller is None:
                caller = self._environment.undefined('No caller defined',
                                                     name='caller')
            arguments.append(caller)
        if self.catch_kwargs:
            arguments.append(kwargs)
        elif kwargs:
            if 'caller' in kwargs:
                raise TypeError('macro %r was invoked with two values for '
                                'the special caller argument. This is '
                                'most likely a bug.' % self.name)
            raise TypeError('macro %r takes no keyword argument %r' %
                            (self.name, next(iter(kwargs))))
        if self.catch_varargs:
            arguments.append(args[self._argument_count:])
        elif len(args) > self._argument_count:
            raise TypeError('macro %r takes not more than %d argument(s)' %
                            (self.name, len(self.arguments)))
        return self._invoke(arguments, autoescape)
    def _invoke(self, arguments, autoescape):
        """This method is being swapped out by the async implementation."""
        rv = self._func(*arguments)
        if autoescape:
            rv = Markup(rv)
        return rv
    def __repr__(self):
        return '<%s %s>' % (
            self.__class__.__name__,
            self.name is None and 'anonymous' or repr(self.name)
        )
@implements_to_string
class Undefined(object):
    """The default undefined type. This undefined type can be printed and
    iterated over, but every other access will raise an :exc:`jinja2.exceptions.UndefinedError`:
    >>> foo = Undefined(name='foo')
    >>> str(foo)
    ''
    >>> not foo
    True
    >>> foo + 42
    Traceback (most recent call last):
      ...
    jinja2.exceptions.UndefinedError: 'foo' is undefined
    """
    # Slots keep per-instance cost low; these record why/where the value
    # was undefined so the eventual error message is useful.
    __slots__ = ('_undefined_hint', '_undefined_obj', '_undefined_name',
                 '_undefined_exception')
    def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
        self._undefined_hint = hint
        self._undefined_obj = obj
        self._undefined_name = name
        self._undefined_exception = exc
    @internalcode
    def _fail_with_undefined_error(self, *args, **kwargs):
        """Regular callback function for undefined objects that raises an
        `jinja2.exceptions.UndefinedError` on call.
        """
        # Build the most specific message available from the recorded
        # name / containing object / explicit hint.
        if self._undefined_hint is None:
            if self._undefined_obj is missing:
                hint = '%r is undefined' % self._undefined_name
            elif not isinstance(self._undefined_name, string_types):
                hint = '%s has no element %r' % (
                    object_type_repr(self._undefined_obj),
                    self._undefined_name
                )
            else:
                hint = '%r has no attribute %r' % (
                    object_type_repr(self._undefined_obj),
                    self._undefined_name
                )
        else:
            hint = self._undefined_hint
        raise self._undefined_exception(hint)
    @internalcode
    def __getattr__(self, name):
        # Dunder lookups (e.g. __class__ machinery) must raise
        # AttributeError, not UndefinedError.
        if name[:2] == '__':
            raise AttributeError(name)
        return self._fail_with_undefined_error()
    # Every arithmetic / comparison / call / index operation shares the
    # single error-raising implementation above.
    __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
        __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
        __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
        __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \
        __float__ = __complex__ = __pow__ = __rpow__ = __sub__ = \
        __rsub__ = _fail_with_undefined_error
    def __eq__(self, other):
        # All undefineds of the same type compare equal.
        return type(self) is type(other)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        return id(type(self))
    def __str__(self):
        return u''
    def __len__(self):
        return 0
    def __iter__(self):
        # An always-empty generator: printing/iterating yields nothing.
        if 0:
            yield None
    def __nonzero__(self):
        return False
    __bool__ = __nonzero__
    def __repr__(self):
        return 'Undefined'
def make_logging_undefined(logger=None, base=None):
    """Given a logger object this returns a new undefined class that will
    log certain failures.  It will log iterations and printing.  If no
    logger is given a default logger is created.
    Example::
        logger = logging.getLogger(__name__)
        LoggingUndefined = make_logging_undefined(
            logger=logger,
            base=Undefined
        )
    .. versionadded:: 2.8
    :param logger: the logger to use.  If not provided, a default logger
                   is created.
    :param base: the base class to add logging functionality to.  This
                 defaults to :class:`Undefined`.
    """
    if logger is None:
        # Fall back to a module-scoped logger that writes to stderr.
        import logging
        logger = logging.getLogger(__name__)
        logger.addHandler(logging.StreamHandler(sys.stderr))
    if base is None:
        base = Undefined
    def _log_message(undef):
        # Mirrors the hint construction in Undefined._fail_with_undefined_error,
        # but only to emit a warning instead of raising.
        if undef._undefined_hint is None:
            if undef._undefined_obj is missing:
                hint = '%s is undefined' % undef._undefined_name
            elif not isinstance(undef._undefined_name, string_types):
                hint = '%s has no element %s' % (
                    object_type_repr(undef._undefined_obj),
                    undef._undefined_name)
            else:
                hint = '%s has no attribute %s' % (
                    object_type_repr(undef._undefined_obj),
                    undef._undefined_name)
        else:
            hint = undef._undefined_hint
        logger.warning('Template variable warning: %s', hint)
    class LoggingUndefined(base):
        # Subclass of *base* that logs tolerated accesses (print, iterate,
        # truth test) and errors out like the base class on everything else.
        def _fail_with_undefined_error(self, *args, **kwargs):
            try:
                return base._fail_with_undefined_error(self, *args, **kwargs)
            except self._undefined_exception as e:
                # Log the failure before propagating it unchanged.
                logger.error('Template variable error: %s', str(e))
                raise e
        def __str__(self):
            rv = base.__str__(self)
            _log_message(self)
            return rv
        def __iter__(self):
            rv = base.__iter__(self)
            _log_message(self)
            return rv
        if PY2:
            # Python 2 spells truth testing and text conversion differently.
            def __nonzero__(self):
                rv = base.__nonzero__(self)
                _log_message(self)
                return rv
            def __unicode__(self):
                rv = base.__unicode__(self)
                _log_message(self)
                return rv
        else:
            def __bool__(self):
                rv = base.__bool__(self)
                _log_message(self)
                return rv
    return LoggingUndefined
@implements_to_string
class DebugUndefined(Undefined):
    """An undefined that returns the debug info when printed.
    >>> foo = DebugUndefined(name='foo')
    >>> str(foo)
    '{{ foo }}'
    >>> not foo
    True
    >>> foo + 42
    Traceback (most recent call last):
    ...
    jinja2.exceptions.UndefinedError: 'foo' is undefined
    """
    __slots__ = ()
    def __str__(self):
        """Render the undefined as template-like debug markup so missing
        variables stay visible in the output instead of disappearing.
        """
        if self._undefined_hint is None:
            if self._undefined_obj is missing:
                return u'{{ %s }}' % self._undefined_name
            # BUG FIX: use a unicode literal here as well so __str__
            # consistently returns unicode on Python 2, matching the other
            # return paths and the implements_to_string contract.
            return u'{{ no such element: %s[%r] }}' % (
                object_type_repr(self._undefined_obj),
                self._undefined_name
            )
        return u'{{ undefined value printed: %s }}' % self._undefined_hint
@implements_to_string
class StrictUndefined(Undefined):
    """An undefined that barks on print and iteration as well as boolean
    tests and all kinds of comparisons.  In other words: you can do nothing
    with it except checking if it's defined using the `defined` test.
    >>> foo = StrictUndefined(name='foo')
    >>> str(foo)
    Traceback (most recent call last):
    ...
    jinja2.exceptions.UndefinedError: 'foo' is undefined
    >>> not foo
    Traceback (most recent call last):
    ...
    jinja2.exceptions.UndefinedError: 'foo' is undefined
    >>> foo + 42
    Traceback (most recent call last):
    ...
    jinja2.exceptions.UndefinedError: 'foo' is undefined
    """
    __slots__ = ()
    # Unlike the base class, even string conversion, length, iteration,
    # truth testing, equality and hashing raise the undefined error.
    __iter__ = __str__ = __len__ = __nonzero__ = __eq__ = \
        __ne__ = __bool__ = __hash__ = \
        Undefined._fail_with_undefined_error
# Remove the class-level __slots__ attributes now that class creation has
# consumed them: leaving them visible would expose misleading (base-class)
# slot data on the subclasses.
del Undefined.__slots__, DebugUndefined.__slots__, StrictUndefined.__slots__
| gpl-3.0 |
Antiun/odoo | addons/survey/__openerp__.py | 261 | 2391 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Survey',
'version': '2.0',
'category': 'Marketing',
'description': """
Create beautiful web surveys and visualize answers
==================================================
It depends on the answers or reviews of some questions by different users. A
survey may have multiple pages. Each page may contain multiple questions and
each question may have multiple answers. Different users may give different
answers of question and according to that survey is done. Partners are also
sent mails with personal token for the invitation of the survey.
""",
'summary': 'Create surveys, collect answers and print statistics',
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/survey',
'depends': ['email_template', 'mail', 'website', 'marketing'],
'data': [
'security/survey_security.xml',
'security/ir.model.access.csv',
'views/survey_views.xml',
'views/survey_templates.xml',
'views/survey_result.xml',
'wizard/survey_email_compose_message.xml',
'data/survey_stages.xml',
'data/survey_cron.xml'
],
'demo': ['data/survey_demo_user.xml',
'data/survey_demo_feedback.xml',
'data/survey.user_input.csv',
'data/survey.user_input_line.csv'],
'installable': True,
'auto_install': False,
'application': True,
'sequence': 10,
}
| agpl-3.0 |
srimai/odoo | addons/hw_proxy/__openerp__.py | 313 | 1675 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo module manifest for the Hardware Proxy framework.
{
    'name': 'Hardware Proxy',
    'version': '1.0',
    'category': 'Point Of Sale',
    'sequence': 6,
    'summary': 'Connect the Web Client to Hardware Peripherals',
    'website': 'https://www.odoo.com/page/point-of-sale',
    # BUG FIX: corrected typos in the user-facing description
    # ("Hardware Poxy" -> "Hardware Proxy", "This modules" -> "This module").
    'description': """
Hardware Proxy
==============
This module allows you to remotely use peripherals connected to this server.
This module only contains the enabling framework. The actual devices drivers
are found in other modules that must be installed separately.
""",
    'author': 'OpenERP SA',
    # No dependencies: this is a standalone enabling framework.
    'depends': [],
    'test': [
    ],
    'installable': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
v-iam/azure-sdk-for-python | azure-mgmt-scheduler/azure/mgmt/scheduler/models/storage_queue_message.py | 5 | 1417 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StorageQueueMessage(Model):
    """StorageQueueMessage.

    msrest model describing the message a Scheduler job posts to an Azure
    storage queue.  This class is generated by the AutoRest code generator
    (see the file header); prefer regenerating over hand-editing.

    :param storage_account: Gets or sets the storage account name.
    :type storage_account: str
    :param queue_name: Gets or sets the queue name.
    :type queue_name: str
    :param sas_token: Gets or sets the SAS key.
    :type sas_token: str
    :param message: Gets or sets the message.
    :type message: str
    """
    # Maps Python attribute names to their wire (JSON) keys and types for
    # msrest serialization/deserialization.
    _attribute_map = {
        'storage_account': {'key': 'storageAccount', 'type': 'str'},
        'queue_name': {'key': 'queueName', 'type': 'str'},
        'sas_token': {'key': 'sasToken', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }
    def __init__(self, storage_account=None, queue_name=None, sas_token=None, message=None):
        # All fields are optional and default to None (absent on the wire).
        self.storage_account = storage_account
        self.queue_name = queue_name
        self.sas_token = sas_token
        self.message = message
| mit |
roelandjansen/UHSDR | mchf-eclipse/support/python/uhsdr.py | 2 | 7846 | """
This module contains experimental code for using the (extend) UHSDR API
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function
__author__ = "DB4PLE"
__copyright__ = "Copyright 2018, UHSDR project"
__credits__ = ["DB4PLE"]
__license__ = "GPLv3"
__status__ = "Prototype"
import sys
import os
class CatCmdFt817:
    """FT817 CAT command opcodes used/supported by this module.

    This list covers the officially known FT817 codes (including the
    "undocumented" ones) that the tool relies on.
    """
    READ_EEPROM = 0xBB
    WRITE_EEPROM = 0xBC


class CatCmd(CatCmdFt817):
    """UHSDR CAT dialect: the FT817 opcodes plus UHSDR-only extensions.

    The extension codes must never collide with genuine FT817 opcodes.
    """

    # Identification opcode: the device answers with the bytes b'UHSDR'.
    # Used to detect an UHSDR with a high enough firmware level.
    UHSDR_ID = 0x42


class UhsdrConfigIndex:
    """Well-known UHSDR configuration value indices.

    Deliberately incomplete: only the absolutely necessary indices are
    listed.  These must never change between firmware versions, or this
    code would break.
    """
    VER_MAJOR = 176
    VER_MINOR = 310
    VER_BUILD = 171
    NUMBER_OF_ENTRIES = 407


def eprint(*args, **kwargs):
    """Print to stderr; used for error and logging messages."""
    print(*args, file=sys.stderr, **kwargs)


class catSerial:
    """Low level FT817 CAT protocol framing over a serial-like object."""

    def __init__(self, comObj):
        # comObj only needs write()/read() (e.g. a pyserial Serial).
        self.comObj = comObj

    def sendCommand(self, command):
        """Write one 5-byte CAT command; return True if fully written."""
        bytesWritten = self.comObj.write(command)
        return bytesWritten == 5

    def readResponse(self, count):
        """Read *count* response bytes; return (complete, data)."""
        response = self.comObj.read(count)
        return (len(response) == count, response)


class catCommands:
    """Direct access to individual CAT API actions.

    Each logical API function has its direct counterpart here; no extra
    control logic is implemented, only the call and its raw response.
    """

    def __init__(self, catObj):
        # catObj provides sendCommand()/readResponse() (e.g. a catSerial).
        self.catObj = catObj

    def execute(self, cmd, count):
        """Send *cmd* and read *count* reply bytes.

        :return: tuple (ok, bytearray of the response; empty on send failure)
        """
        if self.catObj.sendCommand(cmd):
            ok, res = self.catObj.readResponse(count)
            return ok, bytearray(res)
        return (False, bytearray([]))

    def readEEPROM(self, addr):
        """Read the 16-bit value at EEPROM address *addr*.

        :return: the little-endian decoded integer, or False on failure
        """
        cmd = bytearray([(addr & 0xff00) >> 8, addr & 0xff, 0x00, 0x00,
                         CatCmd.READ_EEPROM])
        ok, res = self.execute(cmd, 2)
        if ok:
            # Response is little-endian: low byte first.
            return res[1] * 256 + res[0]
        return ok

    def readUHSDR(self):
        """Return True when the device identifies itself as an UHSDR."""
        cmd = bytearray([0x00, 0x00, 0x00, 0x00, CatCmd.UHSDR_ID])
        ok, res = self.execute(cmd, 5)
        # BUG FIX: bytearray("UHSDR") raises TypeError on Python 3 because a
        # str source requires an encoding; compare against a bytes literal.
        return res == bytearray(b"UHSDR")

    def writeEEPROM(self, addr, value16bit):
        """Write a 16-bit value to EEPROM address *addr*; return success."""
        cmd = bytearray([(addr & 0xff00) >> 8, addr & 0xff,
                         (value16bit & 0xff) >> 0, (value16bit & 0xff00) >> 8,
                         CatCmd.WRITE_EEPROM])
        ok, res = self.execute(cmd, 1)
        return ok

    def readUHSDRConfig(self, index):
        """Read a UHSDR configuration value (config space starts at 0x8000)."""
        return self.readEEPROM(index + 0x8000)

    def writeUHSDRConfig(self, index, value):
        """Write a UHSDR configuration value (config space starts at 0x8000)."""
        return self.writeEEPROM(index + 0x8000, value)
class UhsdrConfig():
    """
    CONFIG MANAGEMENT: Handling of reading / writing TRX configurations,
    detection of TRX presence etc.
    This class represents high-level actions and should involve proper
    parameter checking etc.
    """

    def __init__(self, catObj):
        # catObj provides readUHSDR()/readUHSDRConfig()/writeUHSDRConfig().
        self.catObj = catObj

    def getVersion(self):
        """
        Return the firmware version as an integer tuple (major, minor, build),
        or (False, False, False) if the underlying reads fail.
        """
        return (self.catObj.readUHSDRConfig(UhsdrConfigIndex.VER_MAJOR),
                self.catObj.readUHSDRConfig(UhsdrConfigIndex.VER_MINOR),
                self.catObj.readUHSDRConfig(UhsdrConfigIndex.VER_BUILD))

    def isUhsdrConnected(self):
        """
        Test whether an UHSDR with the extended API is connected, using an
        identification call not present on a FT817 or older firmwares.
        :return: True if a suitable TRX is connected, False otherwise
        """
        return self.catObj.readUHSDR()

    def getConfigValueCount(self):
        """Return the number of configuration entries the firmware reports."""
        return self.catObj.readUHSDRConfig(UhsdrConfigIndex.NUMBER_OF_ENTRIES)

    def getValue(self, index):
        """Read a single configuration value (False on read failure)."""
        # TODO: do some range checking here
        return self.catObj.readUHSDRConfig(index)

    def setValue(self, index, value):
        """Write one configuration value and verify it by reading it back."""
        # TODO: do some range checking here
        if self.catObj.writeUHSDRConfig(index, value):
            return value == self.getValue(index)
        return False

    def configToJson(self):
        """
        Read the configuration from the TRX into a data dictionary.

        :return: tuple (success flag, data dictionary)

        The structure is simple: 'version' holds the (major, minor, build)
        tuple, 'when' holds the UTC backup timestamp, and 'eeprom' holds one
        addr/value pair per configuration entry.
        """
        from datetime import datetime
        vals_read = []
        self.data = {}
        self.data['version'] = self.getVersion()
        self.data['when'] = str(datetime.utcnow())
        self.data['eeprom'] = []
        numberOfValues = self.getConfigValueCount()
        for index in range(numberOfValues):
            val = self.getValue(index)
            vals_read.append(val)
            self.data['eeprom'].append({'addr': index, 'value': val})
        # BUG FIX: the success flag used "or", which made it True even when
        # individual reads failed (and also for an empty read-back).  The
        # backup is only good when at least one value was read and none of
        # the reads returned the failure marker False.
        retval = bool(vals_read) and all(val is not False for val in vals_read)
        return retval, self.data

    def jsonToConfig(self, data):
        """
        Write the configuration in the passed data dictionary to the TRX.

        :return: tuple (success flag, human readable message)

        The dictionary must conform to the format produced by configToJson().
        """
        retval = True
        retmsg = "OK"
        # NOTE(review): the consistency check indexes the eeprom list with
        # NUMBER_OF_ENTRIES and expects the stored count to equal the list
        # length -- confirm this invariant against real backup files.
        if data['when'] is not None and len(data['version']) == 3 and len(data['eeprom']) == data['eeprom'][UhsdrConfigIndex.NUMBER_OF_ENTRIES]['value']:
            numberOfValues = data['eeprom'][UhsdrConfigIndex.NUMBER_OF_ENTRIES]['value']
            # Index 0 holds the EEPROM type and is deliberately never
            # restored during a configuration restore -- too dangerous.
            for index in range(numberOfValues):
                addr = data['eeprom'][index]['addr']
                value = data['eeprom'][index]['value']
                if addr != 0:
                    if self.setValue(addr, value) == False:
                        retmsg = "Restoring value {} at addr {} failed".format(value, addr)
                        retval = False
                        break
        else:
            retmsg = "Configuration data failed consistency check"
            retval = False
        return retval, retmsg
| gpl-3.0 |
hasadna/open-shot | qa/search_indexes.py | 1 | 1187 | import datetime
from haystack import indexes
from celery_haystack.indexes import CelerySearchIndex
from models import Question, Answer
class AnswerIndex(CelerySearchIndex, indexes.Indexable):
    """Haystack search index for Answer objects, kept up to date via Celery."""
    # Main document field; its content is rendered from a search template.
    text = indexes.CharField(document=True, use_template=True)
    author = indexes.CharField(model_attr='author')
    created_at = indexes.DateTimeField(model_attr='created_at')
    # Entity slug of the related question, for per-place filtering.
    place = indexes.CharField(model_attr='question__entity__slug')
    def get_model(self):
        return Answer
    def index_queryset(self, **kwargs):
        """Used when the entire index for model is updated."""
        return self.get_model().objects.all()
class QuestionIndex(CelerySearchIndex, indexes.Indexable):
    """Haystack search index for Question objects, kept up to date via Celery."""
    # Main document field; its content is rendered from a search template.
    text = indexes.CharField(document=True, use_template=True)
    author = indexes.CharField(model_attr='author')
    created_at = indexes.DateTimeField(model_attr='created_at')
    # Entity slug of the question itself, for per-place filtering.
    place = indexes.CharField(model_attr='entity__slug')
    def get_model(self):
        return Question
    def index_queryset(self, **kwargs):
        """Used when the entire index for model is updated."""
        return self.get_model().objects.all()
| bsd-3-clause |
40223119/2015w13 | static/Brython3.1.1-20150328-091302/Lib/multiprocessing/dummy/connection.py | 707 | 3049 | #
# Analogue of `multiprocessing.connection` which uses queues instead of sockets
#
# multiprocessing/dummy/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = [ 'Client', 'Listener', 'Pipe' ]
from queue import Queue
families = [None]
class Listener(object):
def __init__(self, address=None, family=None, backlog=1):
self._backlog_queue = Queue(backlog)
def accept(self):
return Connection(*self._backlog_queue.get())
def close(self):
self._backlog_queue = None
address = property(lambda self: self._backlog_queue)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def Client(address):
    """Connect to a Listener at *address*; return the client Connection."""
    inbound, outbound = Queue(), Queue()
    # Announce our queue pair to the listener's backlog.
    address.put((outbound, inbound))
    return Connection(inbound, outbound)


def Pipe(duplex=True):
    """Return two Connection objects wired to each other.

    The *duplex* flag is accepted for API compatibility only; the returned
    pair is always duplex.
    """
    first, second = Queue(), Queue()
    return Connection(first, second), Connection(second, first)


class Connection(object):
    """One endpoint of a queue pair, mimicking a multiprocessing Connection."""

    def __init__(self, _in, _out):
        self._out = _out
        self._in = _in
        # The byte-oriented and object-oriented APIs are identical here:
        # both just move objects through the queues.
        self.send = self.send_bytes = _out.put
        self.recv = self.recv_bytes = _in.get

    def poll(self, timeout=0.0):
        """Return True if data is available within *timeout* seconds."""
        if self._in.qsize() > 0:
            return True
        if timeout <= 0.0:
            return False
        # Wait on the queue's internal condition until an item may arrive.
        with self._in.not_empty:
            self._in.not_empty.wait(timeout)
        return self._in.qsize() > 0

    def close(self):
        """No-op: plain queues need no explicit teardown."""
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()
| gpl-3.0 |
liquidia/easyengine | ee/core/shellexec.py | 7 | 1809 | """EasyEngine shell executaion functions."""
from ee.core.logging import Log
import os
import sys
import subprocess
class CommandExecutionError(Exception):
    """Raised when executing a shell command fails."""
class EEShellExec():
    """Run shell commands for EasyEngine.

    Methods are designed to be called with the active controller object as
    *self*; it is only forwarded to the logger.
    """

    def __init__(self):
        # BUG FIX: the original signature was ``def __init__():`` without
        # *self*, so instantiating EEShellExec() raised a TypeError.
        pass

    def cmd_exec(self, command, errormsg='', log=True):
        """Run a shell command from Python.

        :param command: command line passed to the shell
        :param errormsg: message wrapped into CommandExecutionError on failure
        :param log: emit debug log messages when True
        :return: True if the command exited with status 0, False otherwise
        :raises CommandExecutionError: if the command could not be executed
        """
        try:
            log and Log.debug(self, "Running command: {0}".format(command))
            with subprocess.Popen([command], stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE, shell=True) as proc:
                (cmd_stdout_bytes, cmd_stderr_bytes) = proc.communicate()
                (cmd_stdout, cmd_stderr) = (cmd_stdout_bytes.decode('utf-8',
                                                                    "replace"),
                                            cmd_stderr_bytes.decode('utf-8',
                                                                    "replace"))
                if proc.returncode == 0:
                    return True
                else:
                    Log.debug(self, "Command Output: {0}, \nCommand Error: {1}"
                              .format(cmd_stdout, cmd_stderr))
                    return False
        except OSError as e:
            Log.debug(self, str(e))
            # BUG FIX: attach the caller-supplied error message instead of
            # silently discarding the (previously unused) errormsg parameter.
            raise CommandExecutionError(errormsg)
        except Exception as e:
            Log.debug(self, str(e))
            raise CommandExecutionError(errormsg)

    def invoke_editor(self, filepath, errormsg=''):
        """
        Open *filepath* using the system's sensible editor.

        :raises CommandExecutionError: if the editor cannot be launched
        """
        try:
            subprocess.call(['sensible-editor', filepath])
        except OSError as e:
            Log.debug(self, "{0}{1}".format(e.errno, e.strerror))
            raise CommandExecutionError(errormsg)
| mit |
bowang/tensorflow | tensorflow/contrib/ndlstm/python/__init__.py | 135 | 1103 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Init file, giving convenient access to all ndlstm ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,g-importing-member
from tensorflow.contrib.ndlstm.python.lstm1d import *
from tensorflow.contrib.ndlstm.python.lstm2d import *
from tensorflow.contrib.ndlstm.python.misc import *
# pylint: enable=wildcard-import
| apache-2.0 |
muntasirsyed/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/db/backends/mysql/introspection.py | 624 | 1426 | from MySQLdb.constants import FIELD_TYPE
from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.mysql.introspection import DatabaseIntrospection
class MySQLIntrospection(DatabaseIntrospection):
    """Database introspection for the MySQL spatial backend."""

    # Updating the data_types_reverse dictionary with the appropriate
    # type for Geometry fields.
    data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
    data_types_reverse[FIELD_TYPE.GEOMETRY] = 'GeometryField'

    def get_geometry_type(self, table_name, geo_col):
        """Return (field_type, field_params) for the geometry column
        *geo_col* of *table_name*.

        :raises Exception: if *geo_col* is not a column of *table_name*
        """
        cursor = self.connection.cursor()
        try:
            # In order to get the specific geometry type of the field,
            # we introspect on the table definition using `DESCRIBE`.
            cursor.execute('DESCRIBE %s' %
                           self.connection.ops.quote_name(table_name))
            # Scan the description rows until we reach the geometry column.
            for column, typ, null, key, default, extra in cursor.fetchall():
                if column == geo_col:
                    # Using OGRGeomType to convert from OGC name to Django
                    # field.  MySQL does not support 3D or SRIDs, so the
                    # field params are empty.
                    field_type = OGRGeomType(typ).django
                    field_params = {}
                    break
            else:
                # BUG FIX: previously the fall-through produced an opaque
                # UnboundLocalError; raise a descriptive error instead.
                raise Exception('Could not find a geometry column "%s" in "%s".'
                                % (geo_col, table_name))
        finally:
            cursor.close()
        return field_type, field_params
| apache-2.0 |
frouty/odoo_oph | addons/account_followup/report/account_followup_print.py | 39 | 5871 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from collections import defaultdict
from openerp import pooler
from openerp.report import report_sxw
class report_rappel(report_sxw.rml_parse):
    """RML report parser rendering payment follow-up (reminder) letters for
    partners that have overdue, unreconciled receivable move lines.
    """
    _name = "account_followup.report.rappel"
    def __init__(self, cr, uid, name, context=None):
        super(report_rappel, self).__init__(cr, uid, name, context=context)
        # Expose the helpers below to the RML template.
        self.localcontext.update({
            'time': time,
            'ids_to_objects': self._ids_to_objects,
            'getLines': self._lines_get,
            'get_text': self._get_text
        })
    def _ids_to_objects(self, ids):
        """Browse account_followup.stat.by.partner ids, dropping duplicates
        while preserving order.
        """
        pool = pooler.get_pool(self.cr.dbname)
        all_lines = []
        for line in pool.get('account_followup.stat.by.partner').browse(self.cr, self.uid, ids):
            if line not in all_lines:
                all_lines.append(line)
        return all_lines
    def _lines_get(self, stat_by_partner_line):
        """Return the overdue move lines for the stat line's partner/company."""
        return self._lines_get_with_partner(stat_by_partner_line.partner_id, stat_by_partner_line.company_id.id)
    def _lines_get_with_partner(self, partner, company_id):
        """Collect the unreconciled, posted receivable move lines of
        *partner* in *company_id*, grouped by currency for the report.
        """
        pool = pooler.get_pool(self.cr.dbname)
        moveline_obj = pool.get('account.move.line')
        moveline_ids = moveline_obj.search(self.cr, self.uid, [
            ('partner_id', '=', partner.id),
            ('account_id.type', '=', 'receivable'),
            ('reconcile_id', '=', False),
            ('state', '!=', 'draft'),
            ('company_id', '=', company_id),
        ])
        # lines_per_currency = {currency: [line data, ...], ...}
        lines_per_currency = defaultdict(list)
        for line in moveline_obj.browse(self.cr, self.uid, moveline_ids):
            currency = line.currency_id or line.company_id.currency_id
            line_data = {
                'name': line.move_id.name,
                'ref': line.ref,
                'date': line.date,
                'date_maturity': line.date_maturity,
                # Foreign-currency lines show the currency amount; company
                # currency lines show debit - credit.
                'balance': line.amount_currency if currency != line.company_id.currency_id else line.debit - line.credit,
                'blocked': line.blocked,
                'currency_id': currency,
            }
            lines_per_currency[currency].append(line_data)
        return [{'line': lines} for lines in lines_per_currency.values()]
    def _get_text(self, stat_line, followup_id, context=None):
        """Build the letter body text for *stat_line*, substituting partner,
        date, company and signature placeholders.
        """
        if context is None:
            context = {}
        # Render in the partner's language.
        context.update({'lang': stat_line.partner_id.lang})
        fp_obj = pooler.get_pool(self.cr.dbname).get('account_followup.followup')
        fp_line = fp_obj.browse(self.cr, self.uid, followup_id, context=context).followup_line
        if not fp_line:
            # NOTE(review): `osv` and `_` are not imported in this module, so
            # this branch would raise a NameError if ever reached -- confirm
            # and add the missing imports upstream.
            raise osv.except_osv(_('Error!'),_("The followup plan defined for the current company does not have any followup action."))
        #the default text will be the first fp_line in the sequence with a description.
        default_text = ''
        li_delay = []
        for line in fp_line:
            if not default_text and line.description:
                default_text = line.description
            li_delay.append(line.delay)
        li_delay.sort(reverse=True)
        # NOTE(review): 'a' appears unused below.
        a = {}
        #look into the lines of the partner that already have a followup level, and take the description of the higher level for which it is available
        partner_line_ids = pooler.get_pool(self.cr.dbname).get('account.move.line').search(self.cr, self.uid, [('partner_id','=',stat_line.partner_id.id),('reconcile_id','=',False),('company_id','=',stat_line.company_id.id),('blocked','=',False),('state','!=','draft'),('debit','!=',False),('account_id.type','=','receivable'),('followup_line_id','!=',False)])
        partner_max_delay = 0
        partner_max_text = ''
        for i in pooler.get_pool(self.cr.dbname).get('account.move.line').browse(self.cr, self.uid, partner_line_ids, context=context):
            if i.followup_line_id.delay > partner_max_delay and i.followup_line_id.description:
                partner_max_delay = i.followup_line_id.delay
                partner_max_text = i.followup_line_id.description
        # Prefer the text of the highest follow-up level already reached.
        text = partner_max_delay and partner_max_text or default_text
        if text:
            text = text % {
                'partner_name': stat_line.partner_id.name,
                'date': time.strftime('%Y-%m-%d'),
                'company_name': stat_line.company_id.name,
                'user_signature': pooler.get_pool(self.cr.dbname).get('res.users').browse(self.cr, self.uid, self.uid, context).signature or '',
            }
        return text
# Register the parser as the 'followup.print' report on the stat-by-partner
# model, rendered from the RML template below.
report_sxw.report_sxw('report.account_followup.followup.print',
       'account_followup.stat.by.partner', 'addons/account_followup/report/account_followup_print.rml',
       parser=report_rappel)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
janeen666/mi-instrument | mi/dataset/parser/wfp_eng__stc_imodem.py | 5 | 7780 | #!/usr/bin/env python
"""
@package mi.dataset.parser.wfp_eng__stc_imodem_particles
@file marine-integrations/mi/dataset/parser/wfp_eng__stc_imodem_particles.py
@author Mark Worden
@brief Particles for the WFP_ENG__STC_IMODEM dataset driver
Release notes:
initial release
"""
__author__ = 'Mark Worden'
__license__ = 'Apache 2.0'
import copy
import ntplib
import struct
from mi.core.log import get_logger
log = get_logger()
from mi.core.exceptions import SampleException, DatasetParserException, UnexpectedDataException
from mi.dataset.parser.WFP_E_file_common import WfpEFileParser, StateKey, \
HEADER_BYTES, SAMPLE_BYTES, STATUS_BYTES, PROFILE_MATCHER, HEADER_MATCHER
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
class WfpEngStcImodemParser(WfpEFileParser):
    def __init__(self,
                 config,
                 state,
                 stream_handle,
                 state_callback,
                 publish_callback,
                 *args, **kwargs):
        """Set up the parser.
        Pulls the three particle classes (start / status / engineering) out
        of the driver configuration, then defers the remaining setup to
        WfpEFileParser.
        """
        self._saved_header = None
        log.debug(config)
        # Maps logical particle roles to the concrete classes to emit.
        particle_classes_dict = config.get(DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT)
        self._start_data_particle_class = particle_classes_dict.get('start_data_particle_class')
        self._status_data_particle_class = particle_classes_dict.get('status_data_particle_class')
        self._engineering_data_particle_class = particle_classes_dict.get('engineering_data_particle_class')
        super(WfpEngStcImodemParser, self).__init__(config,
                                                    state,
                                                    stream_handle,
                                                    state_callback,
                                                    publish_callback,
                                                    *args, **kwargs)
    def set_state(self, state_obj):
        """Reset the parser to a previously saved state.
        :param state_obj: dict that must contain StateKey.POSITION, the byte
            offset into the file to resume reading from
        :raises DatasetParserException: if *state_obj* is malformed
        """
        log.trace("Attempting to set state to: %s", state_obj)
        if not isinstance(state_obj, dict):
            raise DatasetParserException("Invalid state structure")
        if not (StateKey.POSITION in state_obj):
            raise DatasetParserException("Invalid state keys")
        # Discard any partially parsed data before re-seeking the stream.
        self._chunker.clean_all_chunks()
        self._record_buffer = []
        self._saved_header = None
        self._state = state_obj
        self._read_state = state_obj
        self._stream_handle.seek(state_obj[StateKey.POSITION])
def _parse_header(self):
"""
Parse the start time of the profile and the sensor
"""
# read the first bytes from the file
header = self._stream_handle.read(HEADER_BYTES)
match = HEADER_MATCHER.match(header)
# parse the header
if match is not None:
# use the profile start time as the timestamp
fields = struct.unpack('>II', match.group(2))
timestamp = int(fields[1])
self._timestamp = float(ntplib.system_to_ntp_time(timestamp))
log.debug(self._start_data_particle_class)
sample = self._extract_sample(self._start_data_particle_class,
None,
header,
internal_timestamp=self._timestamp)
if sample:
# create particle
self._increment_state(HEADER_BYTES)
log.debug("Extracting header %s with read_state: %s", sample, self._read_state)
self._saved_header = (sample, copy.copy(self._read_state))
else:
raise SampleException("File header does not match header regex")
def parse_record(self, record):
"""
determine if this is a engineering or data record and parse
"""
result_particle = []
# Attempt to match on the profile status record
match = PROFILE_MATCHER.match(record)
if match is not None:
# send to WFP_eng_profiler if WFP
fields = struct.unpack('>ihhII', match.group(0))
# use the profile stop time
timestamp = int(fields[3])
self._timestamp = float(ntplib.system_to_ntp_time(timestamp))
log.debug(self._status_data_particle_class)
sample = self._extract_sample(self._status_data_particle_class, None,
record, internal_timestamp=self._timestamp)
self._increment_state(STATUS_BYTES)
else:
# The record data must be an engineering data record since it was not a profile status record
# pull out the timestamp for this record
fields = struct.unpack('>I', record[:4])
timestamp = int(fields[0])
self._timestamp = float(ntplib.system_to_ntp_time(timestamp))
log.trace("Converting record timestamp %f to ntp timestamp %f", timestamp, self._timestamp)
log.debug(self._engineering_data_particle_class)
sample = self._extract_sample(self._engineering_data_particle_class, None,
record, internal_timestamp=self._timestamp)
self._increment_state(SAMPLE_BYTES)
if sample:
# create particle
log.trace("Extracting sample %s with read_state: %s", sample, self._read_state)
result_particle = (sample, copy.copy(self._read_state))
return result_particle
def parse_chunks(self):
"""
Parse out any pending data chunks in the chunker. If
it is a valid data piece, build a particle, update the position and
timestamp. Go until the chunker has no more valid data.
@retval a list of tuples with sample particles encountered in this
parsing, plus the state. An empty list of nothing was parsed.
"""
result_particles = []
# header gets read in initialization, but need to send it back from parse_chunks
if self._saved_header:
result_particles.append(self._saved_header)
self._saved_header = None
(nd_timestamp, non_data, non_start, non_end) = self._chunker.get_next_non_data_with_index(clean=False)
(timestamp, chunk, start, end) = self._chunker.get_next_data_with_index(clean=True)
self.handle_non_data(non_data, non_end, start)
while chunk is not None:
result_particle = self.parse_record(chunk)
if result_particle:
result_particles.append(result_particle)
(nd_timestamp, non_data, non_start, non_end) = self._chunker.get_next_non_data_with_index(clean=False)
(timestamp, chunk, start, end) = self._chunker.get_next_data_with_index(clean=True)
self.handle_non_data(non_data, non_end, start)
return result_particles
def handle_non_data(self, non_data, non_end, start):
"""
This method handles any non-data that is found in the file
"""
# if non-data is expected, handle it here, otherwise it is an error
if non_data is not None and non_end <= start:
# if this non-data is an error, send an UnexpectedDataException and increment the state
self._increment_state(len(non_data))
# if non-data is a fatal error, directly call the exception, if it is not use the _exception_callback
self._exception_callback(UnexpectedDataException("Found %d bytes of un-expected non-data %s" %
(len(non_data), non_data)))
| bsd-2-clause |
tobegit3hub/glance_docker | glance/db/sqlalchemy/metadef_api/property.py | 6 | 6128 | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db import exception as db_exc
from oslo_log import log as logging
from sqlalchemy import func
import sqlalchemy.orm as sa_orm
from glance.common import exception as exc
from glance.db.sqlalchemy.metadef_api import namespace as namespace_api
from glance.db.sqlalchemy.metadef_api import utils as metadef_utils
from glance.db.sqlalchemy import models_metadef as models
from glance import i18n
LOG = logging.getLogger(__name__)
_ = i18n._
def _get(context, property_id, session):
    """Fetch a MetadefProperty row by primary key.

    :raises exc.MetadefPropertyNotFound: if no row matches property_id.
    """
    # context is unused here but kept for signature symmetry with the other
    # helpers in this module.
    try:
        query = session.query(models.MetadefProperty).filter_by(id=property_id)
        # query.one() raises NoResultFound when the row is absent.
        property_rec = query.one()

    except sa_orm.exc.NoResultFound:
        msg = (_("Metadata definition property not found for id=%s")
               % property_id)
        LOG.warn(msg)
        raise exc.MetadefPropertyNotFound(msg)

    return property_rec
def _get_by_name(context, namespace_name, name, session):
    """get a property; raise if ns not found/visible or property not found"""
    # Raises if the namespace is missing or not visible to this context.
    namespace = namespace_api.get(context, namespace_name, session)

    try:
        query = session.query(models.MetadefProperty).filter_by(
            name=name, namespace_id=namespace['id'])
        property_rec = query.one()

    except sa_orm.exc.NoResultFound:
        # The message is only for the debug log; the raised exception
        # formats its own message from the keyword arguments.
        msg = ("The metadata definition property with name=%(name)s"
               " was not found in namespace=%(namespace_name)s."
               % {'name': name, 'namespace_name': namespace_name})
        LOG.debug(msg)
        raise exc.MetadefPropertyNotFound(property_name=name,
                                          namespace_name=namespace_name)

    return property_rec
def get(context, namespace_name, name, session):
    """Return one property as a dict.

    Raises if the namespace is not found/visible or the property does not
    exist (see _get_by_name).
    """
    return _get_by_name(context, namespace_name, name, session).to_dict()
def get_all(context, namespace_name, session):
    """Return every property in the named namespace as a list of dicts.

    Raises if the namespace is not found or not visible to the context.
    """
    namespace = namespace_api.get(context, namespace_name, session)
    query = session.query(models.MetadefProperty).filter_by(
        namespace_id=namespace['id'])
    return [record.to_dict() for record in query.all()]
def create(context, namespace_name, values, session):
    """Create a property in the named namespace; raise on duplicate name.

    NOTE: mutates the caller's ``values`` dict in place (namespace_id is
    injected and protected attributes are dropped).
    """
    namespace = namespace_api.get(context, namespace_name, session)
    values.update({'namespace_id': namespace['id']})

    property_rec = models.MetadefProperty()
    metadef_utils.drop_protected_attrs(models.MetadefProperty, values)
    property_rec.update(values.copy())

    try:
        # Relies on the DB unique constraint to detect duplicates.
        property_rec.save(session=session)
    except db_exc.DBDuplicateEntry:
        msg = ("Can not create metadata definition property. A property"
               " with name=%(name)s already exists in"
               " namespace=%(namespace_name)s."
               % {'name': property_rec.name,
                  'namespace_name': namespace_name})
        LOG.debug(msg)
        raise exc.MetadefDuplicateProperty(
            property_name=property_rec.name,
            namespace_name=namespace_name)

    return property_rec.to_dict()
def update(context, namespace_name, property_id, values, session):
    """Update a property, raise if ns not found/visible or duplicate result"""
    # Called only for its visibility check / not-found exception.
    namespace_api.get(context, namespace_name, session)

    property_rec = _get(context, property_id, session)
    metadef_utils.drop_protected_attrs(models.MetadefProperty, values)
    # values['updated_at'] = timeutils.utcnow() - done by TS mixin
    try:
        property_rec.update(values.copy())
        property_rec.save(session=session)
    except db_exc.DBDuplicateEntry:
        # Untranslated copy for the debug log ...
        msg = ("Invalid update. It would result in a duplicate"
               " metadata definition property with the same name=%(name)s"
               " in namespace=%(namespace_name)s."
               % {'name': property_rec.name,
                  'namespace_name': namespace_name})
        LOG.debug(msg)
        # ... and a translated copy for the raised exception.
        emsg = (_("Invalid update. It would result in a duplicate"
                  " metadata definition property with the same name=%(name)s"
                  " in namespace=%(namespace_name)s.")
                % {'name': property_rec.name,
                   'namespace_name': namespace_name})
        raise exc.MetadefDuplicateProperty(emsg)

    return property_rec.to_dict()
def delete(context, namespace_name, property_name, session):
    """Delete one property and return it as a dict.

    Raises if the namespace is not found/visible or the property does not
    exist (via _get_by_name).
    """
    property_rec = _get_by_name(
        context, namespace_name, property_name, session)
    # _get_by_name either returns a row or raises MetadefPropertyNotFound,
    # so the previous `if property_rec:` truthiness guard was dead code.
    session.delete(property_rec)
    session.flush()

    return property_rec.to_dict()
def delete_namespace_content(context, namespace_id, session):
    """Use this def only if the ns for the id has been verified as visible

    Deletes every property row in the namespace and returns the number of
    rows removed.
    """
    # The previous `count = 0` initialization was dead code: the value was
    # unconditionally overwritten by query.delete().
    query = session.query(models.MetadefProperty).filter_by(
        namespace_id=namespace_id)
    return query.delete(synchronize_session='fetch')
def delete_by_namespace_name(context, namespace_name, session):
    """Delete all properties in the named namespace; return the row count.

    Raises if the namespace is not found or not visible to the context.
    """
    ns = namespace_api.get(context, namespace_name, session)
    return delete_namespace_content(context, ns['id'], session)
def count(context, namespace_name, session):
    """Get the count of properties for a namespace, raise if ns not found"""
    namespace = namespace_api.get(context, namespace_name, session)
    # Issues SELECT COUNT(id); scalar() unwraps the single result value.
    query = session.query(func.count(models.MetadefProperty.id)).filter_by(
        namespace_id=namespace['id'])
    return query.scalar()
| apache-2.0 |
theguardian/KodiDB | cherrypy/_cpthreadinglocal.py | 194 | 6616 | # This is a backport of Python-2.4's threading.local() implementation
"""Thread-local objects
(Note that this module provides a Python version of the
threading.local class.  Depending on the version of Python you're
using, there may be a faster one available.  You should always import
the local class from threading.)
Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:
>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42
You can also access the local-object's dictionary:
>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]
What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:
>>> log = []
>>> def f():
... items = mydata.__dict__.items()
... items.sort()
... log.append(items)
... mydata.number = 11
... log.append(mydata.number)
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[], 11]
we get different data. Furthermore, changes made in the other thread
don't affect data seen in this thread:
>>> mydata.number
42
Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.
You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
... initialized = False
... def __init__(self, **kw):
... if self.initialized:
... raise SystemError('__init__ called too many times')
... self.initialized = True
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread. This
is necessary to initialize each thread's dictionary.
Now if we create a local object:
>>> mydata = MyLocal(color='red')
Now we have a default number:
>>> mydata.number
2
an initial color:
>>> mydata.color
'red'
>>> del mydata.color
And a method that operates on the data:
>>> mydata.squared()
4
As before, we can access the data in a separate thread:
>>> log = []
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[('color', 'red'), ('initialized', True)], 11]
without affecting this thread's data:
>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'
Note that subclasses can define slots, but they are not thread
local. They are shared across threads:
>>> class MyLocal(local):
... __slots__ = 'number'
>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'
So, the separate thread:
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
affects what we see:
>>> mydata.number
11
>>> del mydata
"""
# Threading import is at end
class _localbase(object):
__slots__ = '_local__key', '_local__args', '_local__lock'
def __new__(cls, *args, **kw):
self = object.__new__(cls)
key = 'thread.local.' + str(id(self))
object.__setattr__(self, '_local__key', key)
object.__setattr__(self, '_local__args', (args, kw))
object.__setattr__(self, '_local__lock', RLock())
if args or kw and (cls.__init__ is object.__init__):
raise TypeError("Initialization arguments are not supported")
# We need to create the thread dict in anticipation of
# __init__ being called, to make sure we don't call it
# again ourselves.
dict = object.__getattribute__(self, '__dict__')
currentThread().__dict__[key] = dict
return self
def _patch(self):
    """Swap `self.__dict__` for the calling thread's private dict.

    Called (under the instance lock) at the top of every attribute access
    on `local`.  On a thread's first access, a fresh dict is created and
    the subclass __init__ (if any) is re-run with the originally supplied
    arguments, so each thread sees an initialized namespace.
    """
    key = object.__getattribute__(self, '_local__key')
    d = currentThread().__dict__.get(key)
    if d is None:
        # First access from this thread: create its private namespace.
        d = {}
        currentThread().__dict__[key] = d
        object.__setattr__(self, '__dict__', d)

        # we have a new instance dict, so call out __init__ if we have
        # one
        cls = type(self)
        if cls.__init__ is not object.__init__:
            args, kw = object.__getattribute__(self, '_local__args')
            cls.__init__(self, *args, **kw)
    else:
        object.__setattr__(self, '__dict__', d)
class local(_localbase):
    """Thread-local namespace object (backport of Python 2.4's
    threading.local).

    Every attribute access first installs the calling thread's private
    __dict__ via _patch(), serialized by the per-instance RLock.
    """

    def __getattribute__(self, name):
        lock = object.__getattribute__(self, '_local__lock')
        lock.acquire()
        try:
            _patch(self)
            return object.__getattribute__(self, name)
        finally:
            lock.release()

    def __setattr__(self, name, value):
        lock = object.__getattribute__(self, '_local__lock')
        lock.acquire()
        try:
            _patch(self)
            return object.__setattr__(self, name, value)
        finally:
            lock.release()

    def __delattr__(self, name):
        lock = object.__getattribute__(self, '_local__lock')
        lock.acquire()
        try:
            _patch(self)
            return object.__delattr__(self, name)
        finally:
            lock.release()

    def __del__():
        # Factory executed once at class-creation time; the inner function
        # it returns becomes the real __del__.  The helpers are bound as
        # closure variables so they stay reachable during interpreter
        # shutdown.
        # NOTE(review): the module-level `from threading import ...
        # enumerate ...` at the bottom of this file has not executed yet
        # when this factory runs, so `enumerate` here presumably binds the
        # builtin -- verify that __del__ cleanup actually works as intended.
        threading_enumerate = enumerate
        __getattribute__ = object.__getattribute__

        def __del__(self):
            """Remove this object's per-thread dicts from all live threads."""
            key = __getattribute__(self, '_local__key')

            try:
                threads = list(threading_enumerate())
            except:
                # if enumerate fails, as it seems to do during
                # shutdown, we'll skip cleanup under the assumption
                # that there is nothing to clean up
                return

            for thread in threads:
                try:
                    __dict__ = thread.__dict__
                except AttributeError:
                    # Thread is dying, rest in peace
                    continue

                if key in __dict__:
                    try:
                        del __dict__[key]
                    except KeyError:
                        pass # didn't have anything in this thread

        return __del__
    __del__ = __del__()
from threading import currentThread, enumerate, RLock
| gpl-2.0 |
SlimLP-Y300/chil360-kernel | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0        # -q: suppress progress() output
test = 0         # -t: syntax-check mode; print sysfs paths instead of using them
comments = 0     # -c: echo test-file comments after the first command

# sysfs interface exposed by the in-kernel rt-mutex tester.
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"

# Command opcodes
# Symbolic command names (test-file syntax) -> numeric opcode strings
# written to the tester's command file.
cmd_opcodes = {
    "schedother"    : "1",
    "schedfifo"     : "2",
    "lock"          : "3",
    "locknowait"    : "4",
    "lockint"       : "5",
    "lockintnowait" : "6",
    "lockcont"      : "7",
    "unlock"        : "8",
    "signal"        : "11",
    "resetevent"    : "98",
    "reset"         : "99",
    }

# Test-condition names -> [status field letter, comparison, fixed expected
# value or None when the expected value comes from the test line].
test_opcodes = {
    "prioeq"        : ["P" , "eq" , None],
    "priolt"        : ["P" , "lt" , None],
    "priogt"        : ["P" , "gt" , None],
    "nprioeq"       : ["N" , "eq" , None],
    "npriolt"       : ["N" , "lt" , None],
    "npriogt"       : ["N" , "gt" , None],
    "unlocked"      : ["M" , "eq" , 0],
    "trylock"       : ["M" , "eq" , 1],
    "blocked"       : ["M" , "eq" , 2],
    "blockedwake"   : ["M" , "eq" , 3],
    "locked"        : ["M" , "eq" , 4],
    "opcodeeq"      : ["O" , "eq" , None],
    "opcodelt"      : ["O" , "lt" , None],
    "opcodegt"      : ["O" , "gt" , None],
    "eventeq"       : ["E" , "eq" , None],
    "eventlt"       : ["E" , "lt" , None],
    "eventgt"       : ["E" , "gt" , None],
    }
# Print usage information
def usage():
    """Print command-line usage to stdout (Python 2 print statements)."""
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print " -c    display comments after first command"
    print " -h    help"
    print " -q    quiet mode"
    print " -t    test mode (syntax check)"
    print " testfile: read test specification from testfile"
    print " otherwise from stdin"
    return
# Print progress when not in quiet mode
def progress(str):
    """Echo a progress line unless -q was given.

    (The parameter name shadows the builtin `str`; kept for byte
    compatibility with the original.)
    """
    if not quiet:
        print str
# Analyse a status value
def analyse(val, top, arg):
    """Compare one status-field value against the expected value.

    val: raw value string read from the tester's status file
    top: [field letter, comparison ("eq"/"lt"/"gt"), fixed expected value
         or None when the expected value comes from the test line]
    arg: argument column from the test line
    Returns 1 on match, 0 otherwise.
    """

    intval = int(val)

    if top[0] == "M":
        # Mutex state field: `arg` selects the decimal digit (per-thread).
        # Note: '/' is Python 2 integer division here.
        intval = intval / (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        # Opcode field: allow symbolic names from cmd_opcodes.
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)

    # progress("%d %s %d" %(intval, top[1], argval))

    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0
# Parse the commandline
try:
    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
    usage()
    sys.exit(1)

# Parse commandline options
for option, value in options:
    if option == "-c":
        comments = 1
    elif option == "-q":
        quiet = 1
    elif option == "-t":
        test = 1
    elif option == '-h':
        usage()
        sys.exit(0)

# Select the input source
# A positional argument names the test file; otherwise read from stdin.
if arguments:
    try:
        fd = open(arguments[0])
    except Exception,ex:
        sys.stderr.write("File not found %s\n" %(arguments[0]))
        sys.exit(1)
else:
    fd = sys.stdin
linenr = 0

# Read the test patterns
# Each non-comment line has the form  cmd:opcode:threadid:data
while 1:
    linenr = linenr + 1
    line = fd.readline()
    if not len(line):
        break
    line = line.strip()
    parts = line.split(":")
    if not parts or len(parts) < 1:
        continue
    if len(parts[0]) == 0:
        continue
    if parts[0].startswith("#"):
        # Comments are echoed only once -c has been "armed" by the first
        # real command line (comments goes 1 -> 2 below).
        if comments > 1:
            progress(line)
        continue
    if comments == 1:
        comments = 2
    progress(line)
    cmd = parts[0].strip().lower()
    opc = parts[1].strip().lower()
    tid = parts[2].strip()
    dat = parts[3].strip()
    try:
        # Test or wait for a status value
        # "t" checks once; "w" polls until the condition holds.
        if cmd == "t" or cmd == "w":
            testop = test_opcodes[opc]
            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
            if test:
                print fname
                continue
            while 1:
                query = 1
                fsta = open(fname, 'r')
                status = fsta.readline().strip()
                fsta.close()
                stat = status.split(",")
                for s in stat:
                    s = s.strip()
                    if s.startswith(testop[0]):
                        # Separate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                if query or cmd == "t":
                    break
                progress("   " + status)
            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)
        # Issue a command to the tester
        elif cmd == "c":
            cmdnr = cmd_opcodes[opc]
            # Build command string and sys filename
            cmdstr = "%s:%s" %(cmdnr, dat)
            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
            if test:
                print fname
                continue
            fcmd = open(fname, 'w')
            fcmd.write(cmdstr)
            fcmd.close()
    except Exception,ex:
        sys.stderr.write(str(ex))
        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
        if not test:
            fd.close()
            sys.exit(1)

# Normal exit pass
print "Pass"
sys.exit(0)
mcardillo55/django | tests/wsgi/tests.py | 188 | 4145 | from __future__ import unicode_literals
import unittest
from django.core.exceptions import ImproperlyConfigured
from django.core.servers.basehttp import get_internal_wsgi_application
from django.core.signals import request_started
from django.core.wsgi import get_wsgi_application
from django.db import close_old_connections
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.utils import six
@override_settings(ROOT_URLCONF="wsgi.urls")
class WSGITest(TestCase):
    """Tests for ``django.core.wsgi.get_wsgi_application``."""

    def setUp(self):
        # Keep request_started from closing DB connections mid-test.
        request_started.disconnect(close_old_connections)

    def tearDown(self):
        # Restore the default signal wiring for other tests.
        request_started.connect(close_old_connections)

    def test_get_wsgi_application(self):
        """
        Verify that ``get_wsgi_application`` returns a functioning WSGI
        callable.
        """
        application = get_wsgi_application()

        environ = RequestFactory()._base_environ(
            PATH_INFO="/",
            CONTENT_TYPE="text/html; charset=utf-8",
            REQUEST_METHOD="GET"
        )

        response_data = {}

        def start_response(status, headers):
            response_data["status"] = status
            response_data["headers"] = headers

        response = application(environ, start_response)

        self.assertEqual(response_data["status"], "200 OK")
        self.assertEqual(
            response_data["headers"],
            [('Content-Type', 'text/html; charset=utf-8')])
        self.assertEqual(
            bytes(response),
            b"Content-Type: text/html; charset=utf-8\r\n\r\nHello World!")

    def test_file_wrapper(self):
        """
        Verify that FileResponse uses wsgi.file_wrapper.
        """
        class FileWrapper(object):
            # Minimal stand-in: just closes the file-like object it is
            # handed, as the spec's wrapper would eventually do.
            def __init__(self, filelike, blksize=8192):
                filelike.close()

        application = get_wsgi_application()
        environ = RequestFactory()._base_environ(
            PATH_INFO='/file/',
            REQUEST_METHOD='GET',
            **{'wsgi.file_wrapper': FileWrapper}
        )
        response_data = {}

        def start_response(status, headers):
            response_data['status'] = status
            response_data['headers'] = headers

        response = application(environ, start_response)
        self.assertEqual(response_data['status'], '200 OK')
        # FileResponse must have been wrapped by the provided file_wrapper.
        self.assertIsInstance(response, FileWrapper)
class GetInternalWSGIApplicationTest(unittest.TestCase):
    """Tests for ``django.core.servers.basehttp.get_internal_wsgi_application``."""

    @override_settings(WSGI_APPLICATION="wsgi.wsgi.application")
    def test_success(self):
        """
        If ``WSGI_APPLICATION`` is a dotted path, the referenced object is
        returned.
        """
        app = get_internal_wsgi_application()

        from .wsgi import application

        self.assertIs(app, application)

    @override_settings(WSGI_APPLICATION=None)
    def test_default(self):
        """
        If ``WSGI_APPLICATION`` is ``None``, the return value of
        ``get_wsgi_application`` is returned.
        """
        # Mock out get_wsgi_application so we know its return value is used
        fake_app = object()

        def mock_get_wsgi_app():
            return fake_app

        from django.core.servers import basehttp
        _orig_get_wsgi_app = basehttp.get_wsgi_application
        basehttp.get_wsgi_application = mock_get_wsgi_app
        try:
            app = get_internal_wsgi_application()

            self.assertIs(app, fake_app)
        finally:
            # Always restore the real function, even if the assertion fails.
            basehttp.get_wsgi_application = _orig_get_wsgi_app

    @override_settings(WSGI_APPLICATION="wsgi.noexist.app")
    def test_bad_module(self):
        # Missing module must surface as ImproperlyConfigured.
        with six.assertRaisesRegex(self,
            ImproperlyConfigured,
            r"^WSGI application 'wsgi.noexist.app' could not be loaded; Error importing.*"):

            get_internal_wsgi_application()

    @override_settings(WSGI_APPLICATION="wsgi.wsgi.noexist")
    def test_bad_name(self):
        # Module exists but the attribute does not: also ImproperlyConfigured.
        with six.assertRaisesRegex(self,
            ImproperlyConfigured,
            r"^WSGI application 'wsgi.wsgi.noexist' could not be loaded; Error importing.*"):

            get_internal_wsgi_application()
yl565/statsmodels | tools/cythonize.py | 5 | 6566 | #!/usr/bin/env python
""" cythonize
Cythonize pyx files into C files as needed.
Usage: cythonize [root_dir]
Default [root_dir] is 'statsmodels'.
Checks pyx files to see if they have been changed relative to their
corresponding C files. If they have, then runs cython on these files to
recreate the C files.
The script thinks that the pyx files have changed relative to the C files
by comparing hashes stored in a database file.
Simple script to invoke Cython (and Tempita) on all .pyx (.pyx.in)
files; while waiting for a proper build system. Uses file hashes to
figure out if rebuild is needed.
For now, this script should be run by developers when changing Cython files
only, and the resulting C files checked in, so that end-users (and Python-only
developers) do not get the Cython/Tempita dependencies.
Originally written by Dag Sverre Seljebotn, and copied here from:
https://raw.github.com/dagss/private-scipy-refactor/cythonize/cythonize.py
Note: this script does not check any of the dependent C libraries; it only
operates on the Cython .pyx files.
"""
from __future__ import division, print_function, absolute_import
import os
import re
import sys
import hashlib
import subprocess
# Files (in the current working directory) used to persist state between runs.
HASH_FILE = 'cythonize.dat'
EXCLUSION_FILE = 'cythonize_exclusions.dat'
DEFAULT_ROOT = 'statsmodels'

# WindowsError is not defined on unix systems
try:
    WindowsError
except NameError:
    WindowsError = None
#
# Rules
#
def process_pyx(fromfile, tofile):
    """Run Cython on `fromfile`, writing the generated C/C++ to `tofile`.

    Tries the `cython` executable first and falls back to invoking the
    Cython package through the current interpreter.  Raises if Cython is
    missing, too old, or reports a failure.
    """
    try:
        from Cython.Compiler.Version import version as cython_version
        from distutils.version import LooseVersion
        if LooseVersion(cython_version) < LooseVersion('0.19'):
            raise Exception('Building Statsmodels requires Cython >= 0.19')

    except ImportError:
        # Cython package not importable here; the subprocess call below
        # will surface a useful error if it is truly unavailable.
        pass

    flags = ['--fast-fail']
    if tofile.endswith('.cxx'):
        flags += ['--cplus']

    try:
        try:
            r = subprocess.call(['cython'] + flags + ["-o", tofile, fromfile])
            if r != 0:
                raise Exception('Cython failed')
        except OSError:
            # There are ways of installing Cython that don't result in a cython
            # executable on the path, see gh-2397.
            r = subprocess.call([sys.executable, '-c',
                                 'import sys; from Cython.Compiler.Main import '
                                 'setuptools_main as main; sys.exit(main())'] + flags +
                                ["-o", tofile, fromfile])
            if r != 0:
                raise Exception('Cython failed')
    except OSError:
        raise OSError('Cython needs to be installed')
def process_tempita_pyx(fromfile, tofile):
    """Expand a Tempita `.pyx.in` template, then cythonize the result.

    Writes the rendered `.pyx` next to the template and delegates to
    process_pyx() for the C generation.
    """
    try:
        try:
            # Cython bundles a copy of Tempita; prefer it if present.
            from Cython import Tempita as tempita
        except ImportError:
            import tempita
    except ImportError:
        raise Exception('Building Statsmodels requires Tempita: '
                        'pip install --user Tempita')
    with open(fromfile, "r") as f:
        tmpl = f.read()
    pyxcontent = tempita.sub(tmpl)
    assert fromfile.endswith('.pyx.in')
    pyxfile = fromfile[:-len('.pyx.in')] + '.pyx'
    with open(pyxfile, "w") as f:
        f.write(pyxcontent)
    process_pyx(pyxfile, tofile)
# Source-extension -> converter function; consulted by find_process_files()
# to decide how each file is turned into C.
rules = {
    # fromext : function
    '.pyx' : process_pyx,
    '.pyx.in' : process_tempita_pyx
    }
#
# Hash db
#
def load_hashes(filename):
    """Load the hash database from `filename`.

    Returns ``{source path: (input sha1, output sha1)}``; an empty dict
    when the database file does not exist.
    """
    hashes = {}
    if os.path.isfile(filename):
        # `with` guarantees the handle is closed; the loop variable was
        # previously named `filename`, shadowing the parameter.
        with open(filename, 'r') as f:
            for line in f:
                path, inhash, outhash = line.split()
                hashes[path] = (inhash, outhash)
    return hashes
def save_hashes(hash_db, filename):
    """Write the hash database to `filename`.

    One ``name inhash outhash`` line per entry, sorted by name so the file
    is stable across runs.
    """
    lines = ["%s %s %s\n" % (name, pair[0], pair[1])
             for name, pair in sorted(hash_db.items())]
    with open(filename, 'w') as f:
        f.writelines(lines)
def sha1_of_file(filename):
    """Return the hex SHA-1 digest of the file's contents."""
    with open(filename, "rb") as f:
        return hashlib.sha1(f.read()).hexdigest()
#
# Exclusions
#
def load_exclusions(filename):
    """Return the list of excluded paths, one per line in `filename`.

    An empty list is returned when the file does not exist.
    """
    if not os.path.isfile(filename):
        return []
    with open(filename, 'r') as f:
        return f.read().splitlines()
#
# Main program
#
def normpath(path):
    """Normalize `path` to forward slashes and strip a leading './'."""
    slashed = path.replace(os.sep, '/')
    if slashed.startswith('./'):
        return slashed[2:]
    return slashed
def get_hash(frompath, topath):
    """Return ``(sha1 of source, sha1 of target)``.

    The target component is None when `topath` does not exist yet.
    """
    to_digest = sha1_of_file(topath) if os.path.exists(topath) else None
    return (sha1_of_file(frompath), to_digest)
def process(path, fromfile, tofile, processor_function, hash_db):
    """Regenerate `tofile` from `fromfile` when the stored hashes are stale.

    Skips the file if the current (input, output) hash pair matches the
    database entry; otherwise runs `processor_function` from inside `path`
    and records the fresh hashes in `hash_db`.
    """
    fullfrompath = os.path.join(path, fromfile)
    fulltopath = os.path.join(path, tofile)
    current_hash = get_hash(fullfrompath, fulltopath)
    if current_hash == hash_db.get(normpath(fullfrompath), None):
        print('%s has not changed' % fullfrompath)
        return

    orig_cwd = os.getcwd()
    try:
        # Processor functions expect to run from the file's own directory.
        os.chdir(path)
        print('Processing %s' % fullfrompath)
        processor_function(fromfile, tofile)
    finally:
        os.chdir(orig_cwd)
    # changed target file, recompute hash
    current_hash = get_hash(fullfrompath, fulltopath)
    # store hash in db
    hash_db[normpath(fullfrompath)] = current_hash
def find_process_files(root_dir):
    """Walk `root_dir` and cythonize every changed .pyx / .pyx.in file.

    Loads the hash database and exclusion list, applies the `rules` table
    to each candidate file, and saves the updated hashes at the end.
    """
    hash_db = load_hashes(HASH_FILE)
    exclusions = load_exclusions(EXCLUSION_FILE)
    for cur_dir, dirs, files in os.walk(root_dir):
        for filename in files:
            in_file = os.path.join(cur_dir, filename + ".in")
            if filename.endswith('.pyx') and os.path.isfile(in_file):
                # This .pyx is generated from a .pyx.in template; the
                # template itself will be processed instead.
                continue
            if os.path.join(cur_dir, filename) in exclusions:
                continue
            for fromext, function in rules.items():
                if filename.endswith(fromext):
                    toext = ".c"
                    # A "# distutils: language = c++" directive switches
                    # the output to C++.
                    with open(os.path.join(cur_dir, filename), 'rb') as f:
                        data = f.read()
                    m = re.search(br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$", data, re.I|re.M)
                    if m:
                        toext = ".cxx"
                    fromfile = filename
                    tofile = filename[:-len(fromext)] + toext
                    process(cur_dir, fromfile, tofile, function, hash_db)
    save_hashes(hash_db, HASH_FILE)
def main():
    """Entry point: cythonize files under the directory named on the
    command line, or under DEFAULT_ROOT when none is given."""
    args = sys.argv[1:]
    root_dir = args[0] if args else DEFAULT_ROOT
    find_process_files(root_dir)
if __name__ == '__main__':
main()
| bsd-3-clause |
SPriyaJain/studybuddy | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/__init__.py | 360 | 2852 | """
urllib3 - Thread-safe connection pooling and re-using.
"""
from __future__ import absolute_import
import warnings
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util.request import make_headers
from .util.url import get_host
from .util.timeout import Timeout
from .util.retry import Retry
# Set default logging handler to avoid "No handler found" warnings.
import logging
try:  # Python 2.7+
    from logging import NullHandler
except ImportError:
    # Minimal backport for Python < 2.7: a handler that drops every record,
    # so library users configure logging themselves.
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass
__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
__license__ = 'MIT'
__version__ = '1.16'
__all__ = (
'HTTPConnectionPool',
'HTTPSConnectionPool',
'PoolManager',
'ProxyManager',
'HTTPResponse',
'Retry',
'Timeout',
'add_stderr_logger',
'connection_from_url',
'disable_warnings',
'encode_multipart_formdata',
'get_host',
'make_headers',
'proxy_from_url',
)
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
    """
    Helper for quickly adding a StreamHandler to the logger. Useful for
    debugging.

    Returns the handler after adding it.
    """
    # This method needs to be in this __init__.py to get the __name__ correct
    # even if urllib3 is vendored within another package.
    pkg_logger = logging.getLogger(__name__)
    stderr_handler = logging.StreamHandler()
    fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    stderr_handler.setFormatter(fmt)
    pkg_logger.addHandler(stderr_handler)
    pkg_logger.setLevel(level)
    pkg_logger.debug('Added a stderr logging handler to logger: %s', __name__)
    return stderr_handler
del NullHandler
# All warning filters *must* be appended unless you're really certain that they
# shouldn't be: otherwise, it's very hard for users to use most Python
# mechanisms to silence them.
# SecurityWarning's always go off by default.
warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
# SubjectAltNameWarning's should go off once per host
warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
append=True)
# SNIMissingWarnings should go off only once.
warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
def disable_warnings(category=exceptions.HTTPWarning):
    """
    Helper for quickly disabling all urllib3 warnings.
    """
    # 'ignore' drops every warning that is an instance of `category`
    # (default: the urllib3 HTTPWarning root class).
    warnings.simplefilter('ignore', category)
| mit |
travisfcollins/gnuradio | gnuradio-runtime/python/pmt/__init__.py | 39 | 1997 | #
# Copyright 2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
# The presence of this file turns this directory into a Python package
'''
Polymorphic Types.
The type can really be used to store anything, but also has simple
conversion methods for common data types such as bool, long, or a
vector.
The polymorphic type simplifies message passing between blocks, as all
of the data is of the same type, including the message. Tags also use
PMTs as data type, so a stream tag can be of any logical data type. In
a sense, PMTs are a way to extend C++' strict typing with something
more flexible.
The PMT library supports the following major types:
bool, symbol (string), integer, real, complex, null, pair, list,
vector, dict, uniform_vector, any (boost::any cast)
'''
import os

# Import the SWIG-generated bindings.  When running from the build tree
# they may not be on the package path yet, so fall back to the in-tree
# swig directory and retry.
try:
    from pmt_swig import *
except ImportError:
    dirname, filename = os.path.split(os.path.abspath(__file__))
    __path__.append(os.path.join(dirname, "..", "..", "swig"))
    from pmt_swig import *

# due to changes in the PMT_NIL singleton for static builds, we force
# this into Python here.
PMT_NIL = get_PMT_NIL()
PMT_T = get_PMT_T()
PMT_F = get_PMT_F()
PMT_EOF = get_PMT_EOF()

# Pythonic conversion helpers re-exported under friendlier names.
from pmt_to_python import pmt_to_python as to_python
from pmt_to_python import python_to_pmt as to_pmt
| gpl-3.0 |
defionscode/ansible-modules-extras | messaging/rabbitmq_vhost.py | 161 | 4201 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Chatham Financial <oss@chathamfinancial.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: rabbitmq_vhost
short_description: Manage the state of a virtual host in RabbitMQ
description:
- Manage the state of a virtual host in RabbitMQ
version_added: "1.1"
author: '"Chris Hoffman (@choffman)"'
options:
name:
description:
- The name of the vhost to manage
required: true
default: null
aliases: [vhost]
node:
description:
- erlang node name of the rabbit we wish to configure
required: false
default: rabbit
version_added: "1.2"
tracing:
description:
- Enable/disable tracing for a vhost
default: "no"
choices: [ "yes", "no" ]
aliases: [trace]
state:
description:
- The state of vhost
default: present
choices: [present, absent]
'''
EXAMPLES = '''
# Ensure that the vhost /test exists.
- rabbitmq_vhost: name=/test state=present
'''
class RabbitMqVhost(object):
    """Thin wrapper around ``rabbitmqctl`` for managing one RabbitMQ vhost."""

    def __init__(self, module, name, tracing, node):
        self.module = module
        self.name = name
        self.tracing = tracing
        self.node = node
        self._tracing = False
        self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True)

    def _exec(self, args, run_in_check_mode=False):
        # Mutating commands are skipped in check mode; read-only commands
        # opt in with run_in_check_mode=True.
        if run_in_check_mode or not self.module.check_mode:
            cmd = [self._rabbitmqctl, '-q', '-n', self.node]
            rc, out, err = self.module.run_command(cmd + args, check_rc=True)
            return out.splitlines()
        return list()

    def get(self):
        """Return True when the vhost exists; caches its tracing flag."""
        listing = self._exec(['list_vhosts', 'name', 'tracing'], True)
        for row in listing:
            vhost_name, tracing_flag = row.split('\t')
            if vhost_name == self.name:
                self._tracing = self.module.boolean(tracing_flag)
                return True
        return False

    def add(self):
        return self._exec(['add_vhost', self.name])

    def delete(self):
        return self._exec(['delete_vhost', self.name])

    def set_tracing(self):
        """Reconcile tracing with the requested state; True if changed."""
        if self.tracing == self._tracing:
            return False
        if self.tracing:
            self._enable_tracing()
        else:
            self._disable_tracing()
        return True

    def _enable_tracing(self):
        return self._exec(['trace_on', '-p', self.name])

    def _disable_tracing(self):
        return self._exec(['trace_off', '-p', self.name])
def main():
    # Argument spec mirrors the DOCUMENTATION block; 'tracing' is declared
    # type='bool' so values such as "yes"/"no"/"on"/"off" are accepted.
    arg_spec = dict(
        name=dict(required=True, aliases=['vhost']),
        tracing=dict(default='off', aliases=['trace'], type='bool'),
        state=dict(default='present', choices=['present', 'absent']),
        node=dict(default='rabbit'),
    )
    module = AnsibleModule(
        argument_spec=arg_spec,
        supports_check_mode=True
    )

    name = module.params['name']
    tracing = module.params['tracing']
    state = module.params['state']
    node = module.params['node']

    rabbitmq_vhost = RabbitMqVhost(module, name, tracing, node)

    changed = False
    if rabbitmq_vhost.get():
        if state == 'absent':
            # Vhost exists but should not: remove it.
            rabbitmq_vhost.delete()
            changed = True
        else:
            # Vhost already present; only tracing may need reconciling.
            if rabbitmq_vhost.set_tracing():
                changed = True
    elif state == 'present':
        # Vhost missing: create it and apply the requested tracing state.
        rabbitmq_vhost.add()
        rabbitmq_vhost.set_tracing()
        changed = True

    module.exit_json(changed=changed, name=name, state=state)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
nash-x/hws | cinder/quota_utils.py | 11 | 2646 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder import quota
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
def get_volume_type_reservation(ctxt, volume, type_id):
    """Reserve quota (volume count and gigabytes) for *volume* against the
    quotas of the given volume type, returning the reservations on success.

    :raises VolumeSizeExceedsAvailableQuota: when a gigabytes quota is hit
    :raises VolumeLimitExceeded: when a volume-count quota is hit
    """
    # Reserve quotas for the given volume type
    try:
        reserve_opts = {'volumes': 1, 'gigabytes': volume['size']}
        QUOTAS.add_volume_type_opts(ctxt,
                                    reserve_opts,
                                    type_id)
        reservations = QUOTAS.reserve(ctxt, **reserve_opts)
    except exception.OverQuota as e:
        overs = e.kwargs['overs']
        usages = e.kwargs['usages']
        quotas = e.kwargs['quotas']

        def _consumed(name):
            # Already-accounted quota = reserved + in use.
            return (usages[name]['reserved'] + usages[name]['in_use'])

        for over in overs:
            # Substring match ('in', not '==') so per-type counters such
            # as 'gigabytes_<type>' are handled too.
            if 'gigabytes' in over:
                s_size = volume['size']
                d_quota = quotas[over]
                d_consumed = _consumed(over)
                msg = _("Quota exceeded for %(s_pid)s, tried to create "
                        "%(s_size)sG volume - (%(d_consumed)dG of "
                        "%(d_quota)dG already consumed)")
                LOG.warn(msg % {'s_pid': ctxt.project_id,
                                's_size': s_size,
                                'd_consumed': d_consumed,
                                'd_quota': d_quota})
                raise exception.VolumeSizeExceedsAvailableQuota(
                    requested=s_size, quota=d_quota, consumed=d_consumed)
            elif 'volumes' in over:
                msg = _("Quota exceeded for %(s_pid)s, tried to create "
                        "volume (%(d_consumed)d volumes "
                        "already consumed)")
                LOG.warn(msg % {'s_pid': ctxt.project_id,
                                'd_consumed': _consumed(over)})
                raise exception.VolumeLimitExceeded(
                    allowed=quotas[over])
    return reservations
| apache-2.0 |
roberthartung/risk | map_absolute-to-relative.py | 1 | 3374 | from xml.dom.minidom import parse, Element, Node
# Rewrite the SVG so country shapes use group-relative coordinates: each
# country <g> gets a translate() transform taken from the first coordinate
# of its first path, all children are shifted back by that offset, and a
# placeholder army-count <text> label is added over each country circle.
INPUT="map.svg"
OUTPUT="web/map.svg"

doc = parse(INPUT)
svgroot=doc.getElementsByTagName('svg')[0]

connectorsElement = None
countriesElement = None
# Locate the two top-level layers by their id attribute.
for child in svgroot.childNodes:
    if (child.nodeType == Node.ELEMENT_NODE) and (child.nodeName == "g"):
        attrid = child.attributes['id'].value
        if attrid == 'connectors':
            connectorsElement = child
        elif attrid == 'countries':
            countriesElement = child

# countries layer -> continent groups -> country groups -> path parts
for continentNode in countriesElement.childNodes:
    if (continentNode.nodeType == Node.ELEMENT_NODE) and (continentNode.nodeName == "g"):
        continentid = continentNode.attributes['id'].value
        # print(continentNode)
        for countryNode in continentNode.childNodes:
            if (countryNode.nodeType == Node.ELEMENT_NODE) and (countryNode.nodeName == "g"):
                countryid = countryNode.attributes['id'].value
                primaryPartNode = None
                offset = None
                for countryPartNode in countryNode.childNodes:
                    if (countryPartNode.nodeType == Node.ELEMENT_NODE) and (countryPartNode.nodeName == "path"):
                        # print(countryPartNode.attributes['id'].value)
                        if primaryPartNode == None:
                            # First path becomes the primary part; its first
                            # coordinate defines the group's translate offset.
                            primaryPartNode = countryPartNode
                            coords = primaryPartNode.attributes['d'].value.split(' ')
                            offset = coords[1].split(',')
                            offset[0] = float(offset[0])
                            offset[1] = float(offset[1])
                            countryNode.attributes['transform'] = 'translate('+str(offset[0])+','+str(offset[1])+')'
                        # Shift every part's first coordinate by the offset so
                        # the group transform puts it back where it was.
                        coords = countryPartNode.attributes['d'].value.split(' ')
                        firstCoords = coords[1].split(',')
                        firstCoords[0] = float(firstCoords[0])
                        firstCoords[1] = float(firstCoords[1])
                        coords[1] = str(firstCoords[0]-offset[0])+','+str(firstCoords[1]-offset[1])
                        countryPartNode.attributes['d'] = ' '.join(coords)
                # Move the marker circle into the group-relative frame too,
                # and give it a visible outline.
                circle=countryNode.getElementsByTagName('circle')[0]
                circle.attributes['cx'] = str(float(circle.attributes['cx'].value)-offset[0])
                circle.attributes['cy'] = str(float(circle.attributes['cy'].value)-offset[1])
                circle.attributes['style'].value += '; stroke: #333; stroke-width: 1; stroke-dasharray: 0 0;'
                # Placeholder army-count label centered on the circle.
                text = doc.createElement('text')
                text.appendChild(doc.createTextNode('1337'))
                text.attributes['x'] = circle.attributes['cx'].value
                text.attributes['y'] = circle.attributes['cy'].value
                text.attributes['dy'] = '.3em'
                text.attributes['text-anchor'] = 'middle'
                text.attributes['fill'] = 'white'
                text.attributes['style'] = 'font-size: 11px;'
                #text.attributes['baseline-shift'] =
                countryNode.appendChild(text)
                # <text x="50%" y="50%" text-anchor="middle" stroke="#51c5cf" stroke-width="2px" dy=".3em">Look, I’m centered!Look, I’m centered!</text>

with open(OUTPUT, 'w') as fh:
    fh.write(doc.toxml("UTF-8").decode("utf-8"))
    # NOTE(review): close() is redundant here — the with-block closes fh.
    fh.close()
| bsd-3-clause |
Donkyhotay/MoonPy | zope/i18n/negotiator.py | 1 | 2214 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Language Negotiator
$Id: negotiator.py 26559 2004-07-15 21:22:32Z srichter $
"""
from zope.i18n.interfaces import INegotiator
from zope.i18n.interfaces import IUserPreferredLanguages
from zope.interface import implements
def normalize_lang(lang):
    """Return *lang* trimmed, lower-cased, with '_' turned into '-' and
    every space removed (e.g. ' EN_us ' -> 'en-us')."""
    normalized = lang.strip().lower()
    normalized = normalized.replace('_', '-')
    return normalized.replace(' ', '')
def normalize_langs(langs):
    """Build a mapping from the normalized form of each language id to its
    original spelling, so lookups match normalized ids but the original
    string is what gets returned."""
    return dict((normalize_lang(original), original) for original in langs)
class Negotiator(object):
    """Pick the best available language for a request: the first of the
    user's preferred languages that is available, with a fallback to the
    generic language when only a variation was requested."""

    implements(INegotiator)

    def getLanguage(self, langs, env):
        envadapter = IUserPreferredLanguages(env)
        userlangs = envadapter.getPreferredLanguages()
        # Prioritize on the user preferred languages. Return the
        # first user preferred language that the object has available.
        langs = normalize_langs(langs)
        for lang in userlangs:
            if lang in langs:
                return langs.get(lang)
            # If the user asked for a specific variation, but we don't
            # have it available we may serve the most generic one,
            # according to the spec (eg: user asks for ('en-us',
            # 'de'), but we don't have 'en-us', then 'en' is preferred
            # to 'de').
            parts = lang.split('-')
            if len(parts) > 1 and parts[0] in langs:
                return langs.get(parts[0])
        # NOTE(review): user languages are compared as-is against the
        # normalized keys; assumes the IUserPreferredLanguages adapter
        # returns already-normalized ids — confirm.
        return None
negotiator = Negotiator()
| gpl-3.0 |
nixargh/dnswatch | dnswatch/config.py | 1 | 1957 | import os
import yaml
import logging
class Config:
    """DNSWatch YAML configuration loader.

    ``read()`` parses a config file, normalizes legacy keys, fills in
    optional defaults, and pins the DNS provider/zone across reloads so a
    reload cannot silently switch either of them.
    """

    def __init__(self):
        self.logger = logging.getLogger("DNSWatch.Config")
        # Pinned on the first successful read; later reads may not change
        # them (a warning is logged instead).
        self.dnsprovider = None
        self.dnszone = None

    def read(self, config_file):
        """Parse *config_file* and return the configuration dict.

        Backward compatible with the 0.2.* 'nsupdate' section name and
        fills defaults for the optional timeout/ttl/alias keys.
        """
        self.logger.debug("Loading configuration from {}".format(config_file))
        with open(os.path.realpath(config_file), "r") as f:
            # safe_load: a config file must not be able to instantiate
            # arbitrary Python objects (yaml.load without a Loader is
            # unsafe and deprecated).
            config = yaml.safe_load(f)

        # For backward compatibility with 0.2.* config
        if "nsupdate" in config:
            config["dnsupdate"] = config.pop("nsupdate")

        # DNS query timeout is optional
        if "timeout" not in config["dnsupdate"]:
            config["dnsupdate"]["timeout"] = 10
        # TTL is optional
        if "ttl" not in config["dnsupdate"]:
            config["dnsupdate"]["ttl"] = 300
        # Aliases is optional
        if "alias" not in config["dnsupdate"]:
            config["dnsupdate"]["alias"] = dict()

        # Do not rewrite DNS provider and zone under reload
        if self.dnsprovider:
            new_dnsprovider = config["dnsupdate"]["provider"]
            if self.dnsprovider != new_dnsprovider:
                self.logger.warning(
                    "DNS provider change ignored by reload: '{}' -> '{}'.".format(
                        self.dnsprovider, new_dnsprovider))
            config["dnsupdate"]["provider"] = self.dnsprovider
        else:
            self.dnsprovider = config["dnsupdate"]["provider"]

        if self.dnszone:
            new_dnszone = config["dnsupdate"]["zone"]
            if self.dnszone != new_dnszone:
                self.logger.warning(
                    "DNS zone change ignored by reload: '{}' -> '{}'.".format(
                        self.dnszone, new_dnszone))
            config["dnsupdate"]["zone"] = self.dnszone
        else:
            self.dnszone = config["dnsupdate"]["zone"]

        self.logger.debug("Configuration loaded.")
        return config
| gpl-3.0 |
AbrahmAB/sugar | src/jarabe/model/update/new_aslo.py | 1 | 3970 | # Copyright (C) 2014 Sam Parkinson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import logging
from sugar3.bundle.bundleversion import NormalizedVersion, InvalidVersionError
from gi.repository import Gio
from jarabe.model.update import BundleUpdate
from jarabe.util.downloader import Downloader
from jarabe import config
class NewAsloUpdater(object):
    """
    Checks for updates using the new ASLO's update.json file
    """

    def __init__(self):
        self._completion_cb = None
        self._progress_cb = None
        self._error_cb = None
        self._bundles = []
        self._downloader = None
        self._canceled = False

    def fetch_update_info(self, installed_bundles, auto, progress_cb,
                          completion_cb, error_cb):
        """Start an async update check: download update.json and compare
        it against *installed_bundles* in the completion callback."""
        self._completion_cb = completion_cb
        self._progress_cb = progress_cb
        self._error_cb = error_cb
        self._bundles = installed_bundles

        self._progress_cb('', 0)  # Set the status to 'Looking for updates'

        settings = Gio.Settings('org.sugarlabs.update')
        data_json_url = settings.get_string('new-aslo-url')

        self._downloader = Downloader(data_json_url)
        self._downloader.connect('complete',
                                 self.__data_json_download_complete_cb)
        self._downloader.download()

    def __data_json_download_complete_cb(self, downloader, result):
        if self._canceled:
            return

        try:
            activities = json.loads(result.get_data())['activities']
        except ValueError:
            self._error_cb('Can not parse loaded update.json')
            return

        updates = []
        for i, bundle in enumerate(self._bundles):
            # float() so the progress fraction is not truncated to 0 by
            # Python 2 integer division.
            self._progress_cb(bundle.get_name(),
                              float(i) / len(self._bundles))

            if bundle.get_bundle_id() not in activities:
                logging.debug('%s not in activities' % bundle.get_bundle_id())
                continue

            activity = activities[bundle.get_bundle_id()]
            try:
                version = NormalizedVersion(str(activity['version']))
                min_sugar = NormalizedVersion(str(activity['minSugarVersion']))
            except KeyError:
                logging.debug('KeyError - %s' % bundle.get_bundle_id())
                continue
            except InvalidVersionError:
                logging.debug('InvalidVersion - %s' % bundle.get_bundle_id())
                continue

            if NormalizedVersion(bundle.get_activity_version()) >= version:
                logging.debug('%s is up to date' % bundle.get_bundle_id())
                continue
            if NormalizedVersion(config.version) < min_sugar:
                logging.debug('Upgrade sugar for %s' % bundle.get_bundle_id())
                continue

            logging.debug('Marked for update: %s' % bundle.get_bundle_id())
            u = BundleUpdate(bundle.get_bundle_id(), bundle.get_name(),
                             version,
                             activity['xo_url'],
                             activity.get('xo_size', 1024 * 2))
            updates.append(u)

        self._completion_cb(updates)

    def cancel(self):
        """Abort an in-flight check and signal completion with None."""
        self._canceled = True
        if self._downloader:
            self._downloader.cancel()
        self._completion_cb(None)

    def clean(self):
        # Reset the cancel flag so the updater can be reused.
        self._canceled = False
| gpl-3.0 |
quickresolve/accel.ai | flask-aws/lib/python2.7/site-packages/jinja2/tests.py | 638 | 3444 | # -*- coding: utf-8 -*-
"""
jinja2.tests
~~~~~~~~~~~~
Jinja test functions. Used with the "is" operator.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
from jinja2.runtime import Undefined
from jinja2._compat import text_type, string_types, mapping_types
number_re = re.compile(r'^-?\d+(\.\d+)?$')
regex_type = type(number_re)
test_callable = callable
def test_odd(value):
"""Return true if the variable is odd."""
return value % 2 == 1
def test_even(value):
"""Return true if the variable is even."""
return value % 2 == 0
def test_divisibleby(value, num):
"""Check if a variable is divisible by a number."""
return value % num == 0
def test_defined(value):
    """Return true if the variable is defined:

    .. sourcecode:: jinja

        {% if variable is defined %}
            value of variable: {{ variable }}
        {% else %}
            variable is not defined
        {% endif %}

    See the :func:`default` filter for a simple way to set undefined
    variables.
    """
    is_undefined = isinstance(value, Undefined)
    return not is_undefined
def test_undefined(value):
    """Like :func:`defined` but the other way round."""
    is_undefined = isinstance(value, Undefined)
    return is_undefined
def test_none(value):
"""Return true if the variable is none."""
return value is None
def test_lower(value):
    """Return true if the variable is lowercased."""
    text = text_type(value)
    return text.islower()
def test_upper(value):
    """Return true if the variable is uppercased."""
    text = text_type(value)
    return text.isupper()
def test_string(value):
    """Return true if the object is a string."""
    is_string = isinstance(value, string_types)
    return is_string
def test_mapping(value):
    """Return true if the object is a mapping (dict etc.).

    .. versionadded:: 2.6
    """
    is_mapping = isinstance(value, mapping_types)
    return is_mapping
def test_number(value):
"""Return true if the variable is a number."""
return isinstance(value, (int, float, complex))
def test_sequence(value):
"""Return true if the variable is a sequence. Sequences are variables
that are iterable.
"""
try:
len(value)
value.__getitem__
except:
return False
return True
def test_sameas(value, other):
"""Check if an object points to the same memory address than another
object:
.. sourcecode:: jinja
{% if foo.attribute is sameas false %}
the foo attribute really is the `False` singleton
{% endif %}
"""
return value is other
def test_iterable(value):
"""Check if it's possible to iterate over an object."""
try:
iter(value)
except TypeError:
return False
return True
def test_escaped(value):
"""Check if the value is escaped."""
return hasattr(value, '__html__')
# Registry mapping test names (used after the ``is`` operator in
# templates) to their implementations.
TESTS = {
    'odd':              test_odd,
    'even':             test_even,
    'divisibleby':      test_divisibleby,
    'defined':          test_defined,
    'undefined':        test_undefined,
    'none':             test_none,
    'lower':            test_lower,
    'upper':            test_upper,
    'string':           test_string,
    'mapping':          test_mapping,
    'number':           test_number,
    'sequence':         test_sequence,
    'iterable':         test_iterable,
    'callable':         test_callable,
    'sameas':           test_sameas,
    'escaped':          test_escaped
}
| mit |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/mock.py | 424 | 75527 | # mock.py
# Test tools for mocking and patching.
# Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# mock 1.0
# http://www.voidspace.org.uk/python/mock/
# Released subject to the BSD License
# Please see http://www.voidspace.org.uk/python/license.shtml
# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
# Comments, suggestions and bug reports welcome.
__all__ = (
'Mock',
'MagicMock',
'patch',
'sentinel',
'DEFAULT',
'ANY',
'call',
'create_autospec',
'FILTER_DIR',
'NonCallableMock',
'NonCallableMagicMock',
'mock_open',
'PropertyMock',
)
__version__ = '1.0.1'
import pprint
import sys
# Optional imports and Python 2.4 / 2.x / 3.x / Jython compatibility shims.
try:
    import inspect
except ImportError:
    # for alternative platforms that
    # may not have inspect
    inspect = None

try:
    from functools import wraps as original_wraps
except ImportError:
    # Python 2.4 compatibility
    def wraps(original):
        def inner(f):
            f.__name__ = original.__name__
            f.__doc__ = original.__doc__
            f.__module__ = original.__module__
            f.__wrapped__ = original
            return f
        return inner
else:
    if sys.version_info[:2] >= (3, 3):
        wraps = original_wraps
    else:
        # functools.wraps before 3.3 does not set __wrapped__; add it.
        def wraps(func):
            def inner(f):
                f = original_wraps(func)(f)
                f.__wrapped__ = func
                return f
            return inner

try:
    unicode
except NameError:
    # Python 3
    basestring = unicode = str

try:
    long
except NameError:
    # Python 3
    long = int

try:
    BaseException
except NameError:
    # Python 2.4 compatibility
    BaseException = Exception

try:
    next
except NameError:
    # Emulate the next() builtin on interpreters that lack it.
    def next(obj):
        return obj.next()


BaseExceptions = (BaseException,)
if 'java' in sys.platform:
    # jython
    import java
    BaseExceptions = (BaseException, java.lang.Throwable)

try:
    _isidentifier = str.isidentifier
except AttributeError:
    # Python 2.X
    import keyword
    import re
    regex = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
    def _isidentifier(string):
        if string in keyword.kwlist:
            return False
        return regex.match(string)


inPy3k = sys.version_info[0] == 3

# Needed to work around Python 3 bug where use of "super" interferes with
# defining __class__ as a descriptor
_super = super

# Names of the bound-method self attribute and the builtins module differ
# between Python 2 and 3.
self = 'im_self'
builtin = '__builtin__'
if inPy3k:
    self = '__self__'
    builtin = 'builtins'

FILTER_DIR = True
def _is_instance_mock(obj):
    """Return True when *obj* is an instance of any mock class."""
    # can't use isinstance on Mock objects because they override __class__
    # The base class for all mocks is NonCallableMock
    return issubclass(type(obj), NonCallableMock)
def _is_exception(obj):
    """Return True for exception instances and exception classes."""
    return (
        isinstance(obj, BaseExceptions) or
        isinstance(obj, ClassTypes) and issubclass(obj, BaseExceptions)
    )
class _slotted(object):
    # Throwaway slotted class used only to obtain the type of a slot
    # descriptor for DescriptorTypes below.
    __slots__ = ['a']


# Types treated as descriptors on classes (slot descriptors, properties).
DescriptorTypes = (
    type(_slotted.a),
    property,
)
def _getsignature(func, skipfirst, instance=False):
    """Return ``(signature_string, func)`` for *func*, or None when the
    signature cannot be introspected (e.g. C functions/methods).

    *skipfirst* drops the first argument (self/cls); *instance* indicates
    *func* should be inspected as an instance rather than as a class.
    """
    if inspect is None:
        raise ImportError('inspect module not available')

    if isinstance(func, ClassTypes) and not instance:
        # For a class, the interesting signature is its __init__.
        try:
            func = func.__init__
        except AttributeError:
            return
        skipfirst = True
    elif not isinstance(func, FunctionTypes):
        # for classes where instance is True we end up here too
        try:
            func = func.__call__
        except AttributeError:
            return

    if inPy3k:
        try:
            argspec = inspect.getfullargspec(func)
        except TypeError:
            # C function / method, possibly inherited object().__init__
            return
        regargs, varargs, varkw, defaults, kwonly, kwonlydef, ann = argspec
    else:
        try:
            regargs, varargs, varkwargs, defaults = inspect.getargspec(func)
        except TypeError:
            # C function / method, possibly inherited object().__init__
            return

    # instance methods and classmethods need to lose the self argument
    if getattr(func, self, None) is not None:
        regargs = regargs[1:]
    if skipfirst:
        # this condition and the above one are never both True - why?
        regargs = regargs[1:]

    # Format without default values so the result can be reused as a
    # parameter list in generated source.
    if inPy3k:
        signature = inspect.formatargspec(
            regargs, varargs, varkw, defaults,
            kwonly, kwonlydef, ann, formatvalue=lambda value: "")
    else:
        signature = inspect.formatargspec(
            regargs, varargs, varkwargs, defaults,
            formatvalue=lambda value: "")
    return signature[1:-1], func
def _check_signature(func, mock, skipfirst, instance=False):
    """Install a signature-checking lambda on *mock*'s class so calls are
    validated against *func*'s real signature."""
    if not _callable(func):
        return

    result = _getsignature(func, skipfirst, instance)
    if result is None:
        return
    signature, func = result

    # can't use self because "self" is common as an argument name
    # unfortunately even not in the first place
    src = "lambda _mock_self, %s: None" % signature
    checksig = eval(src, {})
    _copy_func_details(func, checksig)
    type(mock)._mock_check_sig = checksig
def _copy_func_details(func, funcopy):
    """Copy identifying metadata (name, doc, module, defaults) from *func*
    onto *funcopy*."""
    funcopy.__name__ = func.__name__
    funcopy.__doc__ = func.__doc__
    #funcopy.__dict__.update(func.__dict__)
    funcopy.__module__ = func.__module__
    if not inPy3k:
        funcopy.func_defaults = func.func_defaults
        return
    funcopy.__defaults__ = func.__defaults__
    funcopy.__kwdefaults__ = func.__kwdefaults__
def _callable(obj):
    """Return True when *obj* is a class or exposes a __call__ attribute."""
    if isinstance(obj, ClassTypes):
        return True
    return getattr(obj, '__call__', None) is not None
def _is_list(obj):
# checks for list or tuples
# XXXX badly named!
return type(obj) in (list, tuple)
def _instance_callable(obj):
    """Given an object, return True if the object is callable.
    For classes, return True if instances would be callable."""
    if not isinstance(obj, ClassTypes):
        # already an instance
        return getattr(obj, '__call__', None) is not None

    klass = obj
    # uses __bases__ instead of __mro__ so that we work with old style classes
    if klass.__dict__.get('__call__') is not None:
        return True

    # Recurse through the base classes looking for __call__.
    for base in klass.__bases__:
        if _instance_callable(base):
            return True
    return False
def _set_signature(mock, original, instance=False):
    """Return a function with *original*'s signature that delegates to
    *mock*, or None when the signature cannot be reproduced."""
    # creates a function with signature (*args, **kwargs) that delegates to a
    # mock. It still does signature checking by calling a lambda with the same
    # signature as the original.
    if not _callable(original):
        return

    skipfirst = isinstance(original, ClassTypes)
    result = _getsignature(original, skipfirst, instance)
    if result is None:
        # was a C function (e.g. object().__init__ ) that can't be mocked
        return

    signature, func = result
    src = "lambda %s: None" % signature
    checksig = eval(src, {})
    _copy_func_details(func, checksig)

    # Fall back to a safe name if the original's is not an identifier.
    name = original.__name__
    if not _isidentifier(name):
        name = 'funcopy'
    context = {'_checksig_': checksig, 'mock': mock}
    src = """def %s(*args, **kwargs):
    _checksig_(*args, **kwargs)
    return mock(*args, **kwargs)""" % name
    exec (src, context)
    funcopy = context[name]
    _setup_func(funcopy, mock)
    return funcopy
def _setup_func(funcopy, mock):
    """Wire *funcopy* (a function wrapper around *mock*) so that it exposes
    the mock's assertion/reset API and shares its recorded state."""
    funcopy.mock = mock

    # can't use isinstance with mocks
    if not _is_instance_mock(mock):
        return

    # Forwarders so assertions made on the function hit the real mock.
    def assert_called_with(*args, **kwargs):
        return mock.assert_called_with(*args, **kwargs)
    def assert_called_once_with(*args, **kwargs):
        return mock.assert_called_once_with(*args, **kwargs)
    def assert_has_calls(*args, **kwargs):
        return mock.assert_has_calls(*args, **kwargs)
    def assert_any_call(*args, **kwargs):
        return mock.assert_any_call(*args, **kwargs)
    def reset_mock():
        funcopy.method_calls = _CallList()
        funcopy.mock_calls = _CallList()
        mock.reset_mock()
        ret = funcopy.return_value
        if _is_instance_mock(ret) and not ret is mock:
            ret.reset_mock()

    # Mirror the mock's call-recording attributes on the function itself.
    funcopy.called = False
    funcopy.call_count = 0
    funcopy.call_args = None
    funcopy.call_args_list = _CallList()
    funcopy.method_calls = _CallList()
    funcopy.mock_calls = _CallList()

    funcopy.return_value = mock.return_value
    funcopy.side_effect = mock.side_effect
    funcopy._mock_children = mock._mock_children

    funcopy.assert_called_with = assert_called_with
    funcopy.assert_called_once_with = assert_called_once_with
    funcopy.assert_has_calls = assert_has_calls
    funcopy.assert_any_call = assert_any_call
    funcopy.reset_mock = reset_mock

    mock._mock_delegate = funcopy
def _is_magic(name):
return '__%s__' % name[2:-2] == name
class _SentinelObject(object):
"A unique, named, sentinel object."
def __init__(self, name):
self.name = name
def __repr__(self):
return 'sentinel.%s' % self.name
class _Sentinel(object):
    """Access attributes to return a named object, usable as a sentinel."""

    def __init__(self):
        self._sentinels = {}

    def __getattr__(self, name):
        if name == '__bases__':
            # Without this help(mock) raises an exception
            raise AttributeError
        # Create lazily and cache, so the same name always yields the
        # same sentinel object.
        return self._sentinels.setdefault(name, _SentinelObject(name))
sentinel = _Sentinel()
DEFAULT = sentinel.DEFAULT
_missing = sentinel.MISSING
_deleted = sentinel.DELETED
class OldStyleClass:
pass
ClassType = type(OldStyleClass)
def _copy(value):
if type(value) in (dict, list, tuple, set):
return type(value)(value)
return value
ClassTypes = (type,)
if not inPy3k:
ClassTypes = (type, ClassType)
_allowed_names = set(
[
'return_value', '_mock_return_value', 'side_effect',
'_mock_side_effect', '_mock_parent', '_mock_new_parent',
'_mock_name', '_mock_new_name'
]
)
def _delegating_property(name):
    """Create a property that reads/writes ``_mock_<name>`` on the mock
    itself, or forwards to ``_mock_delegate`` when one is installed."""
    _allowed_names.add(name)
    _the_name = '_mock_' + name
    # Defaults bind the loop-free values at definition time.
    def _get(self, name=name, _the_name=_the_name):
        sig = self._mock_delegate
        if sig is None:
            return getattr(self, _the_name)
        return getattr(sig, name)
    def _set(self, value, name=name, _the_name=_the_name):
        sig = self._mock_delegate
        if sig is None:
            # Write straight into __dict__ (no attribute machinery).
            self.__dict__[_the_name] = value
        else:
            setattr(sig, name, value)

    return property(_get, _set)
class _CallList(list):
def __contains__(self, value):
if not isinstance(value, list):
return list.__contains__(self, value)
len_value = len(value)
len_self = len(self)
if len_value > len_self:
return False
for i in range(0, len_self - len_value + 1):
sub_list = self[i:i+len_value]
if sub_list == value:
return True
return False
def __repr__(self):
return pprint.pformat(list(self))
def _check_and_set_parent(parent, value, name, new_name):
    """If *value* is an unnamed, unparented mock, adopt it under *parent*
    with the given name(s).  Return True when parentage was set."""
    if not _is_instance_mock(value):
        return False
    # Already named or parented mocks are left alone.
    if ((value._mock_name or value._mock_new_name) or
        (value._mock_parent is not None) or
        (value._mock_new_parent is not None)):
        return False

    _parent = parent
    while _parent is not None:
        # setting a mock (value) as a child or return value of itself
        # should not modify the mock
        if _parent is value:
            return False
        _parent = _parent._mock_new_parent

    if new_name:
        value._mock_new_parent = parent
        value._mock_new_name = new_name
    if name:
        value._mock_parent = parent
        value._mock_name = name
    return True
class Base(object):
    # Class-level defaults shared by all mock classes.
    _mock_return_value = DEFAULT
    _mock_side_effect = None

    def __init__(self, *args, **kwargs):
        # Accept and ignore all constructor arguments; terminates the
        # cooperative super() chain.
        pass
class NonCallableMock(Base):
"""A non-callable version of `Mock`"""
    def __new__(cls, *args, **kw):
        """Create the instance from a fresh one-off subclass of *cls*."""
        # every instance has its own class
        # so we can create magic methods on the
        # class without stomping on other mocks
        new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__})
        instance = object.__new__(new)
        return instance
    def __init__(
        self, spec=None, wraps=None, name=None, spec_set=None,
        parent=None, _spec_state=None, _new_name='', _new_parent=None,
        **kwargs
    ):
        # A child mock has only a "new" parent until it is actually used.
        if _new_parent is None:
            _new_parent = parent

        # Assign through __dict__ directly, bypassing attribute machinery
        # on the mock itself.
        __dict__ = self.__dict__
        __dict__['_mock_parent'] = parent
        __dict__['_mock_name'] = name
        __dict__['_mock_new_name'] = _new_name
        __dict__['_mock_new_parent'] = _new_parent

        # spec_set doubles as both the spec object and a strict-set flag.
        if spec_set is not None:
            spec = spec_set
            spec_set = True

        self._mock_add_spec(spec, spec_set)

        __dict__['_mock_children'] = {}
        __dict__['_mock_wraps'] = wraps
        __dict__['_mock_delegate'] = None

        # Call-recording state.
        __dict__['_mock_called'] = False
        __dict__['_mock_call_args'] = None
        __dict__['_mock_call_count'] = 0
        __dict__['_mock_call_args_list'] = _CallList()
        __dict__['_mock_mock_calls'] = _CallList()

        __dict__['method_calls'] = _CallList()

        # Extra keyword arguments configure attributes on the mock.
        if kwargs:
            self.configure_mock(**kwargs)

        _super(NonCallableMock, self).__init__(
            spec, wraps, name, spec_set, parent,
            _spec_state
        )
    def attach_mock(self, mock, attribute):
        """
        Attach a mock as an attribute of this one, replacing its name and
        parent. Calls to the attached mock will be recorded in the
        `method_calls` and `mock_calls` attributes of this one."""
        # Clear any existing naming/parentage so the setattr below
        # re-parents the mock under self.
        mock._mock_parent = None
        mock._mock_new_parent = None
        mock._mock_name = ''
        mock._mock_new_name = None

        setattr(self, attribute, mock)
    def mock_add_spec(self, spec, spec_set=False):
        """Add a spec to a mock. `spec` can either be an object or a
        list of strings. Only attributes on the `spec` can be fetched as
        attributes from the mock.

        If `spec_set` is True then only attributes on the spec can be set."""
        self._mock_add_spec(spec, spec_set)
    def _mock_add_spec(self, spec, spec_set):
        """Record the spec'd attribute names (and the spec class, when the
        spec is not a list of strings) on this mock's __dict__."""
        _spec_class = None

        if spec is not None and not _is_list(spec):
            if isinstance(spec, ClassTypes):
                _spec_class = spec
            else:
                _spec_class = _get_class(spec)
            # From here on only the attribute names matter.
            spec = dir(spec)

        __dict__ = self.__dict__
        __dict__['_spec_class'] = _spec_class
        __dict__['_spec_set'] = spec_set
        __dict__['_mock_methods'] = spec
    def __get_return_value(self):
        ret = self._mock_return_value
        if self._mock_delegate is not None:
            ret = self._mock_delegate.return_value

        if ret is DEFAULT:
            # First access: create a child mock and cache it so the same
            # object is returned every time.
            ret = self._get_child_mock(
                _new_parent=self, _new_name='()'
            )
            self.return_value = ret
        return ret

    def __set_return_value(self, value):
        if self._mock_delegate is not None:
            self._mock_delegate.return_value = value
        else:
            self._mock_return_value = value
            _check_and_set_parent(self, value, None, '()')

    __return_value_doc = "The value to be returned when the mock is called."
    return_value = property(__get_return_value, __set_return_value,
                            __return_value_doc)
    @property
    def __class__(self):
        # Report the spec class (when one is set) so isinstance() checks
        # against the spec succeed.
        if self._spec_class is None:
            return type(self)
        return self._spec_class
    # Call-recording attributes read/write through the delegate (if any).
    called = _delegating_property('called')
    call_count = _delegating_property('call_count')
    call_args = _delegating_property('call_args')
    call_args_list = _delegating_property('call_args_list')
    mock_calls = _delegating_property('mock_calls')


    def __get_side_effect(self):
        sig = self._mock_delegate
        if sig is None:
            return self._mock_side_effect
        return sig.side_effect

    def __set_side_effect(self, value):
        value = _try_iter(value)
        sig = self._mock_delegate
        if sig is None:
            self._mock_side_effect = value
        else:
            sig.side_effect = value

    side_effect = property(__get_side_effect, __set_side_effect)
def reset_mock(self):
"Restore the mock object to its initial state."
self.called = False
self.call_args = None
self.call_count = 0
self.mock_calls = _CallList()
self.call_args_list = _CallList()
self.method_calls = _CallList()
for child in self._mock_children.values():
if isinstance(child, _SpecState):
continue
child.reset_mock()
ret = self._mock_return_value
if _is_instance_mock(ret) and ret is not self:
ret.reset_mock()
def configure_mock(self, **kwargs):
"""Set attributes on the mock through keyword arguments.
Attributes plus return values and side effects can be set on child
mocks using standard dot notation and unpacking a dictionary in the
method call:
>>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError}
>>> mock.configure_mock(**attrs)"""
for arg, val in sorted(kwargs.items(),
# we sort on the number of dots so that
# attributes are set before we set attributes on
# attributes
key=lambda entry: entry[0].count('.')):
args = arg.split('.')
final = args.pop()
obj = self
for entry in args:
obj = getattr(obj, entry)
setattr(obj, final, val)
    def __getattr__(self, name):
        """Lazily create child mocks on attribute access, honouring any
        spec restrictions and the deletion sentinel."""
        if name == '_mock_methods':
            # Guard against recursion before _mock_methods has been set.
            raise AttributeError(name)
        elif self._mock_methods is not None:
            # A spec is installed: only spec'd, non-magic names are allowed.
            if name not in self._mock_methods or name in _all_magics:
                raise AttributeError("Mock object has no attribute %r" % name)
        elif _is_magic(name):
            # Without a spec, magic methods are never auto-created here.
            raise AttributeError(name)
        result = self._mock_children.get(name)
        if result is _deleted:
            # The attribute was removed with delattr; keep it "deleted".
            raise AttributeError(name)
        elif result is None:
            wraps = None
            if self._mock_wraps is not None:
                # XXXX should we get the attribute without triggering code
                # execution?
                wraps = getattr(self._mock_wraps, name)
            result = self._get_child_mock(
                parent=self, name=name, wraps=wraps, _new_name=name,
                _new_parent=self
            )
            self._mock_children[name] = result
        elif isinstance(result, _SpecState):
            # Deferred autospec: materialise the spec'd child on first use.
            result = create_autospec(
                result.spec, result.spec_set, result.instance,
                result.parent, result.name
            )
            self._mock_children[name] = result
        return result
    def __repr__(self):
        """Build a repr like <MagicMock name='parent.child()' id='...'> by
        walking up the _mock_new_parent chain to assemble the dotted name."""
        _name_list = [self._mock_new_name]
        _parent = self._mock_new_parent
        last = self
        # '()' segments are joined without a dot (e.g. "m().child").
        dot = '.'
        if _name_list == ['()']:
            dot = ''
        seen = set()
        while _parent is not None:
            last = _parent
            _name_list.append(_parent._mock_new_name + dot)
            dot = '.'
            if _parent._mock_new_name == '()':
                dot = ''
            _parent = _parent._mock_new_parent
            # use ids here so as not to call __hash__ on the mocks
            if id(_parent) in seen:
                break
            seen.add(id(_parent))
        _name_list = list(reversed(_name_list))
        # The root mock's own name (or 'mock') starts the dotted path.
        _first = last._mock_name or 'mock'
        if len(_name_list) > 1:
            if _name_list[1] not in ('()', '().'):
                _first += '.'
        _name_list[0] = _first
        name = ''.join(_name_list)
        name_string = ''
        if name not in ('mock', 'mock.'):
            name_string = ' name=%r' % name
        # Show the spec (or spec_set) class when one is installed.
        spec_string = ''
        if self._spec_class is not None:
            spec_string = ' spec=%r'
            if self._spec_set:
                spec_string = ' spec_set=%r'
            spec_string = spec_string % self._spec_class.__name__
        return "<%s%s%s id='%s'>" % (
            type(self).__name__,
            name_string,
            spec_string,
            id(self)
        )
def __dir__(self):
"""Filter the output of `dir(mock)` to only useful members.
XXXX
"""
extras = self._mock_methods or []
from_type = dir(type(self))
from_dict = list(self.__dict__)
if FILTER_DIR:
from_type = [e for e in from_type if not e.startswith('_')]
from_dict = [e for e in from_dict if not e.startswith('_') or
_is_magic(e)]
return sorted(set(extras + from_type + from_dict +
list(self._mock_children)))
    def __setattr__(self, name, value):
        """Enforce spec_set restrictions and route magic-method assignment
        onto the type (where Python looks magic methods up)."""
        if name in _allowed_names:
            # property setters go through here
            return object.__setattr__(self, name, value)
        elif (self._spec_set and self._mock_methods is not None and
            name not in self._mock_methods and
            name not in self.__dict__):
            # spec_set forbids setting names that are not on the spec
            raise AttributeError("Mock object has no attribute '%s'" % name)
        elif name in _unsupported_magics:
            msg = 'Attempting to set unsupported magic method %r.' % name
            raise AttributeError(msg)
        elif name in _all_magics:
            if self._mock_methods is not None and name not in self._mock_methods:
                raise AttributeError("Mock object has no attribute '%s'" % name)
            if not _is_instance_mock(value):
                # Plain callable: install a real function on the class and
                # bind `self` for the stored value.
                setattr(type(self), name, _get_method(name, value))
                original = value
                value = lambda *args, **kw: original(self, *args, **kw)
            else:
                # only set _new_name and not name so that mock_calls is tracked
                # but not method calls
                _check_and_set_parent(self, value, None, name)
                setattr(type(self), name, value)
                self._mock_children[name] = value
        elif name == '__class__':
            # Assigning __class__ installs it as the reported spec class.
            self._spec_class = value
            return
        else:
            if _check_and_set_parent(self, value, name, name):
                self._mock_children[name] = value
        return object.__setattr__(self, name, value)
    def __delattr__(self, name):
        """Delete an attribute and record the deletion with a sentinel so
        later access raises AttributeError instead of recreating a child."""
        if name in _all_magics and name in type(self).__dict__:
            # Magic methods live on the type; remove from there first.
            delattr(type(self), name)
            if name not in self.__dict__:
                # for magic methods that are still MagicProxy objects and
                # not set on the instance itself
                return
        if name in self.__dict__:
            object.__delattr__(self, name)
        obj = self._mock_children.get(name, _missing)
        if obj is _deleted:
            # Already deleted once: behave like a missing attribute.
            raise AttributeError(name)
        if obj is not _missing:
            del self._mock_children[name]
        # Remember the deletion so __getattr__ won't recreate the child.
        self._mock_children[name] = _deleted
def _format_mock_call_signature(self, args, kwargs):
name = self._mock_name or 'mock'
return _format_call_signature(name, args, kwargs)
def _format_mock_failure_message(self, args, kwargs):
message = 'Expected call: %s\nActual call: %s'
expected_string = self._format_mock_call_signature(args, kwargs)
call_args = self.call_args
if len(call_args) == 3:
call_args = call_args[1:]
actual_string = self._format_mock_call_signature(*call_args)
return message % (expected_string, actual_string)
def assert_called_with(_mock_self, *args, **kwargs):
"""assert that the mock was called with the specified arguments.
Raises an AssertionError if the args and keyword args passed in are
different to the last call to the mock."""
self = _mock_self
if self.call_args is None:
expected = self._format_mock_call_signature(args, kwargs)
raise AssertionError('Expected call: %s\nNot called' % (expected,))
if self.call_args != (args, kwargs):
msg = self._format_mock_failure_message(args, kwargs)
raise AssertionError(msg)
def assert_called_once_with(_mock_self, *args, **kwargs):
"""assert that the mock was called exactly once and with the specified
arguments."""
self = _mock_self
if not self.call_count == 1:
msg = ("Expected to be called once. Called %s times." %
self.call_count)
raise AssertionError(msg)
return self.assert_called_with(*args, **kwargs)
def assert_has_calls(self, calls, any_order=False):
"""assert the mock has been called with the specified calls.
The `mock_calls` list is checked for the calls.
If `any_order` is False (the default) then the calls must be
sequential. There can be extra calls before or after the
specified calls.
If `any_order` is True then the calls can be in any order, but
they must all appear in `mock_calls`."""
if not any_order:
if calls not in self.mock_calls:
raise AssertionError(
'Calls not found.\nExpected: %r\n'
'Actual: %r' % (calls, self.mock_calls)
)
return
all_calls = list(self.mock_calls)
not_found = []
for kall in calls:
try:
all_calls.remove(kall)
except ValueError:
not_found.append(kall)
if not_found:
raise AssertionError(
'%r not all found in call list' % (tuple(not_found),)
)
def assert_any_call(self, *args, **kwargs):
"""assert the mock has been called with the specified arguments.
The assert passes if the mock has *ever* been called, unlike
`assert_called_with` and `assert_called_once_with` that only pass if
the call is the most recent one."""
kall = call(*args, **kwargs)
if kall not in self.call_args_list:
expected_string = self._format_mock_call_signature(args, kwargs)
raise AssertionError(
'%s call not found' % expected_string
)
def _get_child_mock(self, **kw):
"""Create the child mocks for attributes and return value.
By default child mocks will be the same type as the parent.
Subclasses of Mock may want to override this to customize the way
child mocks are made.
For non-callable mocks the callable variant will be used (rather than
any custom subclass)."""
_type = type(self)
if not issubclass(_type, CallableMixin):
if issubclass(_type, NonCallableMagicMock):
klass = MagicMock
elif issubclass(_type, NonCallableMock) :
klass = Mock
else:
klass = _type.__mro__[1]
return klass(**kw)
def _try_iter(obj):
    # Pass None, exceptions and callables through untouched; anything else
    # is turned into an iterator (or returned as-is when not iterable).
    if obj is None or _is_exception(obj) or _callable(obj):
        return obj
    try:
        return iter(obj)
    except TypeError:
        # XXXX backwards compatibility
        # but this will blow up on first call - so maybe we should fail early?
        return obj
class CallableMixin(Base):
    # Mixin that supplies call behaviour: recording calls on this mock and
    # all its ancestors, and producing return values / side effects.
    def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
                 wraps=None, name=None, spec_set=None, parent=None,
                 _spec_state=None, _new_name='', _new_parent=None, **kwargs):
        # Store directly in __dict__ so __setattr__ (and spec checking)
        # is bypassed during construction.
        self.__dict__['_mock_return_value'] = return_value
        _super(CallableMixin, self).__init__(
            spec, wraps, name, spec_set, parent,
            _spec_state, _new_name, _new_parent, **kwargs
        )
        # Goes through the property so iterables are wrapped by _try_iter.
        self.side_effect = side_effect
    def _mock_check_sig(self, *args, **kwargs):
        # stub method that can be replaced with one with a specific signature
        pass
    def __call__(_mock_self, *args, **kwargs):
        # can't use self in-case a function / method we are mocking uses self
        # in the signature
        _mock_self._mock_check_sig(*args, **kwargs)
        return _mock_self._mock_call(*args, **kwargs)
    def _mock_call(_mock_self, *args, **kwargs):
        # Record the call, propagate it up the parent chain, then work out
        # what to return (side_effect, wraps, or return_value).
        self = _mock_self
        self.called = True
        self.call_count += 1
        self.call_args = _Call((args, kwargs), two=True)
        self.call_args_list.append(_Call((args, kwargs), two=True))
        _new_name = self._mock_new_name
        _new_parent = self._mock_new_parent
        self.mock_calls.append(_Call(('', args, kwargs)))
        seen = set()
        # '()' name segments join to the next segment without a dot.
        skip_next_dot = _new_name == '()'
        do_method_calls = self._mock_parent is not None
        name = self._mock_name
        # Walk the ancestors, recording this call on each with a name
        # built relative to that ancestor.
        while _new_parent is not None:
            this_mock_call = _Call((_new_name, args, kwargs))
            if _new_parent._mock_new_name:
                dot = '.'
                if skip_next_dot:
                    dot = ''
                skip_next_dot = False
                if _new_parent._mock_new_name == '()':
                    skip_next_dot = True
                _new_name = _new_parent._mock_new_name + dot + _new_name
            if do_method_calls:
                if _new_name == name:
                    this_method_call = this_mock_call
                else:
                    this_method_call = _Call((name, args, kwargs))
                _new_parent.method_calls.append(this_method_call)
                do_method_calls = _new_parent._mock_parent is not None
                if do_method_calls:
                    name = _new_parent._mock_name + '.' + name
            _new_parent.mock_calls.append(this_mock_call)
            _new_parent = _new_parent._mock_new_parent
            # use ids here so as not to call __hash__ on the mocks
            _new_parent_id = id(_new_parent)
            if _new_parent_id in seen:
                break
            seen.add(_new_parent_id)
        ret_val = DEFAULT
        effect = self.side_effect
        if effect is not None:
            if _is_exception(effect):
                # Exception side effect: raise it.
                raise effect
            if not _callable(effect):
                # Iterator side effect: yield the next value (or raise it).
                result = next(effect)
                if _is_exception(result):
                    raise result
                return result
            ret_val = effect(*args, **kwargs)
            if ret_val is DEFAULT:
                ret_val = self.return_value
        # A wrapped object is only consulted when no explicit
        # return_value was configured.
        if (self._mock_wraps is not None and
             self._mock_return_value is DEFAULT):
            return self._mock_wraps(*args, **kwargs)
        if ret_val is DEFAULT:
            ret_val = self.return_value
        return ret_val
class Mock(CallableMixin, NonCallableMock):
    """
    Create a new `Mock` object. `Mock` takes several optional arguments
    that specify the behaviour of the Mock object:

    * `spec`: This can be either a list of strings or an existing object (a
    class or instance) that acts as the specification for the mock object. If
    you pass in an object then a list of strings is formed by calling dir on
    the object (excluding unsupported magic attributes and methods). Accessing
    any attribute not in this list will raise an `AttributeError`.

    If `spec` is an object (rather than a list of strings) then
    `mock.__class__` returns the class of the spec object. This allows mocks
    to pass `isinstance` tests.

    * `spec_set`: A stricter variant of `spec`. If used, attempting to *set*
    or get an attribute on the mock that isn't on the object passed as
    `spec_set` will raise an `AttributeError`.

    * `side_effect`: A function to be called whenever the Mock is called. See
    the `side_effect` attribute. Useful for raising exceptions or
    dynamically changing return values. The function is called with the same
    arguments as the mock, and unless it returns `DEFAULT`, the return
    value of this function is used as the return value.

    Alternatively `side_effect` can be an exception class or instance. In
    this case the exception will be raised when the mock is called.

    If `side_effect` is an iterable then each call to the mock will return
    the next value from the iterable. If any of the members of the iterable
    are exceptions they will be raised instead of returned.

    * `return_value`: The value returned when the mock is called. By default
    this is a new Mock (created on first access). See the
    `return_value` attribute.

    * `wraps`: Item for the mock object to wrap. If `wraps` is not None then
    calling the Mock will pass the call through to the wrapped object
    (returning the real result). Attribute access on the mock will return a
    Mock object that wraps the corresponding attribute of the wrapped object
    (so attempting to access an attribute that doesn't exist will raise an
    `AttributeError`).

    If the mock has an explicit `return_value` set then calls are not passed
    to the wrapped object and the `return_value` is returned instead.

    * `name`: If the mock has a name then it will be used in the repr of the
    mock. This can be useful for debugging. The name is propagated to child
    mocks.

    Mocks can also be called with arbitrary keyword arguments. These will be
    used to set attributes on the mock after it is created.
    """
    # All behaviour comes from CallableMixin (calling) and
    # NonCallableMock (attribute mocking); no extra body is needed.
def _dot_lookup(thing, comp, import_path):
try:
return getattr(thing, comp)
except AttributeError:
__import__(import_path)
return getattr(thing, comp)
def _importer(target):
    # Resolve a dotted path such as 'package.module.attr' to an object,
    # importing intermediate modules as required.
    components = target.split('.')
    import_path = components.pop(0)
    thing = __import__(import_path)
    for comp in components:
        import_path = '%s.%s' % (import_path, comp)
        thing = _dot_lookup(thing, comp, import_path)
    return thing
def _is_started(patcher):
# XXXX horrible
return hasattr(patcher, 'is_local')
class _patch(object):
    # Patcher object returned by patch()/patch.object()/patch.multiple();
    # usable as a decorator, class decorator or context manager.

    # Set by patch.multiple(): the keyword name a created mock is passed as.
    attribute_name = None
    # Patchers activated via start() are recorded here for patch.stopall().
    _active_patches = set()
    def __init__(
            self, getter, attribute, new, spec, create,
            spec_set, autospec, new_callable, kwargs
        ):
        # `new_callable` is mutually exclusive with `new` and `autospec`.
        if new_callable is not None:
            if new is not DEFAULT:
                raise ValueError(
                    "Cannot use 'new' and 'new_callable' together"
                )
            if autospec is not None:
                raise ValueError(
                    "Cannot use 'autospec' and 'new_callable' together"
                )
        self.getter = getter
        self.attribute = attribute
        self.new = new
        self.new_callable = new_callable
        self.spec = spec
        self.create = create
        self.has_local = False
        self.spec_set = spec_set
        self.autospec = autospec
        self.kwargs = kwargs
        self.additional_patchers = []
    def copy(self):
        # Independent patcher with the same configuration (one per
        # decorated test method when used as a class decorator).
        patcher = _patch(
            self.getter, self.attribute, self.new, self.spec,
            self.create, self.spec_set,
            self.autospec, self.new_callable, self.kwargs
        )
        patcher.attribute_name = self.attribute_name
        patcher.additional_patchers = [
            p.copy() for p in self.additional_patchers
        ]
        return patcher
    def __call__(self, func):
        if isinstance(func, ClassTypes):
            return self.decorate_class(func)
        return self.decorate_callable(func)
    def decorate_class(self, klass):
        # Decorate every callable test method (TEST_PREFIX match) with a
        # fresh copy of this patcher.
        for attr in dir(klass):
            if not attr.startswith(patch.TEST_PREFIX):
                continue
            attr_value = getattr(klass, attr)
            if not hasattr(attr_value, "__call__"):
                continue
            patcher = self.copy()
            setattr(klass, attr, patcher(attr_value))
        return klass
    def decorate_callable(self, func):
        # Stacked patch decorators share a single wrapper: just append.
        if hasattr(func, 'patchings'):
            func.patchings.append(self)
            return func
        @wraps(func)
        def patched(*args, **keywargs):
            # don't use a with here (backwards compatability with Python 2.4)
            extra_args = []
            entered_patchers = []
            # can't use try...except...finally because of Python 2.4
            # compatibility
            exc_info = tuple()
            try:
                try:
                    for patching in patched.patchings:
                        arg = patching.__enter__()
                        entered_patchers.append(patching)
                        if patching.attribute_name is not None:
                            keywargs.update(arg)
                        elif patching.new is DEFAULT:
                            extra_args.append(arg)
                    args += tuple(extra_args)
                    return func(*args, **keywargs)
                except:
                    if (patching not in entered_patchers and
                        _is_started(patching)):
                        # the patcher may have been started, but an exception
                        # raised whilst entering one of its additional_patchers
                        entered_patchers.append(patching)
                    # Pass the exception to __exit__
                    exc_info = sys.exc_info()
                    # re-raise the exception
                    raise
            finally:
                for patching in reversed(entered_patchers):
                    patching.__exit__(*exc_info)
        patched.patchings = [self]
        if hasattr(func, 'func_code'):
            # not in Python 3
            patched.compat_co_firstlineno = getattr(
                func, "compat_co_firstlineno",
                func.func_code.co_firstlineno
            )
        return patched
    def get_original(self):
        # Fetch the attribute being replaced; `local` is True when it lives
        # in the target's own __dict__ rather than being inherited.
        target = self.getter()
        name = self.attribute
        original = DEFAULT
        local = False
        try:
            original = target.__dict__[name]
        except (AttributeError, KeyError):
            original = getattr(target, name, DEFAULT)
        else:
            local = True
        if not self.create and original is DEFAULT:
            raise AttributeError(
                "%s does not have the attribute %r" % (target, name)
            )
        return original, local
    def __enter__(self):
        """Perform the patch."""
        new, spec, spec_set = self.new, self.spec, self.spec_set
        autospec, kwargs = self.autospec, self.kwargs
        new_callable = self.new_callable
        self.target = self.getter()
        # normalise False to None
        if spec is False:
            spec = None
        if spec_set is False:
            spec_set = None
        if autospec is False:
            autospec = None
        if spec is not None and autospec is not None:
            raise TypeError("Can't specify spec and autospec")
        if ((spec is not None or autospec is not None) and
            spec_set not in (True, None)):
            raise TypeError("Can't provide explicit spec_set *and* spec or autospec")
        original, local = self.get_original()
        if new is DEFAULT and autospec is None:
            # No replacement supplied: build a MagicMock (or variant)
            # configured from spec/spec_set/new_callable.
            inherit = False
            if spec is True:
                # set spec to the object we are replacing
                spec = original
                if spec_set is True:
                    spec_set = original
                    spec = None
            elif spec is not None:
                if spec_set is True:
                    spec_set = spec
                    spec = None
            elif spec_set is True:
                spec_set = original
            if spec is not None or spec_set is not None:
                if original is DEFAULT:
                    raise TypeError("Can't use 'spec' with create=True")
                if isinstance(original, ClassTypes):
                    # If we're patching out a class and there is a spec
                    inherit = True
            Klass = MagicMock
            _kwargs = {}
            if new_callable is not None:
                Klass = new_callable
            elif spec is not None or spec_set is not None:
                this_spec = spec
                if spec_set is not None:
                    this_spec = spec_set
                if _is_list(this_spec):
                    not_callable = '__call__' not in this_spec
                else:
                    not_callable = not _callable(this_spec)
                if not_callable:
                    Klass = NonCallableMagicMock
            if spec is not None:
                _kwargs['spec'] = spec
            if spec_set is not None:
                _kwargs['spec_set'] = spec_set
            # add a name to mocks
            if (isinstance(Klass, type) and
                issubclass(Klass, NonCallableMock) and self.attribute):
                _kwargs['name'] = self.attribute
            _kwargs.update(kwargs)
            new = Klass(**_kwargs)
            if inherit and _is_instance_mock(new):
                # we can only tell if the instance should be callable if the
                # spec is not a list
                this_spec = spec
                if spec_set is not None:
                    this_spec = spec_set
                if (not _is_list(this_spec) and not
                        _instance_callable(this_spec)):
                    Klass = NonCallableMagicMock
                _kwargs.pop('name')
                new.return_value = Klass(_new_parent=new, _new_name='()',
                                         **_kwargs)
        elif autospec is not None:
            # spec is ignored, new *must* be default, spec_set is treated
            # as a boolean. Should we check spec is not None and that spec_set
            # is a bool?
            if new is not DEFAULT:
                raise TypeError(
                    "autospec creates the mock for you. Can't specify "
                    "autospec and new."
                )
            if original is DEFAULT:
                raise TypeError("Can't use 'autospec' with create=True")
            spec_set = bool(spec_set)
            if autospec is True:
                autospec = original
            new = create_autospec(autospec, spec_set=spec_set,
                                  _name=self.attribute, **kwargs)
        elif kwargs:
            # can't set keyword args when we aren't creating the mock
            # XXXX If new is a Mock we could call new.configure_mock(**kwargs)
            raise TypeError("Can't pass kwargs to a mock we aren't creating")
        new_attr = new
        # Remember enough state for __exit__ to restore the original.
        self.temp_original = original
        self.is_local = local
        setattr(self.target, self.attribute, new_attr)
        if self.attribute_name is not None:
            # patch.multiple: collect created mocks into a dict keyed by
            # attribute name, and enter the additional patchers too.
            extra_args = {}
            if self.new is DEFAULT:
                extra_args[self.attribute_name] = new
            for patching in self.additional_patchers:
                arg = patching.__enter__()
                if patching.new is DEFAULT:
                    extra_args.update(arg)
            return extra_args
        return new
    def __exit__(self, *exc_info):
        """Undo the patch."""
        if not _is_started(self):
            raise RuntimeError('stop called on unstarted patcher')
        if self.is_local and self.temp_original is not DEFAULT:
            setattr(self.target, self.attribute, self.temp_original)
        else:
            delattr(self.target, self.attribute)
            if not self.create and not hasattr(self.target, self.attribute):
                # needed for proxy objects like django settings
                setattr(self.target, self.attribute, self.temp_original)
        del self.temp_original
        del self.is_local
        del self.target
        for patcher in reversed(self.additional_patchers):
            if _is_started(patcher):
                patcher.__exit__(*exc_info)
    def start(self):
        """Activate a patch, returning any created mock."""
        result = self.__enter__()
        self._active_patches.add(self)
        return result
    def stop(self):
        """Stop an active patch."""
        self._active_patches.discard(self)
        return self.__exit__()
def _get_target(target):
try:
target, attribute = target.rsplit('.', 1)
except (TypeError, ValueError):
raise TypeError("Need a valid target to patch. You supplied: %r" %
(target,))
getter = lambda: _importer(target)
return getter, attribute
def _patch_object(
        target, attribute, new=DEFAULT, spec=None,
        create=False, spec_set=None, autospec=None,
        new_callable=None, **kwargs
    ):
    """
    patch.object(target, attribute, new=DEFAULT, spec=None, create=False,
                 spec_set=None, autospec=None, new_callable=None, **kwargs)

    patch the named member (`attribute`) on an object (`target`) with a mock
    object.

    `patch.object` can be used as a decorator, class decorator or a context
    manager. Arguments `new`, `spec`, `create`, `spec_set`,
    `autospec` and `new_callable` have the same meaning as for `patch`. Like
    `patch`, `patch.object` takes arbitrary keyword arguments for configuring
    the mock object it creates.

    When used as a class decorator `patch.object` honours `patch.TEST_PREFIX`
    for choosing which methods to wrap.
    """
    # The target object is already in hand, so the getter just returns it.
    def getter():
        return target
    return _patch(
        getter, attribute, new, spec, create,
        spec_set, autospec, new_callable, kwargs
    )
def _patch_multiple(target, spec=None, create=False, spec_set=None,
                    autospec=None, new_callable=None, **kwargs):
    """Perform multiple patches in a single call. It takes the object to be
    patched (either as an object or a string to fetch the object by importing)
    and keyword arguments for the patches::

        with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'):
            ...

    Use `DEFAULT` as the value if you want `patch.multiple` to create
    mocks for you. In this case the created mocks are passed into a decorated
    function by keyword, and a dictionary is returned when `patch.multiple` is
    used as a context manager.

    `patch.multiple` can be used as a decorator, class decorator or a context
    manager. The arguments `spec`, `spec_set`, `create`,
    `autospec` and `new_callable` have the same meaning as for `patch`. These
    arguments will be applied to *all* patches done by `patch.multiple`.

    When used as a class decorator `patch.multiple` honours
    `patch.TEST_PREFIX` for choosing which methods to wrap.
    """
    # Strings are resolved by import at patch time; objects are used as-is.
    if type(target) in (unicode, str):
        getter = lambda: _importer(target)
    else:
        getter = lambda: target
    if not kwargs:
        raise ValueError(
            'Must supply at least one keyword argument with patch.multiple'
        )
    # need to wrap in a list for python 3, where items is a view
    items = list(kwargs.items())
    first_attribute, first_new = items[0]
    # The first patcher is the "primary" one; the rest ride along as
    # additional_patchers so they enter/exit together.
    patcher = _patch(
        getter, first_attribute, first_new, spec, create, spec_set,
        autospec, new_callable, {}
    )
    patcher.attribute_name = first_attribute
    for extra_attribute, extra_new in items[1:]:
        extra_patcher = _patch(
            getter, extra_attribute, extra_new, spec, create, spec_set,
            autospec, new_callable, {}
        )
        extra_patcher.attribute_name = extra_attribute
        patcher.additional_patchers.append(extra_patcher)
    return patcher
def patch(
        target, new=DEFAULT, spec=None, create=False,
        spec_set=None, autospec=None, new_callable=None, **kwargs
    ):
    """
    `patch` acts as a function decorator, class decorator or a context
    manager. Inside the body of the function or with statement, the `target`
    is patched with a `new` object. When the function/with statement exits
    the patch is undone.

    If `new` is omitted, then the target is replaced with a
    `MagicMock`. If `patch` is used as a decorator and `new` is
    omitted, the created mock is passed in as an extra argument to the
    decorated function. If `patch` is used as a context manager the created
    mock is returned by the context manager.

    `target` should be a string in the form `'package.module.ClassName'`. The
    `target` is imported and the specified object replaced with the `new`
    object, so the `target` must be importable from the environment you are
    calling `patch` from. The target is imported when the decorated function
    is executed, not at decoration time.

    The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
    if patch is creating one for you.

    In addition you can pass `spec=True` or `spec_set=True`, which causes
    patch to pass in the object being mocked as the spec/spec_set object.

    `new_callable` allows you to specify a different class, or callable object,
    that will be called to create the `new` object. By default `MagicMock` is
    used.

    A more powerful form of `spec` is `autospec`. If you set `autospec=True`
    then the mock with be created with a spec from the object being replaced.
    All attributes of the mock will also have the spec of the corresponding
    attribute of the object being replaced. Methods and functions being
    mocked will have their arguments checked and will raise a `TypeError` if
    they are called with the wrong signature. For mocks replacing a class,
    their return value (the 'instance') will have the same spec as the class.

    Instead of `autospec=True` you can pass `autospec=some_object` to use an
    arbitrary object as the spec instead of the one being replaced.

    By default `patch` will fail to replace attributes that don't exist. If
    you pass in `create=True`, and the attribute doesn't exist, patch will
    create the attribute for you when the patched function is called, and
    delete it again afterwards. This is useful for writing tests against
    attributes that your production code creates at runtime. It is off by
    default because it can be dangerous. With it switched on you can write
    passing tests against APIs that don't actually exist!

    Patch can be used as a `TestCase` class decorator. It works by
    decorating each test method in the class. This reduces the boilerplate
    code when your test methods share a common patchings set. `patch` finds
    tests by looking for method names that start with `patch.TEST_PREFIX`.
    By default this is `test`, which matches the way `unittest` finds tests.
    You can specify an alternative prefix by setting `patch.TEST_PREFIX`.

    Patch can be used as a context manager, with the with statement. Here the
    patching applies to the indented block after the with statement. If you
    use "as" then the patched object will be bound to the name after the
    "as"; very useful if `patch` is creating a mock object for you.

    `patch` takes arbitrary keyword arguments. These will be passed to
    the `Mock` (or `new_callable`) on construction.

    `patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
    available for alternate use-cases.
    """
    # Defer the import of `target` to patch time via the getter.
    getter, attribute = _get_target(target)
    return _patch(
        getter, attribute, new, spec, create,
        spec_set, autospec, new_callable, kwargs
    )
class _patch_dict(object):
    """
    Patch a dictionary, or dictionary like object, and restore the dictionary
    to its original state after the test.

    `in_dict` can be a dictionary or a mapping like container. If it is a
    mapping then it must at least support getting, setting and deleting items
    plus iterating over keys.

    `in_dict` can also be a string specifying the name of the dictionary,
    which will then be fetched by importing it.

    `values` can be a dictionary of values to set in the dictionary. `values`
    can also be an iterable of `(key, value)` pairs.

    If `clear` is True then the dictionary will be cleared before the new
    values are set.

    `patch.dict` can also be called with arbitrary keyword arguments to set
    values in the dictionary::

        with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()):
            ...

    `patch.dict` can be used as a context manager, decorator or class
    decorator. When used as a class decorator `patch.dict` honours
    `patch.TEST_PREFIX` for choosing which methods to wrap.
    """
    def __init__(self, in_dict, values=(), clear=False, **kwargs):
        # A string names the dict; resolve it by importing.
        if isinstance(in_dict, basestring):
            in_dict = _importer(in_dict)
        self.in_dict = in_dict
        # support any argument supported by dict(...) constructor
        self.values = dict(values)
        self.values.update(kwargs)
        self.clear = clear
        self._original = None
    def __call__(self, f):
        # Decorator usage: wrap classes and callables appropriately.
        if isinstance(f, ClassTypes):
            return self.decorate_class(f)
        @wraps(f)
        def _inner(*args, **kw):
            self._patch_dict()
            try:
                return f(*args, **kw)
            finally:
                self._unpatch_dict()
        return _inner
    def decorate_class(self, klass):
        # Decorate every callable test method with a fresh _patch_dict.
        for attr in dir(klass):
            attr_value = getattr(klass, attr)
            if (attr.startswith(patch.TEST_PREFIX) and
                 hasattr(attr_value, "__call__")):
                decorator = _patch_dict(self.in_dict, self.values, self.clear)
                decorated = decorator(attr_value)
                setattr(klass, attr, decorated)
        return klass
    def __enter__(self):
        """Patch the dict."""
        self._patch_dict()
    def _patch_dict(self):
        # Snapshot the current contents, then apply clear/values.
        values = self.values
        in_dict = self.in_dict
        clear = self.clear
        try:
            original = in_dict.copy()
        except AttributeError:
            # dict like object with no copy method
            # must support iteration over keys
            original = {}
            for key in in_dict:
                original[key] = in_dict[key]
        self._original = original
        if clear:
            _clear_dict(in_dict)
        try:
            in_dict.update(values)
        except AttributeError:
            # dict like object with no update method
            for key in values:
                in_dict[key] = values[key]
    def _unpatch_dict(self):
        # Restore the snapshot taken in _patch_dict.
        in_dict = self.in_dict
        original = self._original
        _clear_dict(in_dict)
        try:
            in_dict.update(original)
        except AttributeError:
            for key in original:
                in_dict[key] = original[key]
    def __exit__(self, *args):
        """Unpatch the dict."""
        self._unpatch_dict()
        return False
    # start()/stop() aliases mirror the _patch API.
    start = __enter__
    stop = __exit__
def _clear_dict(in_dict):
try:
in_dict.clear()
except AttributeError:
keys = list(in_dict)
for key in keys:
del in_dict[key]
def _patch_stopall():
    """Stop all active patches."""
    # Iterate over a snapshot: stop() mutates _active_patches.
    for active_patcher in list(_patch._active_patches):
        active_patcher.stop()
# Expose the alternate patching forms as attributes of patch().
patch.object = _patch_object
patch.dict = _patch_dict
patch.multiple = _patch_multiple
patch.stopall = _patch_stopall
# Method-name prefix used when patch decorates a test class.
patch.TEST_PREFIX = 'test'
# Space-separated bare names (no dunder underscores) of the magic methods
# that mocks support, grouped into general, numeric, in-place, reflected
# and version-specific sets below.
magic_methods = (
    "lt le gt ge eq ne "
    "getitem setitem delitem "
    "len contains iter "
    "hash str sizeof "
    "enter exit "
    "divmod neg pos abs invert "
    "complex int float index "
    "trunc floor ceil "
)
numerics = "add sub mul div floordiv mod lshift rshift and xor or pow "
# In-place (__iadd__ ...) and reflected (__radd__ ...) variants.
inplace = ' '.join('i%s' % n for n in numerics.split())
right = ' '.join('r%s' % n for n in numerics.split())
extra = ''
if inPy3k:
    extra = 'bool next '
else:
    extra = 'unicode long nonzero oct hex truediv rtruediv '
# not including __prepare__, __instancecheck__, __subclasscheck__
# (as they are metaclass methods)
# __del__ is not supported at all as it causes problems if it exists
_non_defaults = set('__%s__' % method for method in [
    'cmp', 'getslice', 'setslice', 'coerce', 'subclasses',
    'format', 'get', 'set', 'delete', 'reversed',
    'missing', 'reduce', 'reduce_ex', 'getinitargs',
    'getnewargs', 'getstate', 'setstate', 'getformat',
    'setformat', 'repr', 'dir'
])
def _get_method(name, func):
"Turns a callable object (like a mock) into a real function"
def method(self, *args, **kw):
return func(self, *args, **kw)
method.__name__ = name
return method
# Full set of dunder names a MagicMock configures by default.
_magics = set(
    '__%s__' % method for method in
    ' '.join([magic_methods, numerics, inplace, right, extra]).split()
)

# Every magic method mock knows about, whether configured by default or not.
_all_magics = _magics | _non_defaults
_unsupported_magics = set([
'__getattr__', '__setattr__',
'__init__', '__new__', '__prepare__'
'__instancecheck__', '__subclasscheck__',
'__del__'
])
# Default return values computed lazily from the mock instance itself.
# NOTE(review): `unicode` is a Python 2 builtin — presumably aliased
# earlier in this module for Python 3 compatibility; verify.
_calculate_return_value = {
    '__hash__': lambda self: object.__hash__(self),
    '__str__': lambda self: object.__str__(self),
    '__sizeof__': lambda self: object.__sizeof__(self),
    '__unicode__': lambda self: unicode(object.__str__(self)),
}

# Fixed default return values for magic methods.
# NOTE(review): `long` is Python 2 only — presumably aliased to int
# elsewhere in this module when running on Python 3; verify.
_return_values = {
    '__lt__': NotImplemented,
    '__gt__': NotImplemented,
    '__le__': NotImplemented,
    '__ge__': NotImplemented,
    '__int__': 1,
    '__contains__': False,
    '__len__': 0,
    '__exit__': False,
    '__complex__': 1j,
    '__float__': 1.0,
    '__bool__': True,
    '__nonzero__': True,
    '__oct__': '1',
    '__hex__': '0x1',
    '__long__': long(1),
    '__index__': 1,
}
def _get_eq(self):
    """Build the default __eq__ closure for mock *self*.

    Honours a return_value configured on self.__eq__; otherwise falls
    back to identity comparison.
    """
    def __eq__(other):
        configured = self.__eq__._mock_return_value
        if configured is not DEFAULT:
            return configured
        return self is other
    return __eq__
def _get_ne(self):
    """Build the default __ne__ closure for mock *self*.

    Honours a return_value configured on self.__ne__; otherwise falls
    back to identity comparison (mirrors _get_eq).
    """
    def __ne__(other):
        ret_val = self.__ne__._mock_return_value
        if ret_val is not DEFAULT:
            # BUG FIX: previously returned the DEFAULT sentinel itself,
            # silently discarding any return_value configured on __ne__
            # (inconsistent with _get_eq, which returns the value).
            return ret_val
        return self is not other
    return __ne__
def _get_iter(self):
    """Build the default __iter__ closure for mock *self*."""
    def __iter__():
        configured = self.__iter__._mock_return_value
        if configured is DEFAULT:
            return iter([])
        # iter() on something that is already an iterator returns it
        # unchanged, so a pre-made iterator return value is handed out as-is.
        return iter(configured)
    return __iter__
# Magic methods whose defaults are closures over the mock, so they can
# consult a configured return_value at call time.
_side_effect_methods = {
    '__eq__': _get_eq,
    '__ne__': _get_ne,
    '__iter__': _get_iter,
}


def _set_return_value(mock, method, name):
    """Give *method* (the child mock for magic method *name* on *mock*)
    its default return value or side effect.

    Precedence: fixed value from _return_values, then a value computed
    from the mock via _calculate_return_value, then a closure-based
    side effect from _side_effect_methods.
    """
    fixed = _return_values.get(name, DEFAULT)
    if fixed is not DEFAULT:
        method.return_value = fixed
        return

    return_calulator = _calculate_return_value.get(name)
    if return_calulator is not None:
        try:
            return_value = return_calulator(mock)
        except AttributeError:
            # XXXX why do we return AttributeError here?
            # set it as a side_effect instead?
            return_value = AttributeError(name)
        method.return_value = return_value
        return

    side_effector = _side_effect_methods.get(name)
    if side_effector is not None:
        method.side_effect = side_effector(mock)
class MagicMixin(object):
    """Mixin that attaches default magic-method support to a Mock class."""

    def __init__(self, *args, **kw):
        _super(MagicMixin, self).__init__(*args, **kw)
        self._mock_set_magics()

    def _mock_set_magics(self):
        """Install MagicProxy descriptors for the supported magic methods.

        Respects a spec (only magics present in self._mock_methods are
        kept) and removes magics configured by a previous call that the
        current spec no longer allows.
        """
        these_magics = _magics
        if self._mock_methods is not None:
            these_magics = _magics.intersection(self._mock_methods)
        # BUG FIX (dead store): remove_magics was first initialised to an
        # empty set() and then immediately overwritten; the useless
        # assignment has been removed.
        remove_magics = _magics - these_magics

        for entry in remove_magics:
            if entry in type(self).__dict__:
                # remove unneeded magic methods
                delattr(self, entry)

        # don't overwrite existing attributes if called a second time
        these_magics = these_magics - set(type(self).__dict__)

        _type = type(self)
        for entry in these_magics:
            setattr(_type, entry, MagicProxy(entry, self))
class NonCallableMagicMock(MagicMixin, NonCallableMock):
    """A version of `MagicMock` that isn't callable."""

    def mock_add_spec(self, spec, spec_set=False):
        """Add a spec to a mock. `spec` can either be an object or a
        list of strings. Only attributes on the `spec` can be fetched as
        attributes from the mock.

        If `spec_set` is True then only attributes on the spec can be set."""
        self._mock_add_spec(spec, spec_set)
        # Re-sync the configured magic methods with the new spec.
        self._mock_set_magics()
class MagicMock(MagicMixin, Mock):
    """
    MagicMock is a subclass of Mock with default implementations
    of most of the magic methods. You can use MagicMock without having to
    configure the magic methods yourself.

    If you use the `spec` or `spec_set` arguments then *only* magic
    methods that exist in the spec will be created.

    Attributes and the return value of a `MagicMock` will also be `MagicMocks`.
    """

    def mock_add_spec(self, spec, spec_set=False):
        """Add a spec to a mock. `spec` can either be an object or a
        list of strings. Only attributes on the `spec` can be fetched as
        attributes from the mock.

        If `spec_set` is True then only attributes on the spec can be set."""
        self._mock_add_spec(spec, spec_set)
        # Re-sync the configured magic methods with the new spec.
        self._mock_set_magics()
class MagicProxy(object):
    """Descriptor that lazily instantiates a magic-method mock on first use."""

    def __init__(self, name, parent):
        self.name = name
        self.parent = parent

    def create_mock(self):
        """Create the child mock, attach it to the parent and configure
        its default return value / side effect."""
        parent, entry = self.parent, self.name
        new_mock = parent._get_child_mock(name=entry, _new_name=entry,
                                          _new_parent=parent)
        setattr(parent, entry, new_mock)
        _set_return_value(parent, new_mock, entry)
        return new_mock

    def __call__(self, *args, **kwargs):
        return self.create_mock()(*args, **kwargs)

    def __get__(self, obj, _type=None):
        return self.create_mock()
class _ANY(object):
"A helper object that compares equal to everything."
def __eq__(self, other):
return True
def __ne__(self, other):
return False
def __repr__(self):
return '<ANY>'
ANY = _ANY()
def _format_call_signature(name, args, kwargs):
message = '%s(%%s)' % name
formatted_args = ''
args_string = ', '.join([repr(arg) for arg in args])
kwargs_string = ', '.join([
'%s=%r' % (key, value) for key, value in kwargs.items()
])
if args_string:
formatted_args = args_string
if kwargs_string:
if formatted_args:
formatted_args += ', '
formatted_args += kwargs_string
return message % formatted_args
class _Call(tuple):
    """
    A tuple for holding the results of a call to a mock, either in the form
    `(args, kwargs)` or `(name, args, kwargs)`.

    If args or kwargs are empty then a call tuple will compare equal to
    a tuple without those values. This makes comparisons less verbose::

        _Call(('name', (), {})) == ('name',)
        _Call(('name', (1,), {})) == ('name', (1,))
        _Call(((), {'a': 'b'})) == ({'a': 'b'},)

    The `_Call` object provides a useful shortcut for comparing with call::

        _Call(((1, 2), {'a': 3})) == call(1, 2, a=3)
        _Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3)

    If the _Call has no name then it will match any name.
    """
    def __new__(cls, value=(), name=None, parent=None, two=False,
                from_kall=True):
        # The tuple contents are derived from `value` alone; the `name`
        # parameter is deliberately shadowed here (it is stored by __init__).
        name = ''
        args = ()
        kwargs = {}
        _len = len(value)
        if _len == 3:
            name, args, kwargs = value
        elif _len == 2:
            first, second = value
            # NOTE(review): `basestring` is the Python 2 spelling —
            # presumably aliased for Python 3 earlier in this module; verify.
            if isinstance(first, basestring):
                name = first
                if isinstance(second, tuple):
                    args = second
                else:
                    kwargs = second
            else:
                args, kwargs = first, second
        elif _len == 1:
            value, = value
            if isinstance(value, basestring):
                name = value
            elif isinstance(value, tuple):
                args = value
            else:
                kwargs = value

        if two:
            # Two-tuple form: (args, kwargs) without the name element.
            return tuple.__new__(cls, (args, kwargs))

        return tuple.__new__(cls, (name, args, kwargs))

    def __init__(self, value=(), name=None, parent=None, two=False,
                 from_kall=True):
        # from_kall marks calls created via the `call` helper (affects repr).
        self.name = name
        self.parent = parent
        self.from_kall = from_kall

    def __eq__(self, other):
        if other is ANY:
            return True
        try:
            len_other = len(other)
        except TypeError:
            return False

        # Normalise self to (name, args, kwargs).
        self_name = ''
        if len(self) == 2:
            self_args, self_kwargs = self
        else:
            self_name, self_args, self_kwargs = self

        # Normalise `other`, which may be a 0-3 element tuple in any of the
        # shorthand forms documented on the class.
        other_name = ''
        if len_other == 0:
            other_args, other_kwargs = (), {}
        elif len_other == 3:
            other_name, other_args, other_kwargs = other
        elif len_other == 1:
            value, = other
            if isinstance(value, tuple):
                other_args = value
                other_kwargs = {}
            elif isinstance(value, basestring):
                other_name = value
                other_args, other_kwargs = (), {}
            else:
                other_args = ()
                other_kwargs = value
        else:
            # len 2
            # could be (name, args) or (name, kwargs) or (args, kwargs)
            first, second = other
            if isinstance(first, basestring):
                other_name = first
                if isinstance(second, tuple):
                    other_args, other_kwargs = second, {}
                else:
                    other_args, other_kwargs = (), second
            else:
                other_args, other_kwargs = first, second

        # A nameless _Call matches any name; otherwise names must agree.
        if self_name and other_name != self_name:
            return False

        # this order is important for ANY to work!
        return (other_args, other_kwargs) == (self_args, self_kwargs)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __call__(self, *args, **kwargs):
        # Calling a call object records the arguments: call.foo(1, 2).
        if self.name is None:
            return _Call(('', args, kwargs), name='()')

        name = self.name + '()'
        return _Call((self.name, args, kwargs), name=name, parent=self)

    def __getattr__(self, attr):
        # Attribute access builds up dotted call names: call.foo.bar.
        if self.name is None:
            return _Call(name=attr, from_kall=False)
        name = '%s.%s' % (self.name, attr)
        return _Call(name=name, parent=self, from_kall=False)

    def __repr__(self):
        if not self.from_kall:
            name = self.name or 'call'
            if name.startswith('()'):
                name = 'call%s' % name
            return name

        if len(self) == 2:
            name = 'call'
            args, kwargs = self
        else:
            name, args, kwargs = self
            if not name:
                name = 'call'
            elif not name.startswith('()'):
                name = 'call.%s' % name
            else:
                name = 'call%s' % name
        return _format_call_signature(name, args, kwargs)

    def call_list(self):
        """For a call object that represents multiple calls, `call_list`
        returns a list of all the intermediate calls as well as the
        final call."""
        vals = []
        thing = self
        while thing is not None:
            if thing.from_kall:
                vals.append(thing)
            thing = thing.parent
        return _CallList(reversed(vals))


# The canonical helper instance: call(1, 2), call.method(x), ...
call = _Call(from_kall=False)
def create_autospec(spec, spec_set=False, instance=False, _parent=None,
                    _name=None, **kwargs):
    """Create a mock object using another object as a spec. Attributes on the
    mock will use the corresponding attribute on the `spec` object as their
    spec.

    Functions or methods being mocked will have their arguments checked
    to check that they are called with the correct signature.

    If `spec_set` is True then attempting to set attributes that don't exist
    on the spec object will raise an `AttributeError`.

    If a class is used as a spec then the return value of the mock (the
    instance of the class) will have the same spec. You can use a class as the
    spec for an instance object by passing `instance=True`. The returned mock
    will only be callable if instances of the mock are callable.

    `create_autospec` also takes arbitrary keyword arguments that are passed to
    the constructor of the created mock."""
    if _is_list(spec):
        # can't pass a list instance to the mock constructor as it will be
        # interpreted as a list of strings
        spec = type(spec)

    is_type = isinstance(spec, ClassTypes)

    _kwargs = {'spec': spec}
    if spec_set:
        _kwargs = {'spec_set': spec}
    elif spec is None:
        # None we mock with a normal mock without a spec
        _kwargs = {}
    _kwargs.update(kwargs)

    # Pick the mock class: callable specs get MagicMock, otherwise the
    # non-callable variant.
    Klass = MagicMock
    if type(spec) in DescriptorTypes:
        # descriptors don't have a spec
        # because we don't know what type they return
        _kwargs = {}
    elif not _callable(spec):
        Klass = NonCallableMagicMock
    elif is_type and instance and not _instance_callable(spec):
        Klass = NonCallableMagicMock

    _new_name = _name
    if _parent is None:
        # for a top level object no _new_name should be set
        _new_name = ''

    mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name,
                 name=_name, **_kwargs)

    if isinstance(spec, FunctionTypes):
        # should only happen at the top level because we don't
        # recurse for functions
        mock = _set_signature(mock, spec)
    else:
        _check_signature(spec, mock, is_type, instance)

    if _parent is not None and not instance:
        _parent._mock_children[_name] = mock

    if is_type and not instance and 'return_value' not in kwargs:
        # Calling a class mock yields an instance mock specced the same way.
        mock.return_value = create_autospec(spec, spec_set, instance=True,
                                            _name='()', _parent=mock)

    for entry in dir(spec):
        if _is_magic(entry):
            # MagicMock already does the useful magic methods for us
            continue
        if isinstance(spec, FunctionTypes) and entry in FunctionAttributes:
            # allow a mock to actually be a function
            continue

        # XXXX do we need a better way of getting attributes without
        # triggering code execution (?) Probably not - we need the actual
        # object to mock it so we would rather trigger a property than mock
        # the property descriptor. Likewise we want to mock out dynamically
        # provided attributes.
        # XXXX what about attributes that raise exceptions other than
        # AttributeError on being fetched?
        # we could be resilient against it, or catch and propagate the
        # exception when the attribute is fetched from the mock
        try:
            original = getattr(spec, entry)
        except AttributeError:
            continue

        kwargs = {'spec': original}
        if spec_set:
            kwargs = {'spec_set': original}

        if not isinstance(original, FunctionTypes):
            # Non-function attributes are specced lazily via _SpecState.
            # NOTE(review): these positional args bind `instance` to the
            # `ids` parameter of _SpecState (see its signature) — confirm
            # this is intended.
            new = _SpecState(original, spec_set, mock, entry, instance)
            mock._mock_children[entry] = new
        else:
            parent = mock
            if isinstance(spec, FunctionTypes):
                parent = mock.mock

            new = MagicMock(parent=parent, name=entry, _new_name=entry,
                            _new_parent=parent, **kwargs)
            mock._mock_children[entry] = new
            skipfirst = _must_skip(spec, entry, is_type)
            _check_signature(original, new, skipfirst=skipfirst)

        # so functions created with _set_signature become instance attributes,
        # *plus* their underlying mock exists in _mock_children of the parent
        # mock. Adding to _mock_children may be unnecessary where we are also
        # setting as an instance attribute?
        if isinstance(new, FunctionTypes):
            setattr(mock, entry, new)

    return mock
def _must_skip(spec, entry, is_type):
    """Decide whether signature checking for attribute *entry* of *spec*
    should skip the first argument (the implicit self)."""
    if not isinstance(spec, ClassTypes):
        if entry in getattr(spec, '__dict__', {}):
            # instance attribute - shouldn't skip
            return False
        # Fall back to inspecting the instance's class.
        spec = spec.__class__
    if not hasattr(spec, '__mro__'):
        # old style class: can't have descriptors anyway
        return is_type

    for klass in spec.__mro__:
        result = klass.__dict__.get(entry, DEFAULT)
        if result is DEFAULT:
            continue
        if isinstance(result, (staticmethod, classmethod)):
            # static/class methods have no implicit first argument.
            return False
        # First definition found in the MRO decides.
        return is_type

    # shouldn't get here unless function is a dynamically provided attribute
    # XXXX untested behaviour
    return is_type
def _get_class(obj):
try:
return obj.__class__
except AttributeError:
# in Python 2, _sre.SRE_Pattern objects have no __class__
return type(obj)
class _SpecState(object):
def __init__(self, spec, spec_set=False, parent=None,
name=None, ids=None, instance=False):
self.spec = spec
self.ids = ids
self.spec_set = spec_set
self.parent = parent
self.instance = instance
self.name = name
# The types that count as "functions" when autospeccing: a plain Python
# function, a bound instance method and (on Python 2) an unbound method.
FunctionTypes = (
    # python function
    type(create_autospec),
    # instance method
    type(ANY.__eq__),
    # unbound method
    type(_ANY.__eq__),
)

# Function-only attributes that must stay reachable when a mock stands in
# for a real function (Python 2 spellings).
FunctionAttributes = set([
    'func_closure',
    'func_code',
    'func_defaults',
    'func_dict',
    'func_doc',
    'func_globals',
    'func_name',
])
# Lazily-computed spec for file handles (a list of attribute names),
# initialised on the first call to mock_open().
file_spec = None


def mock_open(mock=None, read_data=''):
    """
    A helper function to create a mock to replace the use of `open`. It works
    for `open` called directly or used as a context manager.

    The `mock` argument is the mock object to configure. If `None` (the
    default) then a `MagicMock` will be created for you, with the API limited
    to methods or attributes available on standard file handles.

    `read_data` is a string for the `read` method of the file handle to return.
    This is an empty string by default.
    """
    global file_spec
    if file_spec is None:
        # set on first use
        if inPy3k:
            import _io
            # Union of text-mode and byte-mode handle attributes.
            file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
        else:
            file_spec = file

    if mock is None:
        mock = MagicMock(name='open', spec=open)

    handle = MagicMock(spec=file_spec)
    handle.write.return_value = None
    # `with open(...) as f:` yields the same handle object.
    handle.__enter__.return_value = handle
    handle.read.return_value = read_data

    mock.return_value = handle
    return mock
class PropertyMock(Mock):
    """
    A mock intended to be used as a property, or other descriptor, on a class.
    `PropertyMock` provides `__get__` and `__set__` methods so you can specify
    a return value when it is fetched.

    Fetching a `PropertyMock` instance from an object calls the mock, with
    no args. Setting it calls the mock with the value being set.
    """

    def _get_child_mock(self, **kwargs):
        # Children of a PropertyMock are ordinary MagicMocks.
        return MagicMock(**kwargs)

    def __get__(self, obj, obj_type):
        # Attribute read -> call the mock with no arguments.
        return self()

    def __set__(self, obj, val):
        # Attribute write -> record the assigned value as a call.
        self(val)
| agpl-3.0 |
colajam93/aurpackager | manager/views.py | 1 | 5734 | import json
import os.path
from datetime import datetime
from django.forms.models import model_to_dict
from django.http import HttpResponse, FileResponse, HttpRequest
from django.shortcuts import render, redirect
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.http import condition
from typing import Union
import packager.path
from lib.aur import servers
from manager.models import Package, Build, Artifact
from packager.settings_local import CUSTOM_LOCAL_REPOSITORY_DIR, CUSTOM_LOCAL_REPOSITORY
@ensure_csrf_cookie
def package_list(request):
    """List every package, annotated with the status of its newest build."""
    packages = Package.objects.all().order_by('id')
    for package in packages:
        builds = Build.objects.filter(package_id=package.id).order_by('-id')
        if len(builds) >= 1:
            # builds is newest-first, so builds[0] holds the latest status.
            setattr(package, 'status', builds[0].status)
        else:
            # No builds yet; the template shows the literal string 'None'.
            setattr(package, 'status', 'None')
    return render(request, 'package_list.html', {'packages': packages, 'active': 'list'})
@ensure_csrf_cookie
def package_detail(request, package_name):
    """Show one package: its builds and (after a success) its artifacts."""
    package = Package.objects.get(name=package_name)
    builds = Build.objects.filter(package_id=package.id).order_by('-id')
    if Build.objects.filter(package_id=package.id, status=Build.SUCCESS).exists():
        artifacts = Artifact.objects.filter(package=package)
    else:
        # Nothing downloadable until at least one build has succeeded.
        artifacts = []
    # builds is newest-first; the numbering assigns 1 to the newest build.
    for build, number in zip(builds, range(1, len(builds) + 1)):
        build.number = number
    return render(request, 'package_detail.html',
                  {'package': package, 'builds': builds, 'active': 'list', 'artifacts': artifacts})
@ensure_csrf_cookie
def package_register(request):
    """Render the package registration form with the available AUR servers."""
    return render(request, 'package_register.html',
                  {'active': 'register', 'servers': servers(), 'OFFICIAL': Package.OFFICIAL})
@ensure_csrf_cookie
def package_register_detail(request, package_name):
    """Confirmation page for registering *package_name* from a chosen server.

    NOTE(review): request.GET['server'] raises MultiValueDictKeyError when
    the query parameter is absent — confirm callers always supply it.
    """
    return render(request, 'package_register_detail.html',
                  {'package_name': package_name, 'active': 'register', 'server': request.GET['server']})
@ensure_csrf_cookie
def build_detail(request, package_name, build_number):
    """Show a single build of a package, with per-artifact sha256 digests."""
    package = Package.objects.get(name=package_name)
    try:
        # build_number is 1-based into the newest-first build list.
        build = Build.objects.filter(package_id=package.id).order_by('-id')[int(build_number) - 1]
        build.number = build_number
    except IndexError:
        return redirect('manager:package_list')
    is_success = build.status == Build.SUCCESS
    artifacts = []
    try:
        # build.sha256 stores a JSON object mapping artifact name -> digest.
        sha256s = json.loads(build.sha256)
    except json.JSONDecodeError:
        # Missing/invalid JSON: render artifacts with empty digests.
        sha256s = {}
    for artifact in Artifact.objects.filter(package=package):
        a = model_to_dict(artifact)
        a['sha256'] = sha256s.get(a['name'], '')
        artifacts.append(a)
    return render(request, 'build_detail.html',
                  {'build': build, 'package': build.package, 'is_success': is_success, 'active': 'list',
                   'artifacts': artifacts})
@ensure_csrf_cookie
def build_log(request, package_name, build_number):
    """Show the captured log of one finished build; 404 otherwise."""
    try:
        # build_number is 1-based into the newest-first build list.
        build = Build.objects.filter(package__name=package_name).order_by('-id')[int(build_number) - 1]
        build.number = build_number
    except IndexError:
        build = None
    if build and not build.status == Build.BUILDING:
        path = packager.path.build_to_path(build)
        try:
            with open(path.log_file, 'r') as f:
                log = f.read()
        except FileNotFoundError:
            # Build finished without producing a log; show an empty one.
            log = ''
        return render(request, 'build_log.html',
                      {'build': build, 'package': build.package, 'log': log, 'active': 'list'})
    else:
        return HttpResponse(status=404)
def _package_response(path: str) -> Union[HttpResponse, FileResponse]:
    """
    :param path: full path to package file
    :return: FileResponse or HttpResponse(error)
    """
    try:
        # FileResponse takes ownership of the file object and closes it
        # once streaming completes.
        f = open(path, 'rb')
        response = FileResponse(f, content_type='application/x-xz')
        response['Content-Length'] = os.path.getsize(path)
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(os.path.basename(path))
        return response
    except FileNotFoundError:
        return HttpResponse(status=404)
    except PermissionError:
        return HttpResponse(status=403)
@ensure_csrf_cookie
def build_download(request: HttpRequest, package_name: str, build_number: str) -> Union[HttpResponse, FileResponse]:
    """Stream one artifact of a successful build as a file download.

    NOTE(review): the URL argument is used here as an *artifact* name
    (Artifact.objects.get(name=package_name)) — confirm the route really
    passes an artifact name despite the parameter's name.
    """
    artifact = Artifact.objects.get(name=package_name)
    try:
        # build_number is 1-based into the newest-first build list.
        build = Build.objects.filter(package_id=artifact.package.id).order_by('-id')[int(build_number) - 1]
    except IndexError:
        build = None
    if build and build.status == Build.SUCCESS:
        path = packager.path.build_to_path(build)
        result_file = path.artifact_file(artifact.name)
        return _package_response(result_file)
    else:
        return HttpResponse(status=404)
def _repository_condition_func(request: HttpRequest, file_name: str) -> datetime:
    """Return the mtime of *file_name* in the local repo as a naive UTC datetime."""
    path = os.path.join(CUSTOM_LOCAL_REPOSITORY_DIR, file_name)
    return datetime.utcfromtimestamp(os.path.getmtime(path))


def _repository_last_modified_func(request: HttpRequest, file_name: str) -> datetime:
    # Last-Modified header value: the file's mtime.
    return _repository_condition_func(request, file_name)


def _repository_etag_func(request: HttpRequest, file_name: str) -> str:
    # ETag derived from the same mtime, so it changes whenever the file does.
    return _repository_condition_func(request, file_name).isoformat()
@ensure_csrf_cookie
@condition(etag_func=_repository_etag_func, last_modified_func=_repository_last_modified_func)
def repository(request: HttpRequest, file_name: str) -> Union[HttpResponse, FileResponse]:
    """Serve a file from the custom local pacman repository (404 if disabled)."""
    if not CUSTOM_LOCAL_REPOSITORY:
        return HttpResponse(status=404)
    path = os.path.join(CUSTOM_LOCAL_REPOSITORY_DIR, file_name)
    return _package_response(path)
| mit |
SeanCameronConklin/aima-python | submissions/Fritz/c4-11-28/utils.py | 56 | 18937 | """Provides some utilities widely used by other modules"""
import bisect
import collections
import collections.abc
import functools
import operator
import os.path
import random
import math
# ______________________________________________________________________________
# Functions on Sequences and Iterables
def sequence(iterable):
    """Coerce *iterable* to a sequence, leaving real sequences untouched."""
    if isinstance(iterable, collections.abc.Sequence):
        return iterable
    return tuple(iterable)
def removeall(item, seq):
    """Return a copy of seq (or string) with every occurrence of item removed."""
    if isinstance(seq, str):
        return seq.replace(item, '')
    kept = []
    for element in seq:
        if element != item:
            kept.append(element)
    return kept
def unique(seq):  # TODO: replace with set
    """Return the distinct elements of *seq* (order unspecified; hashable only)."""
    distinct = set(seq)
    return list(distinct)
def count(seq):
    """Count how many items of *seq* are truthy."""
    total = 0
    for item in seq:
        if item:
            total += 1
    return total
def product(numbers):
    """Return the product of the numbers, e.g. product([2, 3, 10]) == 60"""
    # Folding with an initial value of 1 handles the empty sequence.
    return functools.reduce(operator.mul, numbers, 1)
def first(iterable, default=None):
    """Return the first element of *iterable*, or *default* if it is empty.

    Works on indexable sequences as well as one-shot iterators/generators.
    """
    try:
        return iterable[0]
    except TypeError:
        # No indexing support: treat it as an iterator.
        return next(iterable, default)
    except IndexError:
        return default
def is_in(elt, seq):
    """Similar to (elt in seq), but compares with 'is', not '=='."""
    for candidate in seq:
        if candidate is elt:
            return True
    return False
# ______________________________________________________________________________
# argmin and argmax

identity = lambda x: x

# Python's builtin min/max already accept a key function, so argmin/argmax
# are plain aliases of them.
argmin = min
argmax = max


def argmin_random_tie(seq, key=identity):
    """Return a minimum element of seq; break ties at random."""
    return argmin(shuffled(seq), key=key)


def argmax_random_tie(seq, key=identity):
    "Return an element with highest fn(seq[i]) score; break ties at random."
    return argmax(shuffled(seq), key=key)
def shuffled(iterable):
    """Return a new, randomly shuffled list of the items of *iterable*."""
    copy = list(iterable)
    random.shuffle(copy)
    return copy
# ______________________________________________________________________________
# Statistical and mathematical functions
def histogram(values, mode=0, bin_function=None):
    """Return a list of (value, count) pairs, summarizing the input values.

    Sorted by increasing value, or if mode=1, by decreasing count.
    If bin_function is given, map it over values first.
    """
    if bin_function:
        values = map(bin_function, values)

    counts = collections.defaultdict(int)
    for val in values:
        counts[val] += 1

    pairs = list(counts.items())
    if mode:
        # Order by count (then value), largest first.
        pairs.sort(key=lambda pair: (pair[1], pair[0]), reverse=True)
    else:
        pairs.sort()
    return pairs
def dotproduct(X, Y):
    """Return the sum of the element-wise product of vectors X and Y."""
    total = 0
    for x, y in zip(X, Y):
        total += x * y
    return total
def element_wise_product(X, Y):
    """Return the element-wise (Hadamard) product of equal-length vectors X and Y."""
    assert len(X) == len(Y)
    return list(map(operator.mul, X, Y))
def matrix_multiplication(X_M, *Y_M):
    """Return the matrix product of X_M and any number of further matrices.

    >>> matrix_multiplication([[1, 2, 3], [2, 3, 4]],
    ...                       [[3, 4], [1, 2], [1, 0]])
    [[8, 8], [13, 14]]
    """
    def _mat_mult(A, B):
        """Multiply one (m x n) matrix by one (n x p) matrix."""
        assert len(A[0]) == len(B)
        # Transpose B so each output cell is a row/column dot product.
        cols = list(zip(*B))
        return [[sum(a * b for a, b in zip(row, col)) for col in cols]
                for row in A]

    result = X_M
    for Y in Y_M:
        result = _mat_mult(result, Y)
    return result
def vector_to_diagonal(v):
    """Build a square matrix with the elements of *v* on the diagonal
    and zeros everywhere else."""
    n = len(v)
    return [[v[i] if i == j else 0 for j in range(n)] for i in range(n)]
def vector_add(a, b):
    """Component-wise addition of two vectors, returned as a tuple."""
    return tuple(x + y for x, y in zip(a, b))
def scalar_vector_product(X, Y):
    """Return the vector Y scaled element-wise by the scalar X."""
    result = []
    for y in Y:
        result.append(X * y)
    return result
def scalar_matrix_product(X, Y):
    """Return the matrix Y with every row scaled by the scalar X."""
    scaled_rows = []
    for row in Y:
        scaled_rows.append(scalar_vector_product(X, row))
    return scaled_rows
def inverse_matrix(X):
    """Invert a 2x2 matrix using the closed-form adjugate formula."""
    assert len(X) == 2
    assert len(X[0]) == 2
    (a, b), (c, d) = X
    det = a * d - b * c
    assert det != 0
    scale = 1.0 / det
    # inv = (1/det) * [[d, -b], [-c, a]]
    return [[scale * d, scale * -b],
            [scale * -c, scale * a]]
def probability(p):
    """Return True with probability *p*, else False."""
    roll = random.uniform(0.0, 1.0)
    return p > roll
def weighted_sample_with_replacement(seq, weights, n):
    """Pick n samples from seq at random, with replacement, with the
    probability of each element in proportion to its corresponding
    weight."""
    sampler = weighted_sampler(seq, weights)
    return [sampler() for _ in range(n)]
def weighted_sampler(seq, weights):
    """Return a zero-argument function that samples from *seq* with
    probabilities proportional to *weights*."""
    cumulative = []
    running = 0
    for w in weights:
        running = running + w
        cumulative.append(running)

    def sample():
        # A uniform draw over the total weight lands in one element's
        # cumulative interval; bisect finds which one.
        r = random.uniform(0, cumulative[-1])
        return seq[bisect.bisect(cumulative, r)]

    return sample
def rounder(numbers, d=4):
    """Round a single number, or (recursively) a collection of numbers,
    to d decimal places, preserving the collection type."""
    if isinstance(numbers, (int, float)):
        return round(numbers, d)
    kind = type(numbers)  # Can be list, set, tuple, etc.
    return kind(rounder(item, d) for item in numbers)
def num_or_str(x):
    """Convert *x* to an int or float if possible; otherwise return it
    as a whitespace-stripped string."""
    for convert in (int, float):
        try:
            return convert(x)
        except ValueError:
            pass
    return str(x).strip()
def normalize(dist):
    """Scale the numbers in *dist* so they sum to 1.0.

    Dicts are normalised in place and returned; other sequences produce
    a new list.
    """
    if isinstance(dist, dict):
        total = sum(dist.values())
        for key in dist:
            dist[key] = dist[key] / total
            assert 0 <= dist[key] <= 1, "Probabilities must be between 0 and 1."
        return dist
    total = sum(dist)
    return [item / total for item in dist]
def clip(x, lowest, highest):
    """Return x clipped to the range [lowest..highest]."""
    capped = min(x, highest)
    return max(lowest, capped)
def sigmoid(x):
    """Logistic activation of x: 1 / (1 + e^-x)."""
    denominator = 1 + math.exp(-x)
    return 1 / denominator
def step(x):
    """Heaviside step activation: 1 for x >= 0, otherwise 0."""
    if x >= 0:
        return 1
    return 0
try:  # math.isclose was added in Python 3.5; but we might be in 3.4
    from math import isclose
except ImportError:
    def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
        "Return true if numbers a and b are close to each other."
        # Same semantics as math.isclose: relative tolerance scaled by the
        # larger magnitude, with an absolute floor for values near zero.
        return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
# ______________________________________________________________________________
# Misc Functions
# TODO: Use functools.lru_cache memoization decorator
def memoize(fn, slot=None):
    """Memoize fn: make it remember the computed value for any argument list.

    If slot is specified, store the result in that slot of the first
    argument; otherwise keep results in a dict keyed by the args tuple
    (exposed as the .cache attribute).
    """
    if slot:
        def memoized_fn(obj, *args):
            if not hasattr(obj, slot):
                setattr(obj, slot, fn(obj, *args))
            return getattr(obj, slot)
        return memoized_fn

    cache = {}

    def memoized_fn(*args):
        try:
            return cache[args]
        except KeyError:
            cache[args] = fn(*args)
            return cache[args]

    memoized_fn.cache = cache
    return memoized_fn
def name(obj):
    """Find a reasonable printable name for *obj*: a .name attribute,
    its __name__, its class's __name__, or finally str(obj)."""
    for candidate in (getattr(obj, 'name', 0),
                      getattr(obj, '__name__', 0),
                      getattr(getattr(obj, '__class__', 0), '__name__', 0)):
        if candidate:
            return candidate
    return str(obj)
def isnumber(x):
    "Is x a number?"
    # Duck-typed check: anything convertible to int (defines __int__)
    # counts as a number here.
    return hasattr(x, '__int__')


def issequence(x):
    "Is x a sequence?"
    # Note: strings are Sequences too by this definition.
    return isinstance(x, collections.abc.Sequence)
def print_table(table, header=None, sep=' ', numfmt='%g'):
    """Print a list of lists as a table, so that columns line up nicely.

    header, if specified, will be printed as the first row.
    numfmt is the %-style format applied to all numbers; you might want
    e.g. '%6.2f'. (If you want different formats in different columns,
    don't use print_table.) sep is the separator between columns.
    """
    # Right-justify numeric columns, left-justify the rest, judged by row 0.
    justs = ['rjust' if isnumber(x) else 'ljust' for x in table[0]]
    if header:
        table.insert(0, header)
    # BUG FIX: numfmt is a %-style pattern (default '%g'), but it was being
    # applied with str.format(), which ignores the value entirely and
    # printed the literal text '%g' for every number.
    table = [[numfmt % x if isnumber(x) else x for x in row]
             for row in table]
    # Width of each column = longest cell in that column.
    sizes = list(
        map(lambda seq: max(map(len, seq)),
            list(zip(*[map(str, row) for row in table]))))
    for row in table:
        print(sep.join(getattr(
            str(x), j)(size) for (j, size, x) in zip(justs, sizes, row)))
def AIMAFile(components, mode='r'):
    """Open a file based at the AIMA root directory.

    *components* are path segments joined below this module's directory;
    *mode* is passed straight to open().
    """
    aima_root = os.path.dirname(__file__)
    aima_file = os.path.join(aima_root, *components)
    # BUG FIX: the mode argument was accepted but never forwarded to
    # open(), so every file was opened read-only in text mode.
    return open(aima_file, mode=mode)
def DataFile(name, mode='r'):
    "Return a file in the AIMA /aima-data directory."
    # Delegates path resolution (and the open mode) to AIMAFile.
    return AIMAFile(['aima-data', name], mode)
# ______________________________________________________________________________
# Expressions
# See https://docs.python.org/3/reference/expressions.html#operator-precedence
# See https://docs.python.org/3/reference/datamodel.html#special-method-names
class Expr(object):
    """A mathematical expression with an operator and 0 or more arguments.
    op is a str like '+' or 'sin'; args are Expressions.
    Expr('x') or Symbol('x') creates a symbol (a nullary Expr).
    Expr('-', x) creates a unary; Expr('+', x, 1) creates a binary."""

    def __init__(self, op, *args):
        # op is normalised to str; args are kept as the given tuple.
        self.op = str(op)
        self.args = args

    # Operator overloads: applying a Python operator to an Expr builds a
    # new Expr tree node instead of computing a value.
    def __neg__(self): return Expr('-', self)
    def __pos__(self): return Expr('+', self)
    def __invert__(self): return Expr('~', self)
    def __add__(self, rhs): return Expr('+', self, rhs)
    def __sub__(self, rhs): return Expr('-', self, rhs)
    def __mul__(self, rhs): return Expr('*', self, rhs)
    def __pow__(self, rhs): return Expr('**', self, rhs)
    def __mod__(self, rhs): return Expr('%', self, rhs)
    def __and__(self, rhs): return Expr('&', self, rhs)
    def __xor__(self, rhs): return Expr('^', self, rhs)
    def __rshift__(self, rhs): return Expr('>>', self, rhs)
    def __lshift__(self, rhs): return Expr('<<', self, rhs)
    def __truediv__(self, rhs): return Expr('/', self, rhs)
    def __floordiv__(self, rhs): return Expr('//', self, rhs)
    def __matmul__(self, rhs): return Expr('@', self, rhs)

    def __or__(self, rhs):
        "Allow both P | Q, and P |'==>'| Q."
        if isinstance(rhs, Expression):
            return Expr('|', self, rhs)
        else:
            # A string rhs starts the |'op'| infix trick (see PartialExpr).
            return PartialExpr(rhs, self)

    # Reverse operator overloads
    def __radd__(self, lhs): return Expr('+', lhs, self)
    def __rsub__(self, lhs): return Expr('-', lhs, self)
    def __rmul__(self, lhs): return Expr('*', lhs, self)
    def __rdiv__(self, lhs): return Expr('/', lhs, self)
    def __rpow__(self, lhs): return Expr('**', lhs, self)
    def __rmod__(self, lhs): return Expr('%', lhs, self)
    def __rand__(self, lhs): return Expr('&', lhs, self)
    def __rxor__(self, lhs): return Expr('^', lhs, self)
    def __ror__(self, lhs): return Expr('|', lhs, self)
    def __rrshift__(self, lhs): return Expr('>>', lhs, self)
    def __rlshift__(self, lhs): return Expr('<<', lhs, self)
    def __rtruediv__(self, lhs): return Expr('/', lhs, self)
    def __rfloordiv__(self, lhs): return Expr('//', lhs, self)
    def __rmatmul__(self, lhs): return Expr('@', lhs, self)

    def __call__(self, *args):
        "Call: if 'f' is a Symbol, then f(0) == Expr('f', 0)."
        if self.args:
            raise ValueError('can only do a call for a Symbol, not an Expr')
        else:
            return Expr(self.op, *args)

    # Equality and repr
    def __eq__(self, other):
        "'x == y' evaluates to True or False; does not build an Expr."
        return (isinstance(other, Expr)
                and self.op == other.op
                and self.args == other.args)

    def __hash__(self): return hash(self.op) ^ hash(self.args)

    def __repr__(self):
        op = self.op
        args = [str(arg) for arg in self.args]
        if op.isidentifier():  # f(x) or f(x, y)
            return '{}({})'.format(op, ', '.join(args)) if args else op
        elif len(args) == 1:  # -x or -(x + 1)
            return op + args[0]
        else:  # (x - y)
            opp = (' ' + op + ' ')
            return '(' + opp.join(args) + ')'
# An 'Expression' is either an Expr or a Number.
# Symbol is not an explicit type; it is any Expr with 0 args.
# These tuples are used with isinstance checks throughout the module.
Number = (int, float, complex)
Expression = (Expr, Number)
def Symbol(name):
    """A Symbol is just an Expr with no args (the op is the symbol's name)."""
    return Expr(name)
def symbols(names):
    """Return a tuple of Symbols; names is a comma/whitespace delimited str."""
    # Normalize commas to whitespace, then split on any whitespace run.
    return tuple(map(Symbol, names.replace(',', ' ').split()))
def subexpressions(x):
    "Yield the subexpressions of an Expression (including x itself)."
    yield x
    if isinstance(x, Expr):
        # Pre-order traversal: parent first, then each argument's subtree.
        for arg in x.args:
            yield from subexpressions(arg)
def arity(expression):
    """The number of sub-expressions in this expression."""
    # Numbers are leaves, so their arity is 0.
    return len(expression.args) if isinstance(expression, Expr) else 0
# For operators that are not defined in Python, we allow new InfixOps:
class PartialExpr:
    """Given 'P |'==>'| Q, first form PartialExpr('==>', P), then combine with Q."""

    def __init__(self, op, lhs):
        self.op = op
        self.lhs = lhs

    def __or__(self, rhs):
        # Completing the |'op'| sandwich produces the full binary Expr.
        return Expr(self.op, self.lhs, rhs)

    def __repr__(self):
        return "PartialExpr('{}', {})".format(self.op, self.lhs)
def expr(x):
    """Shortcut to create an Expression. x is a str in which:
    - identifiers are automatically defined as Symbols.
    - ==> is treated as an infix |'==>'|, as are <== and <=>.
    If x is already an Expression, it is returned unchanged. Example:
    >>> expr('P & Q ==> Q')
    ((P & Q) ==> Q)
    """
    if not isinstance(x, str):
        return x
    # NOTE: eval is only safe for trusted expression strings.
    return eval(expr_handle_infix_ops(x), defaultkeydict(Symbol))
infix_ops = '==> <== <=>'.split()


def expr_handle_infix_ops(x):
    """Given a str, return a new str with ==> replaced by |'==>'|, etc.
    >>> expr_handle_infix_ops('P ==> Q')
    "P |'==>'| Q"
    """
    for op in infix_ops:
        # Wrap the operator in quotes and pipes: ==>  ->  |'==>'|
        x = x.replace(op, "|'{}'|".format(op))
    return x
class defaultkeydict(collections.defaultdict):
    """Like defaultdict, but the default_factory is a function of the key.
    >>> d = defaultkeydict(len); d['four']
    4
    """

    def __missing__(self, key):
        # Compute from the key (plain defaultdict calls the factory with
        # no arguments), cache it, and return it.
        result = self.default_factory(key)
        self[key] = result
        return result
# ______________________________________________________________________________
# Queues: Stack, FIFOQueue, PriorityQueue
# TODO: Possibly use queue.Queue, queue.PriorityQueue
# TODO: Priority queues may not belong here -- see treatment in search.py
class Queue:
    """Queue is an abstract class/interface. There are three types:
        Stack(): A Last In First Out Queue.
        FIFOQueue(): A First In First Out Queue.
        PriorityQueue(order, f): Queue in sorted order (default min-first).
    Each type supports the following methods and functions:
        q.append(item)  -- add an item to the queue
        q.extend(items) -- equivalent to: for item in items: q.append(item)
        q.pop()         -- return the top item from the queue
        len(q)          -- number of items in q (also q.__len())
        item in q       -- does q contain item?
    Note that isinstance(Stack(), Queue) is false, because we implement stacks
    as lists. If Python ever gets interfaces, Queue will be an interface."""

    def __init__(self):
        # Abstract: concrete subclasses provide their own storage.
        raise NotImplementedError

    def extend(self, items):
        for each in items:
            self.append(each)
def Stack():
    """Return an empty list, suitable as a Last-In-First-Out Queue."""
    # Plain lists already support append/pop/len/in with LIFO semantics.
    return list()
class FIFOQueue(Queue):
    """A First-In-First-Out Queue."""

    def __init__(self):
        self.A = []        # backing list; items before self.start are dead
        self.start = 0     # index of the logical front element

    def append(self, item):
        self.A.append(item)

    def __len__(self):
        return len(self.A) - self.start

    def extend(self, items):
        self.A.extend(items)

    def pop(self):
        item = self.A[self.start]
        self.start += 1
        # Compact once more than half the backing list is dead space,
        # so repeated pops do not leak memory.
        if self.start > 5 and self.start > len(self.A) / 2:
            self.A = self.A[self.start:]
            self.start = 0
        return item

    def __contains__(self, item):
        # Only the live portion of the backing list counts.
        return item in self.A[self.start:]
class PriorityQueue(Queue):
    """A queue in which the minimum (or maximum) element (as determined by f and
    order) is returned first. If order is min, the item with minimum f(x) is
    returned first; if order is max, then it is the item with maximum f(x).
    Also supports dict-like lookup."""
    def __init__(self, order=min, f=lambda x: x):
        # self.A holds (f(item), item) pairs kept sorted by bisect.insort.
        self.A = []
        self.order = order
        self.f = f
    def append(self, item):
        # NOTE(review): insort compares tuples, so equal f-scores fall back to
        # comparing the items themselves; this raises TypeError for
        # non-comparable items — confirm callers only queue comparable items.
        bisect.insort(self.A, (self.f(item), item))
    def __len__(self):
        return len(self.A)
    def pop(self):
        if self.order == min:
            return self.A.pop(0)[1]   # smallest score lives at the front
        else:
            return self.A.pop()[1]    # largest score lives at the back
    def __contains__(self, item):
        return any(item == pair[1] for pair in self.A)
    def __getitem__(self, key):
        # Implicitly returns None when the key is absent.
        for _, item in self.A:
            if item == key:
                return item
    def __delitem__(self, key):
        # NOTE(review): pops while enumerating and has no break — with
        # duplicate keys some later matches may be skipped; verify intent.
        for i, (value, item) in enumerate(self.A):
            if item == key:
                self.A.pop(i)
# ______________________________________________________________________________
# Useful Shorthands
class Bool(int):
    """Just like `bool`, except values display as 'T' and 'F' instead of 'True' and 'False'"""

    def __repr__(self):
        return 'T' if self else 'F'

    # str() and repr() render identically.
    __str__ = __repr__


T = Bool(True)
F = Bool(False)
| mit |
bswartz/cinder | cinder/volume/drivers/hitachi/hnas_backend.py | 2 | 35089 | # Copyright (c) 2014 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Hitachi Unified Storage (HUS-HNAS) platform. Backend operations.
"""
import re
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from oslo_utils import units
import six
from cinder.i18n import _, _LW, _LI, _LE
from cinder import exception
from cinder import ssh_utils
from cinder import utils
LOG = logging.getLogger("cinder.volume.driver")
# Retry attempts used by @utils.retry for transient SSC connection failures.
HNAS_SSC_RETRIES = 5
class HnasBackend(object):
    """Back end. Talks to HUS-HNAS.

    Runs 'ssc' commands against the array either directly or through an
    SSH connection to the SMU (per drv_configs['ssh_enabled']) and parses
    their text output.
    """
    def __init__(self, drv_configs):
        """Store the driver configuration dict used by every command."""
        self.drv_configs = drv_configs
        # SSH connection pool; created lazily by run_cmd in SSH mode.
        self.sshpool = None
@utils.retry(exceptions=exception.HNASConnError, retries=HNAS_SSC_RETRIES,
wait_random=True)
def run_cmd(self, cmd, ip0, user, pw, *args, **kwargs):
"""Run a command on SMU or using SSH
:param cmd: ssc command name
:param ip0: string IP address of controller
:param user: string user authentication for array
:param pw: string password authentication for array
:returns: formated string with version information
"""
LOG.debug('Enable ssh: %s',
six.text_type(self.drv_configs['ssh_enabled']))
if self.drv_configs['ssh_enabled'] != 'True':
# Direct connection via ssc
args = (cmd, '--user', user, '--password', pw, ip0) + args
try:
out, err = utils.execute(*args, **kwargs)
LOG.debug("command %(cmd)s result: out = %(out)s - err = "
"%(err)s", {'cmd': cmd, 'out': out, 'err': err})
return out, err
except putils.ProcessExecutionError as e:
if 'Failed to establish SSC connection' in e.stderr:
LOG.debug("SSC connection error!")
msg = _("Failed to establish SSC connection.")
raise exception.HNASConnError(msg)
elif 'Connection reset' in e.stderr:
LOG.debug("HNAS connection reset!")
msg = _("HNAS has disconnected SSC")
raise exception.HNASConnError(msg)
else:
raise
else:
if self.drv_configs['cluster_admin_ip0'] is None:
# Connect to SMU through SSH and run ssc locally
args = (cmd, 'localhost') + args
else:
args = (cmd, '--smuauth',
self.drv_configs['cluster_admin_ip0']) + args
utils.check_ssh_injection(args)
command = ' '.join(args)
command = command.replace('"', '\\"')
if not self.sshpool:
server = self.drv_configs['mgmt_ip0']
port = int(self.drv_configs['ssh_port'])
username = self.drv_configs['username']
# We only accept private/public key auth
password = ""
privatekey = self.drv_configs['ssh_private_key']
self.sshpool = ssh_utils.SSHPool(server,
port,
None,
username,
password=password,
privatekey=privatekey)
with self.sshpool.item() as ssh:
try:
out, err = putils.ssh_execute(ssh, command,
check_exit_code=True)
LOG.debug("command %(cmd)s result: out = "
"%(out)s - err = %(err)s",
{'cmd': cmd, 'out': out, 'err': err})
return out, err
except putils.ProcessExecutionError as e:
if 'Failed to establish SSC connection' in e.stderr:
LOG.debug("SSC connection error!")
msg = _("Failed to establish SSC connection.")
raise exception.HNASConnError(msg)
else:
raise putils.ProcessExecutionError
    def get_version(self, cmd, ver, ip0, user, pw):
        """Gets version information from the storage unit

        :param cmd: ssc command name
        :param ver: string driver version (used as fallback when the array
                    output has no 'Software:' line)
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :returns: formatted string with version information
        """
        out, err = self.run_cmd(cmd, ip0, user, pw, "cluster-getmac",
                                check_exit_code=True)
        hardware = out.split()[2]
        out, err = self.run_cmd(cmd, ip0, user, pw, "ver",
                                check_exit_code=True)
        lines = out.split('\n')
        model = ""
        for line in lines:
            if 'Model:' in line:
                model = line.split()[1]
            if 'Software:' in line:
                ver = line.split()[1]
        # If not using SSH, the local utility version can be different from the
        # one used in HNAS
        if self.drv_configs['ssh_enabled'] != 'True':
            out, err = utils.execute(cmd, "-version", check_exit_code=True)
            util = out.split()[1]
            out = ("Array_ID: %(arr)s (%(mod)s) version: %(ver)s LU: 256 "
                   "RG: 0 RG_LU: 0 Utility_version: %(util)s" %
                   {'arr': hardware, 'mod': model, 'ver': ver, 'util': util})
        else:
            out = ("Array_ID: %(arr)s (%(mod)s) version: %(ver)s LU: 256 "
                   "RG: 0 RG_LU: 0" %
                   {'arr': hardware, 'mod': model, 'ver': ver})
        LOG.debug('get_version: %(out)s -- %(err)s', {'out': out, 'err': err})
        return out
    def get_iscsi_info(self, cmd, ip0, user, pw):
        """Gets IP addresses for EVSs, use EVSID as controller.

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :returns: formatted string with iSCSI information
        """
        out, err = self.run_cmd(cmd, ip0, user, pw,
                                'evsipaddr', '-l',
                                check_exit_code=True)
        lines = out.split('\n')
        newout = ""
        for line in lines:
            # Skip the admin EVS; only data EVSs serve iSCSI.
            if 'evs' in line and 'admin' not in line:
                inf = line.split()
                (evsnum, ip) = (inf[1], inf[3])
                newout += "CTL: %s Port: 0 IP: %s Port: 3260 Link: Up\n" \
                          % (evsnum, ip)
        LOG.debug('get_iscsi_info: %(out)s -- %(err)s',
                  {'out': out, 'err': err})
        return newout
def get_hdp_info(self, cmd, ip0, user, pw, fslabel=None):
"""Gets the list of filesystems and fsids.
:param cmd: ssc command name
:param ip0: string IP address of controller
:param user: string user authentication for array
:param pw: string password authentication for array
:param fslabel: filesystem label we want to get info
:returns: formated string with filesystems and fsids
"""
if fslabel is None:
out, err = self.run_cmd(cmd, ip0, user, pw, 'df', '-a',
check_exit_code=True)
else:
out, err = self.run_cmd(cmd, ip0, user, pw, 'df', '-f', fslabel,
check_exit_code=True)
lines = out.split('\n')
single_evs = True
LOG.debug("Parsing output: %s", lines)
newout = ""
for line in lines:
if 'Not mounted' in line or 'Not determined' in line:
continue
if 'not' not in line and 'EVS' in line:
single_evs = False
if 'GB' in line or 'TB' in line:
LOG.debug("Parsing output: %s", line)
inf = line.split()
if not single_evs:
(fsid, fslabel, capacity) = (inf[0], inf[1], inf[3])
(used, perstr) = (inf[5], inf[7])
(availunit, usedunit) = (inf[4], inf[6])
else:
(fsid, fslabel, capacity) = (inf[0], inf[1], inf[2])
(used, perstr) = (inf[4], inf[6])
(availunit, usedunit) = (inf[3], inf[5])
if usedunit == 'GB':
usedmultiplier = units.Ki
else:
usedmultiplier = units.Mi
if availunit == 'GB':
availmultiplier = units.Ki
else:
availmultiplier = units.Mi
m = re.match("\((\d+)\%\)", perstr)
if m:
percent = m.group(1)
else:
percent = 0
newout += "HDP: %s %d MB %d MB %d %% LUs: 256 Normal %s\n" \
% (fsid, int(float(capacity) * availmultiplier),
int(float(used) * usedmultiplier),
int(percent), fslabel)
LOG.debug('get_hdp_info: %(out)s -- %(err)s',
{'out': newout, 'err': err})
return newout
    def get_evs(self, cmd, ip0, user, pw, fsid):
        """Gets the EVSID for the named filesystem.

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :returns: EVS id of the file system
        """
        out, err = self.run_cmd(cmd, ip0, user, pw, "evsfs", "list",
                                check_exit_code=True)
        LOG.debug('get_evs: out %s.', out)
        lines = out.split('\n')
        for line in lines:
            inf = line.split()
            # Match either the FS id column or the FS label column exactly.
            if fsid in line and (fsid == inf[0] or fsid == inf[1]):
                return inf[3]
        LOG.warning(_LW('get_evs: %(out)s -- No find for %(fsid)s'),
                    {'out': out, 'fsid': fsid})
        # NOTE(review): returns int 0 on miss while the hit path returns a
        # string id; callers must treat 0 as "not found".
        return 0
    def _get_evsips(self, cmd, ip0, user, pw, evsid):
        """Gets the EVS IPs for the named filesystem.

        Returns a space-separated string of IPs (with a trailing space).
        """
        out, err = self.run_cmd(cmd, ip0, user, pw,
                                'evsipaddr', '-e', evsid,
                                check_exit_code=True)
        iplist = ""
        lines = out.split('\n')
        for line in lines:
            inf = line.split()
            if 'evs' in line:
                iplist += inf[3] + ' '
        LOG.debug('get_evsips: %s', iplist)
        return iplist
    def _get_fsid(self, cmd, ip0, user, pw, fslabel):
        """Gets the FSID for the named filesystem.

        Returns int 0 when the label is not found.
        """
        out, err = self.run_cmd(cmd, ip0, user, pw, 'evsfs', 'list',
                                check_exit_code=True)
        LOG.debug('get_fsid: out %s', out)
        lines = out.split('\n')
        for line in lines:
            inf = line.split()
            # Exact match against the label column only.
            if fslabel in line and fslabel == inf[1]:
                LOG.debug('get_fsid: %s', line)
                return inf[0]
        LOG.warning(_LW('get_fsid: %(out)s -- No info for %(fslabel)s'),
                    {'out': out, 'fslabel': fslabel})
        return 0
    def _get_targets(self, cmd, ip0, user, pw, evsid, tgtalias=None):
        """Get the target list of an EVS.

        Get the target list of an EVS. Optionally can return the target
        list of a specific target.

        Each target is a dict with keys: alias, iqn, secret, auth, luns
        (luns is a list of {'id': ..., 'name': ...} dicts).
        """
        LOG.debug("Getting target list for evs %s, tgtalias: %s.",
                  evsid, tgtalias)
        try:
            out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                    "--evs", evsid, 'iscsi-target', 'list',
                                    check_exit_code=True)
        except putils.ProcessExecutionError as e:
            LOG.error(_LE('Error getting iSCSI target info '
                          'from EVS %(evs)s.'), {'evs': evsid})
            LOG.debug("_get_targets out: %(out)s, err: %(err)s.",
                      {'out': e.stdout, 'err': e.stderr})
            return []
        tgt_list = []
        if 'No targets' in out:
            LOG.debug("No targets found in EVS %(evsid)s.", {'evsid': evsid})
            return tgt_list
        # Each target section of the output starts with the word 'Alias'.
        tgt_raw_list = out.split('Alias')[1:]
        for tgt_raw_info in tgt_raw_list:
            tgt = {}
            # Fixed row offsets within a target section: 0=alias, 1=iqn,
            # 3=secret, 4=auth method; the last token of each row is used.
            tgt['alias'] = tgt_raw_info.split('\n')[0].split(' ').pop()
            tgt['iqn'] = tgt_raw_info.split('\n')[1].split(' ').pop()
            tgt['secret'] = tgt_raw_info.split('\n')[3].split(' ').pop()
            tgt['auth'] = tgt_raw_info.split('\n')[4].split(' ').pop()
            luns = []
            tgt_raw_info = tgt_raw_info.split('\n\n')[1]
            tgt_raw_list = tgt_raw_info.split('\n')[2:]
            for lun_raw_line in tgt_raw_list:
                lun_raw_line = lun_raw_line.strip()
                lun_raw_line = lun_raw_line.split(' ')
                lun = {}
                lun['id'] = lun_raw_line[0]
                lun['name'] = lun_raw_line.pop()
                luns.append(lun)
            tgt['luns'] = luns
            if tgtalias == tgt['alias']:
                return [tgt]
            tgt_list.append(tgt)
        if tgtalias is not None:
            # We tried to find 'tgtalias' but didn't find. Return an empty
            # list.
            LOG.debug("There's no target %(alias)s in EVS %(evsid)s.",
                      {'alias': tgtalias, 'evsid': evsid})
            return []
        LOG.debug("Targets in EVS %(evs)s: %(tgtl)s.",
                  {'evs': evsid, 'tgtl': tgt_list})
        return tgt_list
def _get_unused_lunid(self, cmd, ip0, user, pw, tgt_info):
if len(tgt_info['luns']) == 0:
return 0
free_lun = 0
for lun in tgt_info['luns']:
if int(lun['id']) == free_lun:
free_lun += 1
if int(lun['id']) > free_lun:
# Found a free LUN number
break
return free_lun
    def get_nfs_info(self, cmd, ip0, user, pw):
        """Gets information on each NFS export.

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :returns: formatted string
        """
        out, err = self.run_cmd(cmd, ip0, user, pw,
                                'for-each-evs', '-q',
                                'nfs-export', 'list',
                                check_exit_code=True)
        lines = out.split('\n')
        newout = ""
        export = ""
        path = ""
        for line in lines:
            inf = line.split()
            if 'Export name' in line:
                export = inf[2]
            if 'Export path' in line:
                path = inf[2]
            if 'File system info' in line:
                # New FS section starts: forget the previous label.
                fs = ""
            if 'File system label' in line:
                fs = inf[3]
            if 'Transfer setting' in line and fs != "":
                # 'Transfer setting' marks the end of an export section.
                fsid = self._get_fsid(cmd, ip0, user, pw, fs)
                evsid = self.get_evs(cmd, ip0, user, pw, fsid)
                ips = self._get_evsips(cmd, ip0, user, pw, evsid)
                newout += "Export: %s Path: %s HDP: %s FSID: %s \
                          EVS: %s IPS: %s\n" \
                          % (export, path, fs, fsid, evsid, ips)
                fs = ""
        LOG.debug('get_nfs_info: %(out)s -- %(err)s',
                  {'out': newout, 'err': err})
        return newout
    def create_lu(self, cmd, ip0, user, pw, hdp, size, name):
        """Creates a new Logical Unit.

        If the operation can not be performed for some reason, utils.execute()
        throws an error and aborts the operation. Used for iSCSI only

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param hdp: data Pool the logical unit will be created
        :param size: Size (Mb) of the new logical unit
        :param name: name of the logical unit
        :returns: formatted string with 'LUN %d HDP: %d size: %s MB, is
        successfully created'
        """
        _evsid = self.get_evs(cmd, ip0, user, pw, hdp)
        # The LU is backed by a file under /.cinder on the filesystem.
        out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                "--evs", _evsid,
                                'iscsi-lu', 'add', "-e",
                                name, hdp,
                                '/.cinder/' + name + '.iscsi',
                                size + 'M',
                                check_exit_code=True)
        out = "LUN %s HDP: %s size: %s MB, is successfully created" \
            % (name, hdp, size)
        LOG.debug('create_lu: %s.', out)
        return out
    def delete_lu(self, cmd, ip0, user, pw, hdp, lun):
        """Delete an logical unit. Used for iSCSI only

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param hdp: data Pool of the logical unit
        :param lun: id of the logical unit being deleted
        :returns: formatted string 'Logical unit deleted successfully.'
        """
        _evsid = self.get_evs(cmd, ip0, user, pw, hdp)
        # '-d' also deletes the backing file; '-f' forces without confirmation.
        out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                "--evs", _evsid,
                                'iscsi-lu', 'del', '-d',
                                '-f', lun,
                                check_exit_code=True)
        LOG.debug('delete_lu: %(out)s -- %(err)s.', {'out': out, 'err': err})
        return out
    def create_dup(self, cmd, ip0, user, pw, src_lun, hdp, size, name):
        """Clones a volume

        Clone primitive used to support all iSCSI snapshot/cloning functions.
        Used for iSCSI only.

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param hdp: data Pool of the logical unit
        :param src_lun: id of the logical unit being deleted
        :param size: size of the LU being cloned. Only for logging purposes
        :returns: formatted string
        """
        _evsid = self.get_evs(cmd, ip0, user, pw, hdp)
        out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                "--evs", _evsid,
                                'iscsi-lu', 'clone', '-e',
                                src_lun, name,
                                '/.cinder/' + name + '.iscsi',
                                check_exit_code=True)
        out = "LUN %s HDP: %s size: %s MB, is successfully created" \
            % (name, hdp, size)
        LOG.debug('create_dup: %(out)s -- %(err)s.', {'out': out, 'err': err})
        return out
    def file_clone(self, cmd, ip0, user, pw, fslabel, src, name):
        """Clones NFS files to a new one named 'name'

        Clone primitive used to support all NFS snapshot/cloning functions.

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param fslabel: file system label of the new file
        :param src: source file
        :param name: target path of the new created file
        :returns: formatted string
        """
        _fsid = self._get_fsid(cmd, ip0, user, pw, fslabel)
        _evsid = self.get_evs(cmd, ip0, user, pw, _fsid)
        out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                "--evs", _evsid,
                                'file-clone-create', '-f', fslabel,
                                src, name,
                                check_exit_code=True)
        out = "LUN %s HDP: %s Clone: %s -> %s" % (name, _fsid, src, name)
        LOG.debug('file_clone: %(out)s -- %(err)s.', {'out': out, 'err': err})
        return out
    def extend_vol(self, cmd, ip0, user, pw, hdp, lun, new_size, name):
        """Extend a iSCSI volume.

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param hdp: data Pool of the logical unit
        :param lun: id of the logical unit being extended
        :param new_size: new size of the LU
        :param name: name of the logical unit
        :returns: formatted status string
        """
        _evsid = self.get_evs(cmd, ip0, user, pw, hdp)
        out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                "--evs", _evsid,
                                'iscsi-lu', 'expand',
                                name, new_size + 'M',
                                check_exit_code=True)
        out = ("LUN: %s successfully extended to %s MB" % (name, new_size))
        LOG.debug('extend_vol: %s.', out)
        return out
    @utils.retry(putils.ProcessExecutionError, retries=HNAS_SSC_RETRIES,
                 wait_random=True)
    def add_iscsi_conn(self, cmd, ip0, user, pw, lun_name, hdp,
                       port, tgtalias, initiator):
        """Setup the lun on on the specified target port

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param lun_name: id of the logical unit being extended
        :param hdp: data pool of the logical unit
        :param port: iSCSI port
        :param tgtalias: iSCSI qualified name
        :param initiator: initiator address
        """
        # NOTE(review): 'tgt' is interpolated twice here; the 'returns' clause
        # looks like a copy/paste leftover in the debug message.
        LOG.debug('Adding %(lun)s to %(tgt)s returns %(tgt)s.',
                  {'lun': lun_name, 'tgt': tgtalias})
        found, lunid, tgt = self.check_lu(cmd, ip0, user, pw, lun_name, hdp)
        evsid = self.get_evs(cmd, ip0, user, pw, hdp)
        if found:
            # Already mapped: just report the existing pairing.
            conn = (int(lunid), lun_name, initiator, int(lunid), tgt['iqn'],
                    int(lunid), hdp, port)
            out = ("H-LUN: %d mapped LUN: %s, iSCSI Initiator: %s "
                   "@ index: %d, and Target: %s @ index %d is "
                   "successfully paired  @ CTL: %s, Port: %s.") % conn
        else:
            # Pick the lowest free H-LUN number on the target and attach.
            tgt = self._get_targets(cmd, ip0, user, pw, evsid, tgtalias)
            lunid = self._get_unused_lunid(cmd, ip0, user, pw, tgt[0])
            out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                    "--evs", evsid,
                                    'iscsi-target', 'addlu',
                                    tgtalias, lun_name, six.text_type(lunid),
                                    check_exit_code=True)
            conn = (int(lunid), lun_name, initiator, int(lunid), tgt[0]['iqn'],
                    int(lunid), hdp, port)
            out = ("H-LUN: %d mapped LUN: %s, iSCSI Initiator: %s "
                   "@ index: %d, and Target: %s @ index %d is "
                   "successfully paired  @ CTL: %s, Port: %s.") % conn
        LOG.debug('add_iscsi_conn: returns %s.', out)
        return out
    def del_iscsi_conn(self, cmd, ip0, user, pw, evsid, iqn, hlun):
        """Remove the lun on on the specified target port

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param evsid: EVSID for the file system
        :param iqn: iSCSI qualified name
        :param hlun: logical unit id
        :returns: formatted string
        """
        out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                "--evs", evsid,
                                'iscsi-target', 'list', iqn,
                                check_exit_code=True)
        lines = out.split('\n')
        out = ("H-LUN: %d already deleted from target %s" % (int(hlun), iqn))
        # see if lun is already detached
        for line in lines:
            if line.startswith('  '):
                # Note: string comparison — hlun is expected as a string.
                lunline = line.split()[0]
                if lunline[0].isdigit() and lunline == hlun:
                    out = ""
                    break
        if out != "":
            # hlun wasn't found
            LOG.info(_LI('del_iscsi_conn: hlun not found %s.'), out)
            return out
        # remove the LU from the target
        out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                "--evs", evsid,
                                'iscsi-target', 'dellu',
                                '-f', iqn, hlun,
                                check_exit_code=True)
        out = "H-LUN: %d successfully deleted from target %s" \
            % (int(hlun), iqn)
        LOG.debug('del_iscsi_conn: %s.', out)
        return out
    def get_targetiqn(self, cmd, ip0, user, pw, targetalias, hdp, secret):
        """Obtain the targets full iqn

        Returns the target's full iqn rather than its alias.

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param targetalias: alias of the target
        :param hdp: data pool of the logical unit
        :param secret: CHAP secret of the target
        :returns: string with full IQN
        """
        _evsid = self.get_evs(cmd, ip0, user, pw, hdp)
        out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                "--evs", _evsid,
                                'iscsi-target', 'list', targetalias,
                                check_exit_code=True)
        if "does not exist" in out:
            # Target missing: create it (empty secret must be quoted).
            if secret == "":
                secret = '""'
                out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                        "--evs", _evsid,
                                        'iscsi-target', 'add',
                                        targetalias, secret,
                                        check_exit_code=True)
            else:
                out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                        "--evs", _evsid,
                                        'iscsi-target', 'add',
                                        targetalias, secret,
                                        check_exit_code=True)
        if "success" in out:
            return targetalias
        lines = out.split('\n')
        # returns the first iqn
        for line in lines:
            if 'Alias' in line:
                fulliqn = line.split()[2]
                return fulliqn
        # NOTE(review): falls through returning None implicitly when no
        # 'Alias' line is present — confirm callers handle that.
    def set_targetsecret(self, cmd, ip0, user, pw, targetalias, hdp, secret):
        """Sets the chap secret for the specified target.

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param targetalias: alias of the target
        :param hdp: data pool of the logical unit
        :param secret: CHAP secret of the target
        """
        _evsid = self.get_evs(cmd, ip0, user, pw, hdp)
        # check_exit_code=False: a missing target is expected, not an error.
        out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                "--evs", _evsid,
                                'iscsi-target', 'list',
                                targetalias,
                                check_exit_code=False)
        if "does not exist" in out:
            # Create the target with the secret in one step.
            out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                    "--evs", _evsid,
                                    'iscsi-target', 'add',
                                    targetalias, secret,
                                    check_exit_code=True)
        else:
            # Target exists: update the secret and enable CHAP auth.
            LOG.info(_LI('targetlist: %s'), targetalias)
            out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                    "--evs", _evsid,
                                    'iscsi-target', 'mod',
                                    '-s', secret, '-a', 'enable',
                                    targetalias,
                                    check_exit_code=True)
    def get_targetsecret(self, cmd, ip0, user, pw, targetalias, hdp):
        """Returns the chap secret for the specified target.

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param targetalias: alias of the target
        :param hdp: data pool of the logical unit
        :return secret: CHAP secret of the target ("" when auth is disabled)
        """
        _evsid = self.get_evs(cmd, ip0, user, pw, hdp)
        out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
                                "--evs", _evsid,
                                'iscsi-target', 'list', targetalias,
                                check_exit_code=True)
        enabled = ""
        secret = ""
        lines = out.split('\n')
        for line in lines:
            if 'Secret' in line:
                # The secret column may be empty when no CHAP secret is set.
                if len(line.split()) > 2:
                    secret = line.split()[2]
            if 'Authentication' in line:
                enabled = line.split()[2]
        if enabled == 'Enabled':
            return secret
        else:
            return ""
    def check_target(self, cmd, ip0, user, pw, hdp, target_alias):
        """Checks if a given target exists and gets its info

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param hdp: pool name used
        :param target_alias: alias of the target
        :returns: True if target exists
        :returns: list with the target info
        """
        LOG.debug("Checking if target %(tgt)s exists.", {'tgt': target_alias})
        evsid = self.get_evs(cmd, ip0, user, pw, hdp)
        tgt_list = self._get_targets(cmd, ip0, user, pw, evsid)
        for tgt in tgt_list:
            if tgt['alias'] == target_alias:
                attached_luns = len(tgt['luns'])
                LOG.debug("Target %(tgt)s has %(lun)s volumes.",
                          {'tgt': target_alias, 'lun': attached_luns})
                return True, tgt
        LOG.debug("Target %(tgt)s does not exist.", {'tgt': target_alias})
        return False, None
    def check_lu(self, cmd, ip0, user, pw, volume_name, hdp):
        """Checks if a given LUN is already mapped

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param volume_name: number of the LUN
        :param hdp: storage pool of the LUN
        :returns: True if the lun is attached
        :returns: the LUN id
        :returns: Info related to the target
        """
        LOG.debug("Checking if vol %s (hdp: %s) is attached.",
                  volume_name, hdp)
        evsid = self.get_evs(cmd, ip0, user, pw, hdp)
        tgt_list = self._get_targets(cmd, ip0, user, pw, evsid)
        for tgt in tgt_list:
            if len(tgt['luns']) == 0:
                continue
            for lun in tgt['luns']:
                lunid = lun['id']
                lunname = lun['name']
                # Compare only the first 29 chars — presumably the array
                # truncates LU names to that length; TODO confirm.
                if lunname[:29] == volume_name[:29]:
                    LOG.debug("LUN %(lun)s attached on %(lunid)s, "
                              "target: %(tgt)s.",
                              {'lun': volume_name, 'lunid': lunid, 'tgt': tgt})
                    return True, lunid, tgt
        LOG.debug("LUN %(lun)s not attached.", {'lun': volume_name})
        return False, 0, None
    def get_existing_lu_info(self, cmd, ip0, user, pw, fslabel, lun):
        """Returns the information for the specified Logical Unit.

        Returns the information of an existing Logical Unit on HNAS, according
        to the name provided.

        :param cmd: the command that will be run on SMU
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param fslabel: label of the file system
        :param lun: label of the logical unit
        :returns: raw 'iscsi-lu list' output for the LU
        """
        evs = self.get_evs(cmd, ip0, user, pw, fslabel)
        out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", "--evs",
                                evs, 'iscsi-lu', 'list', lun)
        return out
    def rename_existing_lu(self, cmd, ip0, user, pw, fslabel,
                           new_name, vol_name):
        """Renames the specified Logical Unit.

        Renames an existing Logical Unit on HNAS according to the new name
        provided.

        :param cmd: command that will be run on SMU
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param fslabel: label of the file system
        :param new_name: new name to the existing volume
        :param vol_name: current name of the existing volume
        :returns: raw command output
        """
        evs = self.get_evs(cmd, ip0, user, pw, fslabel)
        out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", "--evs",
                                evs, "iscsi-lu", "mod", "-n", new_name,
                                vol_name)
        return out
| apache-2.0 |
soerendip42/rdkit | rdkit/Chem/SimpleEnum/Enumerator.py | 2 | 8983 | #
# Copyright (c) 2014, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Greg Landrum, May 2009
from __future__ import print_function
from rdkit import RDConfig
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import FunctionalGroups
from rdkit.Chem import rdChemReactions
import os
def PreprocessReaction(reaction,funcGroupFilename=os.path.join(RDConfig.RDDataDir,'Functional_Group_Hierarchy.txt'),propName='molFileValue'):
    """Validate a reaction and attach recursive functional-group queries.

    Atoms in the reactant templates that carry a *propName* property (by
    default the mol-file "value" field) get a recursive query added so they
    only match the named functional group(s).  Returns a 5-tuple:
    (nWarnings, nErrors, nReactantTemplates, nProductTemplates, reactantLabels).

    >>> testFile = os.path.join(RDConfig.RDCodeDir,'Chem','SimpleEnum','test_data','boronic1.rxn')
    >>> rxn = AllChem.ReactionFromRxnFile(testFile)
    >>> nWarn,nError,nReacts,nProds,reactantLabels = PreprocessReaction(rxn)
    >>> nWarn
    0
    >>> nError
    0
    >>> nReacts
    2
    >>> nProds
    1
    >>> reactantLabels
    (((0, 'halogen.bromine.aromatic'),), ((1, 'boronicacid'),))

    If there are functional group labels in the input reaction (via atoms with molFileValue properties),
    the corresponding atoms will have queries added to them so that they only match such things. We can
    see this here:

    >>> rxn = AllChem.ReactionFromRxnFile(testFile)
    >>> r1 = rxn.GetReactantTemplate(0)
    >>> m1 = Chem.MolFromSmiles('CCBr')
    >>> m2 = Chem.MolFromSmiles('c1ccccc1Br')

    These both match because the reaction file itself just has R1-Br:

    >>> m1.HasSubstructMatch(r1)
    True
    >>> m2.HasSubstructMatch(r1)
    True

    After preprocessing, we only match the aromatic Br:

    >>> d = PreprocessReaction(rxn)
    >>> m1.HasSubstructMatch(r1)
    False
    >>> m2.HasSubstructMatch(r1)
    True

    We also support or queries in the values field (separated by commas):

    >>> testFile = os.path.join(RDConfig.RDCodeDir,'Chem','SimpleEnum','test_data','azide_reaction.rxn')
    >>> rxn = AllChem.ReactionFromRxnFile(testFile)
    >>> reactantLabels = PreprocessReaction(rxn)[-1]
    >>> reactantLabels
    (((1, 'azide'),), ((1, 'carboxylicacid,acidchloride'),))
    >>> m1 = Chem.MolFromSmiles('CC(=O)O')
    >>> m2 = Chem.MolFromSmiles('CC(=O)Cl')
    >>> m3 = Chem.MolFromSmiles('CC(=O)N')
    >>> r2 = rxn.GetReactantTemplate(1)
    >>> m1.HasSubstructMatch(r2)
    True
    >>> m2.HasSubstructMatch(r2)
    True
    >>> m3.HasSubstructMatch(r2)
    False

    unrecognized final group types are returned as None:

    >>> testFile = os.path.join(RDConfig.RDCodeDir,'Chem','SimpleEnum','test_data','bad_value1.rxn')
    >>> rxn = AllChem.ReactionFromRxnFile(testFile)
    >>> nWarn,nError,nReacts,nProds,reactantLabels = PreprocessReaction(rxn)
    Traceback (most recent call last):
      File "/usr/prog/python/2.6.6_gnu/lib/python2.6/doctest.py", line 1253, in __run
        compileflags, 1) in test.globs
      File "<doctest __main__.PreprocessReaction[36]>", line 1, in <module>
        nWarn,nError,nReacts,nProds,reactantLabels = PreprocessReaction(rxn)
      File "Enumerator.py", line 105, in PreprocessReaction
        reactantLabels = reaction.AddRecursiveQueriesToReaction(queryDict, propName='molFileValue', getLabels=True)
    RuntimeError: KeyErrorException

    One unrecognized group type in a comma-separated list makes the whole thing fail:

    >>> testFile = os.path.join(RDConfig.RDCodeDir,'Chem','SimpleEnum','test_data','bad_value2.rxn')
    >>> rxn = AllChem.ReactionFromRxnFile(testFile)
    >>> nWarn,nError,nReacts,nProds,reactantLabels = PreprocessReaction(rxn)
    Traceback (most recent call last):
      File "/usr/prog/python/2.6.6_gnu/lib/python2.6/doctest.py", line 1253, in __run
        compileflags, 1) in test.globs
      File "<doctest __main__.PreprocessReaction[36]>", line 1, in <module>
        nWarn,nError,nReacts,nProds,reactantLabels = PreprocessReaction(rxn)
      File "Enumerator.py", line 105, in PreprocessReaction
        reactantLabels = reaction.AddRecursiveQueriesToReaction(queryDict, propName='molFileValue', getLabels=True)
    RuntimeError: KeyErrorException
    >>> testFile = os.path.join(RDConfig.RDCodeDir,'Chem','SimpleEnum','test_data','bad_value3.rxn')
    >>> rxn = AllChem.ReactionFromRxnFile(testFile)
    >>> nWarn,nError,nReacts,nProds,reactantLabels = PreprocessReaction(rxn)
    Traceback (most recent call last):
      File "/usr/prog/python/2.6.6_gnu/lib/python2.6/doctest.py", line 1253, in __run
        compileflags, 1) in test.globs
      File "<doctest __main__.PreprocessReaction[36]>", line 1, in <module>
        nWarn,nError,nReacts,nProds,reactantLabels = PreprocessReaction(rxn)
      File "Enumerator.py", line 105, in PreprocessReaction
        reactantLabels = reaction.AddRecursiveQueriesToReaction(queryDict, propName='molFileValue', getLabels=True)
    RuntimeError: KeyErrorException
    >>> rxn = rdChemReactions.ChemicalReaction()
    >>> nWarn,nError,nReacts,nProds,reactantLabels = PreprocessReaction(rxn)
    >>> reactantLabels == []
    True
    """
    reaction._setImplicitPropertiesFlag(True)
    reaction.Initialize()
    nReactants = reaction.GetNumReactantTemplates()
    nProducts = reaction.GetNumProductTemplates()
    nWarn, nError = reaction.Validate()
    if not nError:
        try:
            queryDict = Chem.ParseMolQueryDefFile(funcGroupFilename)
        except Exception:
            # BUGFIX: was a bare "except:", which also intercepted
            # KeyboardInterrupt/SystemExit before re-raising as IOError.
            raise IOError('cannot open', funcGroupFilename)
        else:
            reactantLabels = reaction.AddRecursiveQueriesToReaction(queryDict, propName, getLabels=True)
    else:
        # An invalid reaction gets no labels; callers can inspect nError.
        reactantLabels = []
    return nWarn, nError, nReactants, nProducts, reactantLabels
def EnumerateReaction(reaction,bbLists,uniqueProductsOnly=False,funcGroupFilename=os.path.join(RDConfig.RDDataDir,'Functional_Group_Hierarchy.txt'),propName='molFileValue'):
    """Enumerate the library defined by a reaction and building-block lists.

    Returns a generator over product tuples.  With ``uniqueProductsOnly`` the
    generator deduplicates products by canonical SMILES (at the cost of
    keeping the set of seen SMILES in memory).

    >>> testFile = os.path.join(RDConfig.RDCodeDir,'Chem','SimpleEnum','test_data','boronic1.rxn')
    >>> rxn = AllChem.ReactionFromRxnFile(testFile)
    >>> reacts1=['Brc1ccccc1','Brc1ncccc1','Brc1cnccc1']
    >>> reacts1=[Chem.MolFromSmiles(x) for x in reacts1]
    >>> reacts2=['CCB(O)O','CCCB(O)O']
    >>> reacts2=[Chem.MolFromSmiles(x) for x in reacts2]
    >>> prods = EnumerateReaction(rxn,(reacts1,reacts2))
    >>> prods = list(prods)

    This is a bit nasty because of the symmetry of the boronic acid:

    >>> len(prods)
    12
    >>> smis = list(set([Chem.MolToSmiles(x[0]) for x in prods]))
    >>> smis.sort()
    >>> len(smis)
    6
    >>> print(smis)
    ['CCCc1ccccc1', 'CCCc1ccccn1', 'CCCc1cccnc1', 'CCc1ccccc1', 'CCc1ccccn1', 'CCc1cccnc1']

    The nastiness can be avoided at the cost of some memory by asking for only unique products:

    >>> prods = EnumerateReaction(rxn,(reacts1,reacts2),uniqueProductsOnly=True)
    >>> prods = list(prods)
    >>> len(prods)
    6
    >>> print(sorted([Chem.MolToSmiles(x[0]) for x in prods]))
    ['CCCc1ccccc1', 'CCCc1ccccn1', 'CCCc1cccnc1', 'CCc1ccccc1', 'CCc1ccccn1', 'CCc1cccnc1']
    """
    # BUGFIX: funcGroupFilename/propName used to be accepted but silently
    # ignored; they are now forwarded to the preprocessing step.
    nWarn, nError, nReacts, nProds, reactantLabels = PreprocessReaction(
        reaction, funcGroupFilename, propName)
    if nError:
        raise ValueError('bad reaction')
    if len(bbLists) != nReacts:
        raise ValueError('%d reactants in reaction, %d bb lists supplied' % (nReacts, len(bbLists)))

    def _uniqueOnly(lst):
        # A set gives O(1) membership tests; the original used a list,
        # which made deduplication O(n^2) over the library size.
        seen = set()
        for entry in lst:
            if entry:
                smi = '.'.join(sorted([Chem.MolToSmiles(x, True) for x in entry]))
                if smi not in seen:
                    seen.add(smi)
                    yield entry

    ps = AllChem.EnumerateLibraryFromReaction(reaction, bbLists)
    if not uniqueProductsOnly:
        return ps
    else:
        return _uniqueOnly(ps)
#------------------------------------
#
# doctest boilerplate
#
def _test():
    """Run this module's doctests and return doctest's (failed, tried) result."""
    import doctest
    import sys

    return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
    import sys
    # Exit status is the number of failed doctests (0 means success).
    failed, tried = _test()
    sys.exit(failed)
| bsd-3-clause |
khagler/boto | tests/unit/ec2/test_securitygroup.py | 112 | 8556 | #!/usr/bin/env python
from tests.compat import unittest
from tests.unit import AWSMockServiceTestCase
from boto.ec2.connection import EC2Connection
from boto.ec2.securitygroup import SecurityGroup
DESCRIBE_SECURITY_GROUP = br"""<?xml version="1.0" encoding="UTF-8"?>
<DescribeSecurityGroupsResponse xmlns="http://ec2.amazonaws.com/doc/2013-06-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<securityGroupInfo>
<item>
<ownerId>111122223333</ownerId>
<groupId>sg-1a2b3c4d</groupId>
<groupName>WebServers</groupName>
<groupDescription>Web Servers</groupDescription>
<vpcId/>
<ipPermissions>
<item>
<ipProtocol>tcp</ipProtocol>
<fromPort>80</fromPort>
<toPort>80</toPort>
<groups/>
<ipRanges>
<item>
<cidrIp>0.0.0.0/0</cidrIp>
</item>
</ipRanges>
</item>
</ipPermissions>
<ipPermissionsEgress/>
</item>
<item>
<ownerId>111122223333</ownerId>
<groupId>sg-2a2b3c4d</groupId>
<groupName>RangedPortsBySource</groupName>
<groupDescription>Group A</groupDescription>
<ipPermissions>
<item>
<ipProtocol>tcp</ipProtocol>
<fromPort>6000</fromPort>
<toPort>7000</toPort>
<groups>
<item>
<userId>111122223333</userId>
<groupId>sg-3a2b3c4d</groupId>
<groupName>Group B</groupName>
</item>
</groups>
<ipRanges/>
</item>
</ipPermissions>
<ipPermissionsEgress/>
</item>
</securityGroupInfo>
</DescribeSecurityGroupsResponse>"""
DESCRIBE_INSTANCES = br"""<?xml version="1.0" encoding="UTF-8"?>
<DescribeInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2012-10-01/">
<requestId>c6132c74-b524-4884-87f5-0f4bde4a9760</requestId>
<reservationSet>
<item>
<reservationId>r-72ef4a0a</reservationId>
<ownerId>184906166255</ownerId>
<groupSet/>
<instancesSet>
<item>
<instanceId>i-instance</instanceId>
<imageId>ami-1624987f</imageId>
<instanceState>
<code>16</code>
<name>running</name>
</instanceState>
<privateDnsName/>
<dnsName/>
<reason/>
<keyName>mykeypair</keyName>
<amiLaunchIndex>0</amiLaunchIndex>
<productCodes/>
<instanceType>m1.small</instanceType>
<launchTime>2012-12-14T23:48:37.000Z</launchTime>
<placement>
<availabilityZone>us-east-1d</availabilityZone>
<groupName/>
<tenancy>default</tenancy>
</placement>
<kernelId>aki-88aa75e1</kernelId>
<monitoring>
<state>disabled</state>
</monitoring>
<subnetId>subnet-0dc60667</subnetId>
<vpcId>vpc-id</vpcId>
<privateIpAddress>10.0.0.67</privateIpAddress>
<sourceDestCheck>true</sourceDestCheck>
<groupSet>
<item>
<groupId>sg-1a2b3c4d</groupId>
<groupName>WebServerSG</groupName>
</item>
</groupSet>
<architecture>x86_64</architecture>
<rootDeviceType>ebs</rootDeviceType>
<rootDeviceName>/dev/sda1</rootDeviceName>
<blockDeviceMapping>
<item>
<deviceName>/dev/sda1</deviceName>
<ebs>
<volumeId>vol-id</volumeId>
<status>attached</status>
<attachTime>2012-12-14T23:48:43.000Z</attachTime>
<deleteOnTermination>true</deleteOnTermination>
</ebs>
</item>
</blockDeviceMapping>
<virtualizationType>paravirtual</virtualizationType>
<clientToken>foo</clientToken>
<tagSet>
<item>
<key>Name</key>
<value/>
</item>
</tagSet>
<hypervisor>xen</hypervisor>
<networkInterfaceSet>
<item>
<networkInterfaceId>eni-id</networkInterfaceId>
<subnetId>subnet-id</subnetId>
<vpcId>vpc-id</vpcId>
<description>Primary network interface</description>
<ownerId>ownerid</ownerId>
<status>in-use</status>
<privateIpAddress>10.0.0.67</privateIpAddress>
<sourceDestCheck>true</sourceDestCheck>
<groupSet>
<item>
<groupId>sg-id</groupId>
<groupName>WebServerSG</groupName>
</item>
</groupSet>
<attachment>
<attachmentId>eni-attach-id</attachmentId>
<deviceIndex>0</deviceIndex>
<status>attached</status>
<attachTime>2012-12-14T23:48:37.000Z</attachTime>
<deleteOnTermination>true</deleteOnTermination>
</attachment>
<privateIpAddressesSet>
<item>
<privateIpAddress>10.0.0.67</privateIpAddress>
<primary>true</primary>
</item>
<item>
<privateIpAddress>10.0.0.54</privateIpAddress>
<primary>false</primary>
</item>
<item>
<privateIpAddress>10.0.0.55</privateIpAddress>
<primary>false</primary>
</item>
</privateIpAddressesSet>
</item>
</networkInterfaceSet>
<ebsOptimized>false</ebsOptimized>
</item>
</instancesSet>
</item>
</reservationSet>
</DescribeInstancesResponse>
"""
class TestDescribeSecurityGroups(AWSMockServiceTestCase):
    """Exercise SecurityGroup.instances() against canned EC2 XML responses."""
    connection_class = EC2Connection

    def test_get_instances(self):
        # First mocked call: DescribeSecurityGroups returns the two groups
        # defined in DESCRIBE_SECURITY_GROUP above.
        self.set_http_response(status_code=200, body=DESCRIBE_SECURITY_GROUP)
        groups = self.service_connection.get_all_security_groups()
        # Second mocked call: instances() triggers a DescribeInstances
        # request; the canned response lists one matching instance.
        self.set_http_response(status_code=200, body=DESCRIBE_INSTANCES)
        instances = groups[0].instances()

        self.assertEqual(1, len(instances))
        # The instance's first group should be the group we asked about.
        self.assertEqual(groups[0].id, instances[0].groups[0].id)
class SecurityGroupTest(unittest.TestCase):
    """Unit tests for SecurityGroup rule bookkeeping (no AWS calls involved)."""

    def test_add_rule(self):
        group = SecurityGroup()
        self.assertEqual(len(group.rules), 0)

        # Regression test: ``dry_run`` was being passed (but unhandled) before.
        group.add_rule(
            ip_protocol='http',
            from_port='80',
            to_port='8080',
            src_group_name='groupy',
            src_group_owner_id='12345',
            cidr_ip='10.0.0.1',
            src_group_group_id='54321',
            dry_run=False
        )
        self.assertEqual(len(group.rules), 1)

    def test_remove_rule_on_empty_group(self):
        # Removing a rule from a group that has none must raise.
        group = SecurityGroup()
        with self.assertRaises(ValueError):
            group.remove_rule('ip', 80, 80, None, None, None, None)
| mit |
RemieRichards/-tg-station | bot/C_maths.py | 36 | 2521 | ### EXPERIMENTAL PROTOTYPE ###
# e = 2.7182818284590452353602874713526624977572
# pi = math.pi
from __future__ import division #PYTHON Y U NO TELL ME THIS BEFORE
import math
import random
import re
e = "2.7182818284590452353602874713526624977572"
pi = str(math.pi)
global pre
pre = len("maths ")
def maths(influx,prefix="!",sender="NaN",debug=True,method="n"):
    # Evaluate a chat "maths" command and return the result or an error string.
    # method selects the output format: "n" normal, "i" int, "h" hex, "b" binary.
    global pre
    influx = influx.lower()
    # Strip the "<prefix>maths " command header from the raw message.
    influx = influx[len(prefix)+pre:]
    # Substitute the string constants e/pi into the expression text.
    influx = influx.replace("pie",pi+"*"+e)
    influx = influx.replace("e*",e+"*")
    influx = influx.replace("*e","*"+e)
    influx = influx.replace("pi",pi)
    if debug:
        print sender+":"+prefix+"maths"
    # Reject exponentiation and quote/statement characters outright (cheap
    # guard against runaway computations and code injection).
    if influx.count("**") == 0 and influx.count('"') == 0 and influx.count("'") == 0 and influx.count(";") == 0 and influx.count(":") == 0:
        influx_low = influx.lower()
        influx_hi = influx.upper()
        # Remove tokens that legitimately contain letters (binary/hex
        # literals, "rand") so the letter check below does not reject them.
        if "0b" in influx_low:
            influx_low = re.sub("0b[0-1]*","",influx_low)
            influx_hi = re.sub("0B[0-1]*","",influx_hi)
        if "0x" in influx_low:
            influx_low = re.sub("0x[a-f0-9]*","",influx_low)
            influx_hi = re.sub("0X[A-F0-9]*","",influx_hi)
        if "rand" in influx_low:
            influx_low = re.sub("rand","",influx_low)
            influx_hi = re.sub("RAND","",influx_hi)
        # If lower() == upper() then no alphabetic characters remain, i.e.
        # the expression contains no identifiers that could reach builtins.
        if influx_low == influx_hi:
            influx = re.sub("rand","random.random()",influx)
            # SECURITY NOTE(review): eval() on chat input. The letter filter
            # above blocks identifiers, but eval of untrusted input is still
            # risky (e.g. huge-number memory DoS); consider a real expression
            # parser or ast.literal_eval-based evaluation.
            try:
                result = eval(influx.lower())
            except ZeroDivisionError:
                return "Divide by zero detected."
            except SyntaxError:
                return "Syntax Error detected."
            except TypeError:
                return "Type Error detected."
            except:
                return "Unknown Error detected."
            else:
                if method == "n": #Normal
                    return result
                elif method == "i": #Forced Int
                    return int(result)
                elif method == "h": #Hex
                    try:
                        # Python 2 longs render with a trailing "L"; strip it.
                        if "L" in hex(result)[2:]:
                            return hex(result)[2:-1]
                        else:
                            return hex(result)[2:].upper()
                    except TypeError:
                        return "That value (%s) cannot be interpreted properly using !hmaths" %(str(result))
                elif method == "b": #Binary
                    try:
                        return bin(result)[2:].upper()
                    except TypeError:
                        return "That value (%s) cannot be interpreted properly using !bmaths" %(str(result))
                else:
                    return result
        else:
            return "What are you trying to make me do again?"
    else:
        return "Those are likely to make me hang"
| agpl-3.0 |
hslee16/ansible-modules-extras | system/firewalld.py | 10 | 22227 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Adam Miller (maxamillion@fedoraproject.org)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: firewalld
short_description: Manage arbitrary ports/services with firewalld
description:
- This module allows for addition or deletion of services and ports either tcp or udp in either running or permanent firewalld rules.
version_added: "1.4"
options:
service:
description:
- "Name of a service to add/remove to/from firewalld - service must be listed in /etc/services."
required: false
default: null
port:
description:
- "Name of a port or port range to add/remove to/from firewalld. Must be in the form PORT/PROTOCOL or PORT-PORT/PROTOCOL for port ranges."
required: false
default: null
rich_rule:
description:
- "Rich rule to add/remove to/from firewalld."
required: false
default: null
source:
description:
- 'The source/network you would like to add/remove to/from firewalld'
required: false
default: null
version_added: "2.0"
interface:
description:
- 'The interface you would like to add/remove to/from a zone in firewalld'
required: false
default: null
version_added: "2.1"
zone:
description:
- 'The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. Available choices can be extended based on per-system configs, listed here are "out of the box" defaults).'
required: false
default: system-default(public)
choices: [ "work", "drop", "internal", "external", "trusted", "home", "dmz", "public", "block" ]
permanent:
description:
- "Should this configuration be in the running firewalld configuration or persist across reboots."
required: false
default: null
immediate:
description:
- "Should this configuration be applied immediately, if set as permanent"
required: false
default: false
version_added: "1.9"
state:
description:
- "Should this port accept(enabled) or reject(disabled) connections."
required: true
choices: [ "enabled", "disabled" ]
timeout:
description:
- "The amount of time the rule should be in effect for when non-permanent."
required: false
default: 0
masquerade:
description:
- 'The masquerade setting you would like to enable/disable to/from zones within firewalld'
required: false
default: null
version_added: "2.1"
notes:
- Not tested on any Debian based system.
- Requires the python2 bindings of firewalld, who may not be installed by default if the distribution switched to python 3
requirements: [ 'firewalld >= 0.2.11' ]
author: "Adam Miller (@maxamillion)"
'''
EXAMPLES = '''
- firewalld: service=https permanent=true state=enabled
- firewalld: port=8081/tcp permanent=true state=disabled
- firewalld: port=161-162/udp permanent=true state=enabled
- firewalld: zone=dmz service=http permanent=true state=enabled
- firewalld: rich_rule='rule service name="ftp" audit limit value="1/m" accept' permanent=true state=enabled
- firewalld: source='192.168.1.0/24' zone=internal state=enabled
- firewalld: zone=trusted interface=eth2 permanent=true state=enabled
- firewalld: masquerade=yes state=enabled permanent=true zone=dmz
'''
import os
import re
try:
import firewall.config
FW_VERSION = firewall.config.VERSION
from firewall.client import Rich_Rule
from firewall.client import FirewallClient
fw = FirewallClient()
if not fw.connected:
HAS_FIREWALLD = False
else:
HAS_FIREWALLD = True
except ImportError:
HAS_FIREWALLD = False
#####################
# masquerade handling
#
def get_masquerade_enabled(zone):
    """Return whether masquerading is enabled in the runtime config of ``zone``."""
    # queryMasquerade() already answers with a boolean; the previous
    # "if x == True: return True else: return False" dance was redundant.
    return fw.queryMasquerade(zone)

def get_masquerade_enabled_permanent(zone):
    """Return whether masquerading is enabled in the permanent config of ``zone``."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    return fw_settings.getMasquerade()

def set_masquerade_enabled(zone):
    """Enable masquerading in the runtime config of ``zone``."""
    fw.addMasquerade(zone)

def set_masquerade_disabled(zone):
    """Disable masquerading in the runtime config of ``zone``."""
    fw.removeMasquerade(zone)

def set_masquerade_permanent(zone, masquerade):
    """Persist the masquerade flag (True/False) in the permanent config of ``zone``."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    fw_settings.setMasquerade(masquerade)
    fw_zone.update(fw_settings)
################
# port handling
#
def get_port_enabled(zone, port_proto):
    """Return whether the [port, protocol] pair is open in the runtime config of ``zone``."""
    # "in" already yields a bool; no need for an explicit True/False branch.
    return port_proto in fw.getPorts(zone)

def set_port_enabled(zone, port, protocol, timeout):
    """Open ``port``/``protocol`` in the runtime config of ``zone`` (0 timeout = no expiry)."""
    fw.addPort(zone, port, protocol, timeout)

def set_port_disabled(zone, port, protocol):
    """Close ``port``/``protocol`` in the runtime config of ``zone``."""
    fw.removePort(zone, port, protocol)

def get_port_enabled_permanent(zone, port_proto):
    """Return whether the [port, protocol] pair is open in the permanent config of ``zone``."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    # getPorts() returns tuples while callers pass a list, so normalize.
    return tuple(port_proto) in fw_settings.getPorts()

def set_port_enabled_permanent(zone, port, protocol):
    """Open ``port``/``protocol`` in the permanent config of ``zone``."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    fw_settings.addPort(port, protocol)
    fw_zone.update(fw_settings)

def set_port_disabled_permanent(zone, port, protocol):
    """Close ``port``/``protocol`` in the permanent config of ``zone``."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    fw_settings.removePort(port, protocol)
    fw_zone.update(fw_settings)
####################
# source handling
#
def get_source(zone, source):
    """Return whether ``source`` (address/network) is attached to ``zone``.

    NOTE: this reads the permanent configuration object, matching the
    original behaviour.
    """
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    # Membership test already produces a bool.
    return source in fw_settings.getSources()

def add_source(zone, source):
    """Attach ``source`` to ``zone`` in the permanent configuration."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    fw_settings.addSource(source)
    fw_zone.update(fw_settings)

def remove_source(zone, source):
    """Detach ``source`` from ``zone`` in the permanent configuration."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    fw_settings.removeSource(source)
    fw_zone.update(fw_settings)
####################
# interface handling
#
def get_interface(zone, interface):
    """Return whether ``interface`` is bound to ``zone`` in the runtime config."""
    return interface in fw.getInterfaces(zone)

def change_zone_of_interface(zone, interface):
    """Move ``interface`` into ``zone`` in the runtime config."""
    fw.changeZoneOfInterface(zone, interface)

def remove_interface(zone, interface):
    """Unbind ``interface`` from ``zone`` in the runtime config."""
    fw.removeInterface(zone, interface)

def get_interface_permanent(zone, interface):
    """Return whether ``interface`` is bound to ``zone`` in the permanent config."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    return interface in fw_settings.getInterfaces()

def change_zone_of_interface_permanent(zone, interface):
    """Move ``interface`` into ``zone`` in the permanent config.

    If the interface is currently assigned to a different zone it is first
    removed from that zone's settings so it is never listed twice.
    """
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    old_zone_name = fw.config().getZoneOfInterface(interface)
    if old_zone_name != zone:
        if old_zone_name:
            old_zone_obj = fw.config().getZoneByName(old_zone_name)
            old_zone_settings = old_zone_obj.getSettings()
            old_zone_settings.removeInterface(interface)  # remove from old
            old_zone_obj.update(old_zone_settings)
        fw_settings.addInterface(interface)  # add to new
        fw_zone.update(fw_settings)

def remove_interface_permanent(zone, interface):
    """Unbind ``interface`` from ``zone`` in the permanent config."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    fw_settings.removeInterface(interface)
    fw_zone.update(fw_settings)
####################
# service handling
#
def get_service_enabled(zone, service):
    """Return whether ``service`` is allowed in the runtime config of ``zone``."""
    return service in fw.getServices(zone)

def set_service_enabled(zone, service, timeout):
    """Allow ``service`` in the runtime config of ``zone`` (0 timeout = no expiry)."""
    fw.addService(zone, service, timeout)

def set_service_disabled(zone, service):
    """Remove ``service`` from the runtime config of ``zone``."""
    fw.removeService(zone, service)

def get_service_enabled_permanent(zone, service):
    """Return whether ``service`` is allowed in the permanent config of ``zone``."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    return service in fw_settings.getServices()

def set_service_enabled_permanent(zone, service):
    """Allow ``service`` in the permanent config of ``zone``."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    fw_settings.addService(service)
    fw_zone.update(fw_settings)

def set_service_disabled_permanent(zone, service):
    """Remove ``service`` from the permanent config of ``zone``."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    fw_settings.removeService(service)
    fw_zone.update(fw_settings)
####################
# rich rule handling
#
def get_rich_rule_enabled(zone, rule):
    """Return whether ``rule`` is active in the runtime config of ``zone``."""
    # Convert the rule string to firewalld's canonical form before the
    # membership test; otherwise equivalent spellings would not match.
    rule = str(Rich_Rule(rule_str=rule))
    return rule in fw.getRichRules(zone)

def set_rich_rule_enabled(zone, rule, timeout):
    """Add ``rule`` to the runtime config of ``zone`` (0 timeout = no expiry)."""
    fw.addRichRule(zone, rule, timeout)

def set_rich_rule_disabled(zone, rule):
    """Remove ``rule`` from the runtime config of ``zone``."""
    fw.removeRichRule(zone, rule)

def get_rich_rule_enabled_permanent(zone, rule):
    """Return whether ``rule`` is present in the permanent config of ``zone``."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    # Same canonicalization as the runtime variant above.
    rule = str(Rich_Rule(rule_str=rule))
    return rule in fw_settings.getRichRules()

def set_rich_rule_enabled_permanent(zone, rule):
    """Add ``rule`` to the permanent config of ``zone``."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    fw_settings.addRichRule(rule)
    fw_zone.update(fw_settings)

def set_rich_rule_disabled_permanent(zone, rule):
    """Remove ``rule`` from the permanent config of ``zone``."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    fw_settings.removeRichRule(rule)
    fw_zone.update(fw_settings)
def main():
    """Entry point for the firewalld Ansible module.

    Parses the module arguments, validates the environment (firewalld
    running, recent enough version) and applies exactly one of the
    supported modifications (service, port, rich rule, interface or
    masquerade; plus source handling) to the runtime and/or permanent
    firewall configuration, honoring check mode.
    """
    module = AnsibleModule(
        argument_spec=dict(
            service=dict(required=False, default=None),
            port=dict(required=False, default=None),
            rich_rule=dict(required=False, default=None),
            zone=dict(required=False, default=None),
            immediate=dict(type='bool', default=False),
            source=dict(required=False, default=None),
            permanent=dict(type='bool', required=False, default=None),
            state=dict(choices=['enabled', 'disabled'], required=True),
            timeout=dict(type='int', required=False, default=0),
            interface=dict(required=False, default=None),
            masquerade=dict(required=False, default=None),
        ),
        supports_check_mode=True
    )

    # 'permanent' must be given explicitly unless we are only managing
    # sources (which are always stored in the permanent configuration).
    if module.params['source'] is None and module.params['permanent'] is None:
        module.fail_json(msg='permanent is a required parameter')

    if module.params['interface'] is not None and module.params['zone'] is None:
        # BUGFIX: this used to call the non-existent module.fail(), which
        # crashed with AttributeError instead of reporting the real problem.
        module.fail_json(msg='zone is a required parameter')

    if not HAS_FIREWALLD:
        module.fail_json(msg='firewalld and its python 2 module are required for this module')

    ## Pre-run version checking
    if FW_VERSION < "0.2.11":
        # BUGFIX: the message previously claimed ">= 2.0.11" although the
        # actual requirement checked above is 0.2.11.
        module.fail_json(msg='unsupported version of firewalld, requires >= 0.2.11')

    ## Global Vars
    changed = False
    msgs = []
    service = module.params['service']
    rich_rule = module.params['rich_rule']
    source = module.params['source']

    if module.params['port'] is not None:
        # BUGFIX: split('/') raised an unhandled ValueError when no protocol
        # was supplied, so the intended error message below was unreachable.
        # partition() never raises and lets us report the problem cleanly.
        port, _, protocol = module.params['port'].partition('/')
        if not protocol:
            module.fail_json(msg='improper port format (missing protocol?)')
    else:
        port = None

    if module.params['zone'] is not None:
        zone = module.params['zone']
    else:
        zone = fw.getDefaultZone()

    permanent = module.params['permanent']
    desired_state = module.params['state']
    immediate = module.params['immediate']
    timeout = module.params['timeout']
    interface = module.params['interface']
    masquerade = module.params['masquerade']

    ## Check for firewalld running
    try:
        if fw.connected == False:
            module.fail_json(msg='firewalld service must be running')
    except AttributeError:
        module.fail_json(msg="firewalld connection can't be established,\
                version likely too old. Requires firewalld >= 0.2.11")

    # The mutually exclusive operations; only one may be requested per task.
    modification_count = 0
    for param in (service, port, rich_rule, interface, masquerade):
        if param is not None:
            modification_count += 1
    if modification_count > 1:
        module.fail_json(msg='can only operate on port, service, rich_rule or interface at once')

    if service is not None:
        if permanent:
            is_enabled = get_service_enabled_permanent(zone, service)
            msgs.append('Permanent operation')

            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_service_enabled_permanent(zone, service)
                    changed = True
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_service_disabled_permanent(zone, service)
                    changed = True

        if immediate or not permanent:
            is_enabled = get_service_enabled(zone, service)
            msgs.append('Non-permanent operation')

            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_service_enabled(zone, service, timeout)
                    changed = True
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_service_disabled(zone, service)
                    changed = True

        if changed:
            msgs.append("Changed service %s to %s" % (service, desired_state))

    if source is not None:
        is_enabled = get_source(zone, source)
        if desired_state == "enabled":
            if not is_enabled:
                if module.check_mode:
                    module.exit_json(changed=True)
                add_source(zone, source)
                changed = True
                msgs.append("Added %s to zone %s" % (source, zone))
        elif desired_state == "disabled":
            if is_enabled:
                if module.check_mode:
                    module.exit_json(changed=True)
                remove_source(zone, source)
                changed = True
                msgs.append("Removed %s from zone %s" % (source, zone))

    if port is not None:
        if permanent:
            is_enabled = get_port_enabled_permanent(zone, [port, protocol])
            msgs.append('Permanent operation')

            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_port_enabled_permanent(zone, port, protocol)
                    changed = True
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_port_disabled_permanent(zone, port, protocol)
                    changed = True

        if immediate or not permanent:
            is_enabled = get_port_enabled(zone, [port, protocol])
            msgs.append('Non-permanent operation')

            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_port_enabled(zone, port, protocol, timeout)
                    changed = True
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_port_disabled(zone, port, protocol)
                    changed = True

        if changed:
            msgs.append("Changed port %s to %s" % ("%s/%s" % (port, protocol),
                                                   desired_state))

    if rich_rule is not None:
        if permanent:
            is_enabled = get_rich_rule_enabled_permanent(zone, rich_rule)
            msgs.append('Permanent operation')

            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_rich_rule_enabled_permanent(zone, rich_rule)
                    changed = True
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_rich_rule_disabled_permanent(zone, rich_rule)
                    changed = True

        if immediate or not permanent:
            is_enabled = get_rich_rule_enabled(zone, rich_rule)
            msgs.append('Non-permanent operation')

            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_rich_rule_enabled(zone, rich_rule, timeout)
                    changed = True
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_rich_rule_disabled(zone, rich_rule)
                    changed = True

        if changed:
            msgs.append("Changed rich_rule %s to %s" % (rich_rule, desired_state))

    if interface is not None:
        if permanent:
            is_enabled = get_interface_permanent(zone, interface)
            msgs.append('Permanent operation')

            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    change_zone_of_interface_permanent(zone, interface)
                    changed = True
                    msgs.append("Changed %s to zone %s" % (interface, zone))
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    remove_interface_permanent(zone, interface)
                    changed = True
                    msgs.append("Removed %s from zone %s" % (interface, zone))

        if immediate or not permanent:
            is_enabled = get_interface(zone, interface)
            msgs.append('Non-permanent operation')

            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    change_zone_of_interface(zone, interface)
                    changed = True
                    msgs.append("Changed %s to zone %s" % (interface, zone))
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    remove_interface(zone, interface)
                    changed = True
                    msgs.append("Removed %s from zone %s" % (interface, zone))

    if masquerade is not None:
        if permanent:
            is_enabled = get_masquerade_enabled_permanent(zone)
            msgs.append('Permanent operation')

            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_masquerade_permanent(zone, True)
                    changed = True
                    msgs.append("Added masquerade to zone %s" % (zone))
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_masquerade_permanent(zone, False)
                    changed = True
                    msgs.append("Removed masquerade from zone %s" % (zone))

        if immediate or not permanent:
            is_enabled = get_masquerade_enabled(zone)
            msgs.append('Non-permanent operation')

            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_masquerade_enabled(zone)
                    changed = True
                    msgs.append("Added masquerade to zone %s" % (zone))
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_masquerade_disabled(zone)
                    changed = True
                    msgs.append("Removed masquerade from zone %s" % (zone))

    module.exit_json(changed=changed, msg=', '.join(msgs))
#################################################
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
newerthcom/savagerebirth | libs/python-2.72/Tools/pybench/Strings.py | 45 | 10877 | from pybench import Test
from string import join
class ConcatStrings(Test):
    """Benchmark: concatenation of two largish non-interned strings.

    The loop body is deliberately unrolled 50 times so that the measured
    work per round matches ``operations`` (10 * 5); do not fold it into a
    loop or the timing bookkeeping becomes wrong.
    """

    version = 2.0
    operations = 10 * 5  # must equal the number of unrolled "t + s" lines
    rounds = 100000

    def test(self):

        # Make sure the strings are *not* interned
        s = join(map(str,range(100)))
        t = join(map(str,range(1,101)))

        for i in xrange(self.rounds):
            t + s
            t + s
            t + s
            t + s
            t + s

            t + s
            t + s
            t + s
            t + s
            t + s

            t + s
            t + s
            t + s
            t + s
            t + s

            t + s
            t + s
            t + s
            t + s
            t + s

            t + s
            t + s
            t + s
            t + s
            t + s

            t + s
            t + s
            t + s
            t + s
            t + s

            t + s
            t + s
            t + s
            t + s
            t + s

            t + s
            t + s
            t + s
            t + s
            t + s

            t + s
            t + s
            t + s
            t + s
            t + s

            t + s
            t + s
            t + s
            t + s
            t + s

    def calibrate(self):
        # Same setup and loop as test(), but without the measured operation,
        # so the framework can subtract the loop overhead.
        s = join(map(str,range(100)))
        t = join(map(str,range(1,101)))

        for i in xrange(self.rounds):
            pass
class CompareStrings(Test):
    """Time <, > and == comparisons between two non-interned strings.

    The repeated statement groups are intentional benchmark bodies
    (see pybench); calibrate() measures the loop overhead only.
    """
    version = 2.0
    # 10 groups of 5 comparisons per loop iteration.
    operations = 10 * 5
    rounds = 200000
    def test(self):
        # Make sure the strings are *not* interned
        s = join(map(str,range(10)))
        t = join(map(str,range(10))) + "abc"
        for i in xrange(self.rounds):
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
            t < s
            t > s
            t == s
            t > s
            t < s
    def calibrate(self):
        # Same setup and empty loop as test() for overhead subtraction.
        s = join(map(str,range(10)))
        t = join(map(str,range(10))) + "abc"
        for i in xrange(self.rounds):
            pass
class CompareInternedStrings(Test):
    """Time comparisons of two *interned* (identical) string objects.

    Since t is s, equality checks can take the identity fast path;
    this benchmark measures that best case.  The repeated statement
    groups are intentional (pybench style).
    """
    version = 2.0
    # 10 groups of 5 comparisons per loop iteration.
    operations = 10 * 5
    rounds = 300000
    def test(self):
        # Make sure the strings *are* interned
        s = intern(join(map(str,range(10))))
        t = s
        for i in xrange(self.rounds):
            t == s
            t == s
            t >= s
            t > s
            t < s
            t == s
            t == s
            t >= s
            t > s
            t < s
            t == s
            t == s
            t >= s
            t > s
            t < s
            t == s
            t == s
            t >= s
            t > s
            t < s
            t == s
            t == s
            t >= s
            t > s
            t < s
            t == s
            t == s
            t >= s
            t > s
            t < s
            t == s
            t == s
            t >= s
            t > s
            t < s
            t == s
            t == s
            t >= s
            t > s
            t < s
            t == s
            t == s
            t >= s
            t > s
            t < s
            t == s
            t == s
            t >= s
            t > s
            t < s
    def calibrate(self):
        # Same setup and empty loop as test() for overhead subtraction.
        s = intern(join(map(str,range(10))))
        t = s
        for i in xrange(self.rounds):
            pass
class CreateStringsWithConcat(Test):
    """Time incremental string building via repeated `s = s + ...`.

    This is the classic quadratic concatenation pattern; the benchmark
    exists precisely to measure its cost.  The 50 assignments per loop
    iteration are intentional (pybench style).
    """
    version = 2.0
    # Initial assignment plus 49 concatenations = 50 operations.
    operations = 10 * 5
    rounds = 200000
    def test(self):
        for i in xrange(self.rounds):
            s = 'om'
            s = s + 'xbx'
            s = s + 'xcx'
            s = s + 'xdx'
            s = s + 'xex'
            s = s + 'xax'
            s = s + 'xbx'
            s = s + 'xcx'
            s = s + 'xdx'
            s = s + 'xex'
            s = s + 'xax'
            s = s + 'xbx'
            s = s + 'xcx'
            s = s + 'xdx'
            s = s + 'xex'
            s = s + 'xax'
            s = s + 'xbx'
            s = s + 'xcx'
            s = s + 'xdx'
            s = s + 'xex'
            s = s + 'xax'
            s = s + 'xbx'
            s = s + 'xcx'
            s = s + 'xdx'
            s = s + 'xex'
            s = s + 'xax'
            s = s + 'xbx'
            s = s + 'xcx'
            s = s + 'xdx'
            s = s + 'xex'
            s = s + 'xax'
            s = s + 'xbx'
            s = s + 'xcx'
            s = s + 'xdx'
            s = s + 'xex'
            s = s + 'xax'
            s = s + 'xbx'
            s = s + 'xcx'
            s = s + 'xdx'
            s = s + 'xex'
            s = s + 'xax'
            s = s + 'xbx'
            s = s + 'xcx'
            s = s + 'xdx'
            s = s + 'xex'
            s = s + 'xax'
            s = s + 'xbx'
            s = s + 'xcx'
            s = s + 'xdx'
            s = s + 'xex'
    def calibrate(self):
        # Empty loop only; there is no per-round setup to replicate.
        for i in xrange(self.rounds):
            pass
class StringSlicing(Test):
    """Time a variety of slice operations on one large string.

    Each loop iteration performs 5 groups of 7 distinct slices
    (prefix, suffix, middle, single-char and negative-index forms).
    """
    version = 2.0
    # 5 groups of 7 slice expressions per loop iteration.
    operations = 5 * 7
    rounds = 160000
    def test(self):
        s = join(map(str,range(100)))
        for i in xrange(self.rounds):
            s[50:]
            s[:25]
            s[50:55]
            s[-1:]
            s[:1]
            s[2:]
            s[11:-11]
            s[50:]
            s[:25]
            s[50:55]
            s[-1:]
            s[:1]
            s[2:]
            s[11:-11]
            s[50:]
            s[:25]
            s[50:55]
            s[-1:]
            s[:1]
            s[2:]
            s[11:-11]
            s[50:]
            s[:25]
            s[50:55]
            s[-1:]
            s[:1]
            s[2:]
            s[11:-11]
            s[50:]
            s[:25]
            s[50:55]
            s[-1:]
            s[:1]
            s[2:]
            s[11:-11]
    def calibrate(self):
        # Same setup and empty loop as test() for overhead subtraction.
        s = join(map(str,range(100)))
        for i in xrange(self.rounds):
            pass
### String methods
if hasattr('', 'lower'):
    class StringMappings(Test):
        """Time case-mapping methods (lower/upper/title) on strings of
        increasing length (20, 50, 100 and 256 characters).

        Longer strings get fewer repetitions so each contributes a
        comparable share of the total work: 3 * (5 + 4 + 2 + 1) calls.
        """
        version = 2.0
        operations = 3 * (5 + 4 + 2 + 1)
        rounds = 70000
        def test(self):
            s = join(map(chr,range(20)),'')
            t = join(map(chr,range(50)),'')
            u = join(map(chr,range(100)),'')
            v = join(map(chr,range(256)),'')
            for i in xrange(self.rounds):
                s.lower()
                s.lower()
                s.lower()
                s.lower()
                s.lower()
                s.upper()
                s.upper()
                s.upper()
                s.upper()
                s.upper()
                s.title()
                s.title()
                s.title()
                s.title()
                s.title()
                t.lower()
                t.lower()
                t.lower()
                t.lower()
                t.upper()
                t.upper()
                t.upper()
                t.upper()
                t.title()
                t.title()
                t.title()
                t.title()
                u.lower()
                u.lower()
                u.upper()
                u.upper()
                u.title()
                u.title()
                v.lower()
                v.upper()
                v.title()
        def calibrate(self):
            # Same setup and empty loop as test() for overhead subtraction.
            s = join(map(chr,range(20)),'')
            t = join(map(chr,range(50)),'')
            u = join(map(chr,range(100)),'')
            v = join(map(chr,range(256)),'')
            for i in xrange(self.rounds):
                pass
class StringPredicates(Test):
version = 2.0
operations = 10 * 7
rounds = 100000
def test(self):
data = ('abc', '123', ' ', '\xe4\xf6\xfc', '\xdf'*10)
len_data = len(data)
for i in xrange(self.rounds):
s = data[i % len_data]
s.isalnum()
s.isalpha()
s.isdigit()
s.islower()
s.isspace()
s.istitle()
s.isupper()
s.isalnum()
s.isalpha()
s.isdigit()
s.islower()
s.isspace()
s.istitle()
s.isupper()
s.isalnum()
s.isalpha()
s.isdigit()
s.islower()
s.isspace()
s.istitle()
s.isupper()
s.isalnum()
s.isalpha()
s.isdigit()
s.islower()
s.isspace()
s.istitle()
s.isupper()
s.isalnum()
s.isalpha()
s.isdigit()
s.islower()
s.isspace()
s.istitle()
s.isupper()
s.isalnum()
s.isalpha()
s.isdigit()
s.islower()
s.isspace()
s.istitle()
s.isupper()
s.isalnum()
s.isalpha()
s.isdigit()
s.islower()
s.isspace()
s.istitle()
s.isupper()
s.isalnum()
s.isalpha()
s.isdigit()
s.islower()
s.isspace()
s.istitle()
s.isupper()
s.isalnum()
s.isalpha()
s.isdigit()
s.islower()
s.isspace()
s.istitle()
s.isupper()
s.isalnum()
s.isalpha()
s.isdigit()
s.islower()
s.isspace()
s.istitle()
s.isupper()
def calibrate(self):
data = ('abc', '123', ' ', '\u1234\u2345\u3456', '\uFFFF'*10)
data = ('abc', '123', ' ', '\xe4\xf6\xfc', '\xdf'*10)
len_data = len(data)
for i in xrange(self.rounds):
s = data[i % len_data]
| gpl-2.0 |
mtlchun/edx | lms/tests.py | 7 | 2673 | """Tests for the lms module itself."""
import mimetypes
from mock import patch
from django.test import TestCase
from django.core.urlresolvers import reverse
from edxmako import add_lookup, LOOKUP
from lms import startup
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from util import keyword_substitution
class LmsModuleTests(TestCase):
    """
    Tests for lms module itself.
    """
    def test_new_mimetypes(self):
        # The platform registers extra font mimetypes at startup; each
        # of these extensions must therefore resolve to a known type.
        for ext in ('eot', 'otf', 'ttf', 'woff'):
            guessed, _encoding = mimetypes.guess_type('test.' + ext)
            self.assertIsNotNone(guessed)
class TemplateLookupTests(TestCase):
    """
    Tests for TemplateLookup.
    """
    def test_add_lookup_to_main(self):
        """Test that any template directories added are not cleared when microsites are enabled."""
        add_lookup('main', 'external_module', __name__)
        # NOTE: `dir` shadows the builtin here; kept as-is for byte-identity.
        directories = LOOKUP['main'].directories
        self.assertEqual(len([dir for dir in directories if 'external_module' in dir]), 1)
        # This should not clear the directories list
        startup.enable_microsites()
        # Re-read the directories to verify the lookup survived startup.
        directories = LOOKUP['main'].directories
        self.assertEqual(len([dir for dir in directories if 'external_module' in dir]), 1)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_FEEDBACK_SUBMISSION': True})
class HelpModalTests(ModuleStoreTestCase):
    """Tests for the help modal"""
    def setUp(self):
        # Each test gets a fresh course to render the info page against.
        super(HelpModalTests, self).setUp()
        self.course = CourseFactory.create()
    def test_simple_test(self):
        """
        Simple test to make sure that you don't get a 500 error when the modal
        is enabled.
        """
        url = reverse('info', args=[self.course.id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
class KeywordSubConfigTests(TestCase):
    """ Tests for configuring keyword substitution feature """
    def test_keyword_map_not_empty(self):
        """ Ensure that the keyword subsitution map is non-empty """
        self.assertFalse(keyword_substitution.keyword_function_map_is_empty())
    def test_adding_keyword_map_is_noop(self):
        """ Test that trying to add a new keyword mapping is a no-op """
        # The map is populated once at startup; later additions must be ignored.
        existing_map = keyword_substitution.KEYWORD_FUNCTION_MAP
        keyword_substitution.add_keyword_function_map({
            '%%USER_ID%%': lambda x: x,
            '%%USER_FULLNAME%%': lambda x: x,
        })
        self.assertDictEqual(existing_map, keyword_substitution.KEYWORD_FUNCTION_MAP)
| agpl-3.0 |
0x47d/atd.id | src/vault.py | 1 | 19707 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import cPickle
import hashlib
import hmac
import os
import struct
import sys
import configobj
import simplejson as json
import ruamel.yaml as yaml
from Crypto.PublicKey import RSA
from Crypto.Cipher import AES
from Crypto.Cipher import PKCS1_OAEP
from Crypto import Random
from PyQt4 import QtCore
from PyQt4 import QtGui
from trezorlib.client import BaseClient
from trezorlib.client import ProtocolMixin
from trezorlib.transport_hid import HidTransport
from trezorlib import messages_pb2
# Size (bits) of the RSA backup keypair.
DeOS_VAULT_RSA_KEY_SIZE = 2048
# Size (bytes) of the ephemeral AES key used to wrap the RSA private key.
DeOS_VAULT_SYMMETRIC_KEY_SIZE = 32
# Size (bytes) of the outer AES-CBC storage key.
DeOS_VAULT_KEY_SIZE = 32
# AES block size in bytes (also the IV length).
DeOS_VAULT_BLOCK_SIZE = 16
# Length (bytes) of the HMAC-SHA256 integrity tag.
DeOS_VAULT_MAC_SIZE = 32
DeOS_VAULT_KEY_INDEX = 0 # column where key is shown in password table
DeOS_VAULT_PASSWD_INDEX = 1 # column where password is shown in password table
# column of QWidgetItem in whose data we cache decrypted passwords
DeOS_VAULT_CACHE_INDEX = 0
DeOS_VAULT_WINDOW_TITLE = "Vault"
def q2s(s):
    """
    Convert QString to UTF-8 string object
    """
    # QString.toUtf8() yields a QByteArray; str() gives the raw bytes.
    return str(s.toUtf8())
def s2q(s):
    """
    Convert UTF-8 encoded string to QString
    """
    return QtCore.QString.fromUtf8(s)
class Magic(object):
    # Namespace of protocol constants: BIP32-style key-derivation node
    # paths and user-facing strings for the Trezor encrypt/decrypt calls.
    # `u` unpacks a 4-char tag into the uint32 the node paths expect.
    u = lambda fmt, s: struct.unpack(fmt, s)[0]
    headerStr = 'TZPW'
    hdr = u('!I', headerStr)
    unlockNode = [hdr, u('!I', 'ULCK')] # for unlocking wrapped AES-CBC key.
    # for generating keys for individual password groups.
    groupNode = [hdr, u('!I', 'GRUP')]
    # the unlock & backup keys are written this way to fit display nicely.
    unlockKey = 'Decrypt master key?' # string to derive wrapping key from.
    # for unlocking wrapped backup private RSA key.
    backupNode = [hdr, u('!I', 'BKUP')]
    # string to derive backup wrapping key from.
    backupKey = 'Decrypt backup key?'
class Padding(object):
    """
    PKCS#7-style padding helper for block ciphers.

    pad() always appends between 1 and blocksize bytes, each byte equal
    to the pad length, so unpad() can recover the original string by
    reading the last byte.
    """
    def __init__(self, blocksize):
        self.blocksize = blocksize
    def pad(self, s):
        # Pad length is blocksize when s is already block-aligned.
        pad_len = self.blocksize - len(s) % self.blocksize
        return s + chr(pad_len) * pad_len
    def unpad(self, s):
        # The final byte encodes how many padding bytes to strip.
        pad_len = ord(s[-1])
        return s[:len(s) - pad_len]
class DeOS_Backup(object):
    """
    Performs backup and restore for password storage.

    Holds an RSA keypair: the public half encrypts every stored password
    (so a backup can be restored without the Trezor's per-group keys),
    while the private half is itself wrapped by an ephemeral AES key
    that only the Trezor can decrypt.
    """
    RSA_KEYSIZE = DeOS_VAULT_RSA_KEY_SIZE
    SYMMETRIC_KEYSIZE = DeOS_VAULT_SYMMETRIC_KEY_SIZE
    BLOCKSIZE = DeOS_VAULT_BLOCK_SIZE
    def __init__(self, trezor):
        """
        Create with no keys prepared.
        @param trezor: client object used to encrypt private key
        """
        self.trezor = trezor
        self.publicKey = None
        self.encryptedPrivate = None # encrypted private key.
        # ephemeral key used to encrypt private RSA key.
        self.encryptedEphemeral = None
        # IV used to encrypt private key with ephemeral key.
        self.ephemeralIv = None
    def generate(self):
        """
        Generate key and encrypt private key.
        """
        key = RSA.generate(self.RSA_KEYSIZE)
        privateDer = key.exportKey(format="DER")
        self.publicKey = key.publickey()
        self.wrapPrivateKey(privateDer)
    def wrapPrivateKey(self, privateKey):
        """
        Wrap serialized private key by encrypting it with trezor.
        """
        # Trezor client won't allow to encrypt whole serialized RSA key
        # in one go - it's too big. We need an ephemeral symmetric key
        # and encrypt the small ephemeral with Trezor.
        rng = Random.new()
        ephemeral = rng.read(self.SYMMETRIC_KEYSIZE)
        self.ephemeralIv = rng.read(self.BLOCKSIZE)
        cipher = AES.new(ephemeral, AES.MODE_CBC, self.ephemeralIv)
        padded = Padding(self.BLOCKSIZE).pad(privateKey)
        self.encryptedPrivate = cipher.encrypt(padded)
        # Only decryption prompts the user on the device (ask_on_decrypt).
        self.encryptedEphemeral = self.trezor.encrypt_keyvalue(
            Magic.backupNode, Magic.backupKey, ephemeral,
            ask_on_encrypt=False, ask_on_decrypt=True)
    def unwrapPrivateKey(self):
        """
        Decrypt private RSA key using self.encryptedEphemeral from
        self.encryptedPrivate. Encrypted ephemeral key will be
        decrypted with Trezor.
        @returns RSA private key as Crypto.RSA._RSAobj
        """
        ephemeral = self.trezor.decrypt_keyvalue(Magic.backupNode,
                                                 Magic.backupKey,
                                                 self.encryptedEphemeral,
                                                 ask_on_encrypt=False,
                                                 ask_on_decrypt=True)
        cipher = AES.new(ephemeral, AES.MODE_CBC, self.ephemeralIv)
        padded = cipher.decrypt(self.encryptedPrivate)
        privateDer = Padding(self.BLOCKSIZE).unpad(padded)
        privateKey = RSA.importKey(privateDer)
        return privateKey
    def serialize(self):
        """
        Return object data as serialized string.
        """
        # The private key is stored only in wrapped form; the public key
        # is serialized as plain DER.
        publicDer = self.publicKey.exportKey(format="DER")
        picklable = (self.ephemeralIv,
                     self.encryptedEphemeral,
                     self.encryptedPrivate,
                     publicDer)
        return cPickle.dumps(picklable, cPickle.HIGHEST_PROTOCOL)
    def deserialize(self, serialized):
        """
        Set object data from serialized string
        """
        # NOTE(review): cPickle.loads on attacker-controlled data can run
        # arbitrary code; the vault file is assumed trusted here.
        unpickled = cPickle.loads(serialized)
        (self.ephemeralIv,
         self.encryptedEphemeral,
         self.encryptedPrivate,
         publicDer) = unpickled
        self.publicKey = RSA.importKey(publicDer)
    def encryptPassword(self, password):
        """
        Encrypt password with RSA under OAEP padding and return it.
        Password must be shorter than modulus length minus padding
        length.
        """
        cipher = PKCS1_OAEP.new(self.publicKey)
        encrypted = cipher.encrypt(password)
        return encrypted
    def decryptPassword(self, encryptedPassword, privateKey):
        """
        Decrypt RSA-OAEP encrypted password.
        """
        cipher = PKCS1_OAEP.new(privateKey)
        password = cipher.decrypt(encryptedPassword)
        return password
class DeOS_PasswordGroup(object):
    """
    Container for the password entries of a single group.

    Every entry is a (key, encrypted_value, backup_value) triple:
    - key: the label shown to the user
    - encrypted_value: password AES-CBC encrypted, unlockable only by Trezor
    - backup_value: the same password RSA-encrypted for backup restore
    """
    def __init__(self):
        self.entries = []
    def addEntry(self, key, encryptedValue, backupValue):
        """Append a new (key, value, backup) triple to this group."""
        entry = (key, encryptedValue, backupValue)
        self.entries.append(entry)
    def removeEntry(self, index):
        """Delete the entry stored at the given position."""
        del self.entries[index]
    def updateEntry(self, index, key, encryptedValue, backupValue):
        """Overwrite the entry at *index* with new key/value/backup data."""
        self.entries[index] = (key, encryptedValue, backupValue)
    def entry(self, index):
        """Return the (key, value, backup) triple stored at *index*."""
        return self.entries[index]
class DeOS_PasswordMap(object):
    """
    Storage of groups of passwords in memory.

    Handles serialization to/from the on-disk vault format described
    below, with an outer AES-CBC layer whose key is wrapped by the
    Trezor, plus an HMAC-SHA256 integrity tag.
    """
    BLOCKSIZE = DeOS_VAULT_BLOCK_SIZE
    MACSIZE = DeOS_VAULT_MAC_SIZE
    KEYSIZE = DeOS_VAULT_KEY_SIZE
    ## On-disk format
    # 4 bytes header "TZPW"
    # 4 bytes data storage version, network order uint32_t
    # 32 bytes AES-CBC-encrypted wrappedOuterKey
    # 16 bytes IV
    # 2 bytes backup private key size (B)
    # B bytes encrypted backup key
    # 4 bytes size of data following (N)
    # N bytes AES-CBC encrypted blob containing pickled struct for pwd map
    # 32 bytes HMAC-SHA256 over data w/ same key as AES-CBC data struct above
    def __init__(self, trezor):
        assert trezor is not None
        self.groups = {}
        self.trezor = trezor
        self.outerKey = None # outer AES-CBC key
        self.outerIv = None  # IV for data blob encrypted with outerKey
        self.backupKey = None
    def addGroup(self, groupName):
        """
        Add group by name as utf-8 encoded string
        """
        self._add_group(groupName)
    def _add_group(self, groupName):
        # Refuse duplicates so existing entries are never clobbered.
        if groupName in self.groups:
            raise KeyError("Group name already exists")
        self.groups[groupName] = DeOS_PasswordGroup()
    def load(self, fname):
        """
        Load encrypted passwords from disk file, decrypt outer
        layer containing key names. Requires Trezor connected.
        @throws IOError: if reading file failed
        """
        self._load(fname)
    def _load(self, fname):
        # Parse the on-disk layout documented on the class, validating
        # each field length before use.
        with file(fname) as f:
            header = f.read(len(Magic.headerStr))
            if header != Magic.headerStr:
                raise IOError("Bad header in storage file")
            version = f.read(4)
            if len(version) != 4 or struct.unpack("!I", version)[0] != 1:
                raise IOError("Unknown version of storage file")
            wrappedKey = f.read(DeOS_VAULT_KEY_SIZE)
            if len(wrappedKey) != DeOS_VAULT_KEY_SIZE:
                raise IOError("Corrupted disk format - bad wrapped key length")
            # The Trezor prompts the user before unwrapping the master key.
            self.outerKey = self.unwrapKey(wrappedKey)
            self.outerIv = f.read(DeOS_VAULT_BLOCK_SIZE)
            if len(self.outerIv) != DeOS_VAULT_BLOCK_SIZE:
                raise IOError("Corrupted disk format - bad IV length")
            lb = f.read(2)
            if len(lb) != 2:
                raise IOError("Corrupted disk format - bad backup key length")
            lb = struct.unpack("!H", lb)[0]
            self.backupKey = DeOS_Backup(self.trezor)
            serializedBackup = f.read(lb)
            if len(serializedBackup) != lb:
                raise IOError("Corrupted disk format - not enough encrypted backup key bytes")
            self.backupKey.deserialize(serializedBackup)
            ls = f.read(4)
            if len(ls) != 4:
                raise IOError("Corrupted disk format - bad data length")
            l = struct.unpack("!I", ls)[0]
            encrypted = f.read(l)
            if len(encrypted) != l:
                raise IOError("Corrupted disk format - not enough data bytes")
            hmacDigest = f.read(DeOS_VAULT_MAC_SIZE)
            if len(hmacDigest) != DeOS_VAULT_MAC_SIZE:
                raise IOError("Corrupted disk format - HMAC not complete")
            # time-invariant HMAC comparison that also works with python 2.6
            newHmacDigest = hmac.new(self.outerKey, encrypted, hashlib.sha256).digest()
            hmacCompare = 0
            for (ch1, ch2) in zip(hmacDigest, newHmacDigest):
                hmacCompare |= int(ch1 != ch2)
            if hmacCompare != 0:
                raise IOError("Corrupted disk format - HMAC does not match or bad passphrase")
            serialized = self.decryptOuter(encrypted, self.outerIv)
            # NOTE(review): unpickling assumes the vault file is trusted;
            # the HMAC check above gates this against tampering.
            self.groups = cPickle.loads(serialized)
    def save(self, fname):
        """
        Write password database to disk, encrypt it. Requires Trezor
        connected.
        @throws IOError: if writing file failed
        """
        self._save(fname)
    def _save(self, fname):
        assert len(self.outerKey) == DeOS_VAULT_KEY_SIZE
        rnd = Random.new()
        # Fresh IV for every save; the key is re-wrapped via the Trezor.
        self.outerIv = rnd.read(DeOS_VAULT_BLOCK_SIZE)
        wrappedKey = self.wrapKey(self.outerKey)
        with file(fname, "wb") as f:
            version = 1
            f.write(Magic.headerStr)
            f.write(struct.pack("!I", version))
            f.write(wrappedKey)
            f.write(self.outerIv)
            serialized = cPickle.dumps(self.groups, cPickle.HIGHEST_PROTOCOL)
            encrypted = self.encryptOuter(serialized, self.outerIv)
            # MAC is computed over the ciphertext (encrypt-then-MAC).
            hmacDigest = hmac.new(self.outerKey, encrypted, hashlib.sha256).digest()
            serializedBackup = self.backupKey.serialize()
            lb = struct.pack("!H", len(serializedBackup))
            f.write(lb)
            f.write(serializedBackup)
            l = struct.pack("!I", len(encrypted))
            f.write(l)
            f.write(encrypted)
            f.write(hmacDigest)
            f.flush()
            f.close()
    def encryptOuter(self, plaintext, iv):
        """
        Pad and encrypt with self.outerKey
        """
        return self._encrypt(plaintext, iv, self.outerKey)
    def _encrypt(self, plaintext, iv, key):
        """
        Pad plaintext with PKCS#5 and encrypt it.
        """
        cipher = AES.new(key, AES.MODE_CBC, iv)
        padded = Padding(DeOS_VAULT_BLOCK_SIZE).pad(plaintext)
        return cipher.encrypt(padded)
    def decryptOuter(self, ciphertext, iv):
        """
        Decrypt with self.outerKey and unpad
        """
        return self._decrypt(ciphertext, iv, self.outerKey)
    def _decrypt(self, ciphertext, iv, key):
        """
        Decrypt ciphertext, unpad it and return
        """
        cipher = AES.new(key, AES.MODE_CBC, iv)
        plaintext = cipher.decrypt(ciphertext)
        unpadded = Padding(DeOS_VAULT_BLOCK_SIZE).unpad(plaintext)
        return unpadded
    def unwrapKey(self, wrappedOuterKey):
        """
        Decrypt wrapped outer key using Trezor.
        """
        return self._unwrap_key(wrappedOuterKey)
    def _unwrap_key(self, key):
        # ask_on_decrypt=True makes the device ask for confirmation.
        return self.trezor.decrypt_keyvalue(Magic.unlockNode,
                                            Magic.unlockKey,
                                            key,
                                            ask_on_encrypt=False,
                                            ask_on_decrypt=True)
    def wrapKey(self, keyToWrap):
        """
        Encrypt/wrap a key. Its size must be multiple of 16.
        """
        return self._wrap_key(keyToWrap)
    def _wrap_key(self, key):
        return self.trezor.encrypt_keyvalue(Magic.unlockNode,
                                            Magic.unlockKey,
                                            key,
                                            ask_on_encrypt=False,
                                            ask_on_decrypt=True)
    def encryptPassword(self, password, groupName):
        """
        Encrypt a password. Does PKCS#5 padding before encryption.
        Store IV as first block.
        @param groupName key that will be shown to user on Trezor and
        used to encrypt the password. A string in utf-8
        """
        return self._encrypt_password(password, groupName)
    def _encrypt_password(self, password, groupName):
        # A random first block doubles as the IV and is stored in-band.
        rnd = Random.new()
        rndBlock = rnd.read(DeOS_VAULT_BLOCK_SIZE)
        padded = Padding(DeOS_VAULT_BLOCK_SIZE).pad(password)
        ugroup = groupName.decode("utf-8")
        return rndBlock + self.trezor.encrypt_keyvalue(Magic.groupNode,
                                                       ugroup,
                                                       padded,
                                                       ask_on_encrypt=False,
                                                       ask_on_decrypt=True,
                                                       iv=rndBlock)
    def decryptPassword(self, encryptedPassword, groupName):
        """
        Decrypt a password. First block is IV. After decryption strips
        PKCS#5 padding.
        @param groupName key that will be shown to user on Trezor and
        was used to encrypt the password. A string in utf-8.
        """
        return self._decrypt_password(encryptedPassword, groupName)
    def _decrypt_password(self, encryptedPassword, groupName):
        ugroup = groupName.decode("utf-8")
        # Split the in-band IV (first block) from the actual ciphertext.
        iv, encryptedPassword = encryptedPassword[:DeOS_VAULT_BLOCK_SIZE],\
            encryptedPassword[DeOS_VAULT_BLOCK_SIZE:]
        plain = self.trezor.decrypt_keyvalue(Magic.groupNode,
                                             ugroup,
                                             encryptedPassword,
                                             ask_on_encrypt=False,
                                             ask_on_decrypt=True,
                                             iv=iv)
        return Padding(DeOS_VAULT_BLOCK_SIZE).unpad(plain)
class DeOS_Vault(QtGui.QMainWindow):
    """Main application window showing password groups and entries."""
    def __init__(self, passwds, database):
        """
        @param passwds: a PasswordMap instance w/ encrypted passwords
        @param database: file name for saving pwMap
        """
        QtGui.QMainWindow.__init__(self)
        # NOTE(review): setupUi is not defined in this class or in
        # QMainWindow; presumably provided by a generated-UI mixin or
        # monkey-patched elsewhere - confirm.
        self.setupUi(self)
        self._set_window_title()
        self._set_modified()
        self._set_database_filename(database)
        self._set_password_map(passwds)
        self._set_selected_group()
        self._set_groups_model(header_labels=['Password group'])
        self._set_groups_filter()
    def _get_window_title(self, modified=False):
        # Append "*" to the title when there are unsaved changes.
        res = self.window_title
        if modified:
            res = res+'*'*int(modified)
        return res
    def _set_selected_group(self, selected_group=None):
        self.selectedGroup = selected_group
    def _set_password_map(self, passwds):
        self.pwMap = passwds
    def _set_database_filename(self, database):
        self.dbFilename = database
    def _set_groups_filter(self):
        # Proxy model enables live filtering of the group list.
        self.groupsFilter = QtGui.QSortFilterProxyModel()
        self.groupsFilter.setSourceModel(self.groupsModel)
    def _set_groups_model(self, header_labels):
        self.groupsModel = QtGui.QStandardItemModel()
        self.groupsModel.setHorizontalHeaderLabels(header_labels)
    def _set_modified(self, modified=False):
        self.modified = modified # modified flag "Save?" question on exit
    def _set_window_title(self, title=DeOS_VAULT_WINDOW_TITLE):
        self.window_title = title
class DeOS_VaultSettings(object):
    """Persists the vault database path via QSettings."""
    def __init__(self):
        self.dbFilename = None
        self.settings = QtCore.QSettings("ConstructibleUniverse", "TrezorPass")
        fname = self.settings.value("database/filename")
        # QSettings returns an invalid QVariant when the key is absent.
        if fname.isValid():
            self.dbFilename = q2s(fname.toString())
    def store(self):
        """Write the current database filename back to QSettings."""
        self.settings.setValue("database/filename", s2q(self.dbFilename))
class DeOS_TrezorMixin(object):
    """
    Mixin for input of passhprases.

    Supplies the Qt dialog callbacks the trezorlib client invokes for
    button, passphrase and PIN requests.
    """
    def __init__(self, *args, **kwargs):
        super(DeOS_TrezorMixin, self).__init__(*args, **kwargs)
        # Optional pre-set passphrase; when set, no dialog is shown.
        self.passphrase = None
    def callback_ButtonRequest(self, msg):
        # Device button presses are acknowledged without user interaction.
        return messages_pb2.ButtonAck()
    def callback_PassphraseRequest(self, msg):
        if self.passphrase is not None:
            return messages_pb2.PassphraseAck(passphrase=self.passphrase)
        # NOTE(review): TrezorPassphraseDialog is not defined in this
        # file; presumably imported/defined elsewhere - confirm.
        dialog = TrezorPassphraseDialog()
        if not dialog.exec_():
            sys.exit(3)  # user cancelled the passphrase dialog
        else:
            passphrase = dialog.passphraseEdit.text()
            passphrase = unicode(passphrase)
        return messages_pb2.PassphraseAck(passphrase=passphrase)
    def callback_PinMatrixRequest(self, msg):
        # NOTE(review): EnterPinDialog is likewise defined elsewhere.
        dialog = EnterPinDialog()
        if not dialog.exec_():
            sys.exit(7)  # user cancelled the PIN dialog
        pin = q2s(dialog.pin())
        return messages_pb2.PinMatrixAck(pin=pin)
    def prefillPassphrase(self, passphrase):
        """
        Instead of asking for passphrase, use this one.
        """
        self.passphrase = passphrase.decode("utf-8")
class DeOS_TrezorClient(ProtocolMixin, DeOS_TrezorMixin, BaseClient):
    """
    Trezor client with Qt input methods
    """
    # Composition via MRO: trezorlib protocol handling + Qt dialogs.
    pass
class DeOS_Trezor(object):
    """Thin helper for locating connected Trezor hardware devices."""
    def __init__(self):
        self.passphrase = None
    def _get_devices(self):
        """
        Returns Trezor HID devices
        """
        return HidTransport.enumerate()
| gpl-3.0 |
rh890127a/Palabos_SW | scons/scons-local-2.1.0/SCons/Tool/sunf95.py | 21 | 2176 | """SCons.Tool.sunf95
Tool-specific initialization for sunf95, the Sun Studio F95 compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunf95.py 5357 2011/09/09 21:31:03 bdeegan"
import SCons.Util
from FortranCommon import add_all_to_env
# Sun Studio Fortran 95 compiler names probed for, in order of preference.
compilers = ['sunf95', 'f95']
def generate(env):
    """Add Builders and construction variables for sunf95 to an
    Environment."""
    add_all_to_env(env)
    # Use the first Sun f95 compiler found on the path; fall back to 'f95'.
    detected = env.Detect(compilers)
    fcomp = detected if detected else 'f95'
    env['FORTRAN'] = fcomp
    env['F95'] = fcomp
    # Shared-object builds reuse the static tool names plus -KPIC.
    env['SHFORTRAN'] = '$FORTRAN'
    env['SHF95'] = '$F95'
    env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -KPIC')
    env['SHF95FLAGS'] = SCons.Util.CLVar('$F95FLAGS -KPIC')
def exists(env):
    # Tool is usable iff one of the Sun f95 compiler names is on the path.
    return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| agpl-3.0 |
sanoma/django-arctic | arctic/bin/arctic.py | 1 | 6472 | #!/usr/bin/env python
from __future__ import absolute_import, print_function, unicode_literals
import os
import stat
from optparse import OptionParser
from django.core.management import ManagementUtility
class bcolors:
    """
    ANSI escape sequences for terminal colors
    """
    HEADER = "\033[95m"
    OKBLUE = "\033[94m"
    OKGREEN = "\033[92m"
    WARNING = "\033[93m"
    FAIL = "\033[91m"
    ENDC = "\033[0m"  # resets all attributes back to the terminal default
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"
def create_project(parser, options, args):
    """Create a new Arctic project from the bundled template.

    args is [command, project_name, optional_dest_dir]; errors are
    reported through parser.error(), which exits the process.
    """
    # Validate args
    if len(args) < 2:
        parser.error("Please specify a name for your Arctic installation")
    elif len(args) > 3:
        parser.error("Too many arguments")
    project_name = args[1]
    try:
        dest_dir = args[2]
    except IndexError:
        dest_dir = ""
    # Make sure given name is not already in use by another
    # python package/module.
    try:
        __import__(project_name)
    except ImportError:
        pass
    else:
        parser.error(
            '"{}" conflicts with the name of an existing '
            "Python module and cannot be used as a project "
            "name. Please try another name.".format(project_name)
        )
    print("Creating an Arctic project named {}".format(project_name))
    # Create the project from the Arctic template using startapp
    # First find the path to Arctic
    import arctic
    arctic_path = os.path.dirname(arctic.__file__)
    template_path = os.path.join(arctic_path, "project_template/start")
    # Call django-admin startproject
    utility_args = [
        "django-admin.py",
        "startproject",
        "--template=" + template_path,
        "--ext=html,rst",
        project_name,
    ]
    if dest_dir:
        utility_args.append(dest_dir)
    utility = ManagementUtility(utility_args)
    utility.execute()
    # add execute permission to manage.py, somehow it gets lost on the way
    manage_py = os.path.join(dest_dir or project_name, "manage.py")
    st = os.stat(manage_py)
    os.chmod(
        manage_py, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    )
    print(
        "Congratulations! {0} has been created.\n"
        "The next steps are:\n"
        "- In config/settings.py change the database settings (if needed).\n"
        "- Run database migrations: {0}/manage.py migrate.\n"
        "- Create an admin user: {0}/manage.py createsuperuser.\n"
        "- Finally run the project: {0}/manage.py runserver.\n".format(
            project_name
        )
    )
def create_app(parser, options, args):
    """Create a new app inside an Arctic project from the app template.

    args is [command, app_name, optional_dest_dir]; errors are
    reported through parser.error(), which exits the process.
    """
    # Validate args
    if len(args) < 2:
        parser.error("Please specify a name for your app")
    elif len(args) > 3:
        parser.error("Too many arguments")
    app_name = args[1].lower()
    try:
        dest_dir = args[2]
    except IndexError:
        dest_dir = ""
    # Make sure given name is not already in use by another
    # python package/module.
    try:
        __import__(app_name)
    except ImportError:
        pass
    else:
        parser.error(
            '"{}" conflicts with the name of an existing '
            "Python module and cannot be used as an app "
            "name. Please try another name.".format(app_name)
        )
    print(
        (
            bcolors.HEADER + "Creating an App named {}" + bcolors.ENDC + "\n"
        ).format(app_name)
    )
    # First find the path to Arctic
    import arctic
    arctic_path = os.path.dirname(arctic.__file__)
    template_path = os.path.join(arctic_path, "project_template/app")
    # Call django-admin startapp
    utility_args = [
        "django-admin.py",
        "startapp",
        "--template=" + template_path,
        app_name,
    ]
    if dest_dir:
        utility_args.append(dest_dir)
    utility = ManagementUtility(utility_args)
    utility.execute()
    # Print a colorized summary of the generated layout and next steps.
    print(
        (
            "Congratulations! {0} folder has been created it contains the "
            "following structure.\n\n" + bcolors.OKBLUE + " -{0}\n"
            " ---__init__.py\n"
            " ---apps.py\n"
            " ---forms.py\n"
            " ---models.py\n"
            " ---urls.py\n"
            " ---views.py\n\n" + bcolors.ENDC + "The next steps are:\n\n"
            " Add the app name to "
            + bcolors.UNDERLINE
            + "INSTALLED_APPS"
            + bcolors.ENDC
            + " in the settings.py\n"  # NOQA
            + bcolors.OKGREEN
            + '"{0}",'
            + bcolors.ENDC
            + "\n"
            " Add the app name and path to "
            + bcolors.UNDERLINE
            + "ARCTIC_MENU"
            + bcolors.ENDC
            + " in the settings.py\n"  # NOQA
            + bcolors.OKGREEN
            + '("{1}", "{0}:list", "fa-folder"),'
            + bcolors.ENDC
            + "\n"  # NOQA
            " Add the urls to config/urls.py.\n"
            + bcolors.OKGREEN
            + 'url(r"^{0}/", include("{0}.urls", "{0}")),'
            + bcolors.ENDC
            + "\n"  # NOQA
            " Add fields in the models.py file\n"
            "- Run "
            + bcolors.OKGREEN
            + "./manage.py makemigrations {0}"
            + bcolors.ENDC
            + "\n"  # NOQA
            "- Run "
            + bcolors.OKGREEN
            + "./manage.py migrate"
            + bcolors.ENDC
            + "\n\n"  # NOQA
            "The "
            + bcolors.BOLD
            + "forms.py"
            + bcolors.ENDC
            + " has a form with all the fields in the model and \n"  # NOQA
            "the "
            + bcolors.BOLD
            + "views.py"
            + bcolors.ENDC
            + " contains views for list, create, edit and delete. \n"  # NOQA
            "All of then can be tweaked to better satisfy the needs of the "
            "project/app\n"
        ).format(app_name, app_name.capitalize())
    )
# Maps CLI sub-command names to their handler functions.
COMMANDS = {
    "start": create_project,
    "createapp": create_app,
}
def main():
    """Entry point: parse the command line and dispatch to a sub-command.

    Recognised sub-commands live in COMMANDS; an unknown command exits
    via parser.error(), and a missing command prints the help text.
    """
    # Parse options.  The two usage lines were previously adjacent string
    # literals with no separator, so --help printed them fused into one
    # garbled line; a newline now separates them.
    parser = OptionParser(
        usage="Usage: arctic start project_name [directory]\n"
        "Usage: arctic createapp appname [directory]"
    )
    (options, args) = parser.parse_args()
    # Find command
    try:
        command = args[0]
    except IndexError:
        parser.print_help()
        return
    if command in COMMANDS:
        COMMANDS[command](parser, options, args)
    else:
        parser.error("Unrecognised command: " + command)
if __name__ == "__main__":
main()
| mit |
defcello/PatchCorral | src/gui/ui_voicelists.py | 2 | 6632 | ####################################################################################################
# Copyright 2013 John Crawford
#
# This file is part of PatchCorral.
#
# PatchCorral is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PatchCorral is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PatchCorral. If not, see <http://www.gnu.org/licenses/>.
####################################################################################################
## @file
# Initializes the GUI for SynthNav.
from PySide import QtGui, QtCore
from patchcorral.src.engine import mididevice
import traceback
##
# Base class for voice list widgets.
class VoiceListWidget(QtGui.QWidget):
## Currently-assigned patchcorral.src.engine.mididevice.MIDIVoiceList object.
voiceList = None
    class TableWidget(QtGui.QTableWidget):
        # QTableWidget subclass that re-emits key presses as a signal so
        # the owning voice-list widget can react to keyboard input.
        keyPressed = QtCore.Signal(QtGui.QKeyEvent)
        def keyPressEvent(self, event):
            # Preserve default table behaviour, then notify listeners.
            super().keyPressEvent(event)
            self.keyPressed.emit(event)
def __init__(self, parent, synthNav):
super().__init__(parent)
self.synthNav = synthNav
assert self.voiceList is not None, '"self.voiceList" needs to be populated by the subclass.'
self.voiceMap = {}
if len(self.voiceList) == 0:
self.cols = mididevice.MIDIVoice.tags
else:
self.cols = list(self.voiceList[0].keys())
self.numCols = len(self.cols)
#Create widgets.
self.tw_currVoices = self.TableWidget(0, self.numCols, self)
self.tw_currVoices.setHorizontalHeaderLabels(self.cols)
#Lay it out.
self.vbox = QtGui.QVBoxLayout(self)
self.vbox.addWidget(self.tw_currVoices)
#Populate voices.
self.setVoiceList(self.voiceList)
#Connect signals.
self.tw_currVoices.keyPressed.connect(self.onKeypressEvent)
def onKeypressEvent(self, event):
pass
def refreshCurrVoices(self):
print("refreshCurrVoices called")
rowCountI = self.tw_currVoices.rowCount()
rowCountF = min(len(self.voiceList), 1000)
self.tw_currVoices.clearContents()
print("refreshCurrVoices setting row count")
self.tw_currVoices.setRowCount(rowCountF)
assert self.tw_currVoices.rowCount() == rowCountF, '{} != {}'.format(self.tw_currVoices.rowCount(), rowCountF)
print("refreshCurrVoices entering for loops")
for row, voice in zip(range(rowCountF), self.voiceList):
for col, attr in enumerate(self.cols):
item = self.tw_currVoices.item(row, col)
if item is None or item is 0:
item = QtGui.QTableWidgetItem(str(voice[attr]))
isNewItem = True
else:
item.setText(str(voice[attr]))
isNewItem = False
# self.voiceMap[voice] = item
item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
item.voice = voice
if isNewItem:
self.tw_currVoices.setItem(row, col, item)
print("refreshCurrVoices returning")
def setVoiceList(self, voiceList):
oVoiceList = self.voiceList
if isinstance(voiceList, str):
voiceList = self.synthNav.getVoiceList(voiceList)
self.voiceList = voiceList
try:
self.refreshCurrVoices()
except:
self.voiceList = oVoiceList
self.refreshCurrVoices()
raise
else:
try:
self.voiceList.listModified.disconnect(self.refreshCurrVoices)
except:
traceback.print_exc()
self.voiceList.listModified.connect(self.refreshCurrVoices)
##
# Widget displaying voices that remain after applying the selected filters.
##
# Widget displaying voices that remain after applying the selected filters.
class FilteredVoiceListWidget(VoiceListWidget):

    ## Emitted with the MIDIVoice whose row was double-clicked.
    voiceDoubleClicked = QtCore.Signal(mididevice.MIDIVoice)

    def __init__(self, parent, synthNav):
        self.voiceList = synthNav.getFilteredVoiceList()
        super().__init__(parent, synthNav)
        table = self.tw_currVoices
        table.itemDoubleClicked.connect(self.onItemDoubleClicked)
        table.itemSelectionChanged.connect(self.onItemSelectionChanged)

    def onItemDoubleClicked(self, item):
        self.voiceDoubleClicked.emit(item.voice)

    def onItemSelectionChanged(self):
        # Audition the first selected voice, if any.
        selection = self.tw_currVoices.selectedItems()
        if selection:
            selection[0].voice.pc()

    def onKeypressEvent(self, event):
        # Enter/Return queues every selected voice.
        if event.key() not in (QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return):
            return
        queued = self.synthNav.getVoiceList('queued')
        queued.adds(item.voice for item in self.tw_currVoices.selectedItems())
##
# Combo box listing the available voice lists by name.
class VoiceListSelectWidget(QtGui.QComboBox):

    ## Emitted with the newly-selected voice-list name.
    selectionChanged = QtCore.Signal(str)

    def __init__(self, parent, synthNav):
        super().__init__(parent)
        names = list(synthNav.voiceLists.keys())
        self.addItems(names)
        self.currentIndexChanged.connect(self.onCurrentIndexChanged)

    def onCurrentIndexChanged(self, idx):
        # Translate the index-based Qt signal into a name-based one.
        name = self.itemText(idx)
        self.selectionChanged.emit(name)
##
# Widget displaying voices in the currently-selected user list.
##
# Widget displaying voices in the currently-selected user list.
class VoiceListEditWidget(VoiceListWidget):

    def __init__(self, parent, synthNav, voiceList="queued"):
        self.voiceList = synthNav.getVoiceList(voiceList)
        super().__init__(parent, synthNav)
        self.pb_clearQueue = QtGui.QPushButton("Clear Queue")
        self.vbox.addWidget(self.pb_clearQueue)
        self.pb_clearQueue.pressed.connect(self.onClearButtonPressed)
        self.tw_currVoices.itemDoubleClicked.connect(self.onItemDoubleClicked)

    def onClearButtonPressed(self):
        self.voiceList.clear()

    def onItemDoubleClicked(self, item):
        # Audition the voice (program change).
        item.voice.pc()

    def onKeypressEvent(self, event):
        if event.key() in [QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return]:
            # Enter/Return auditions the first selected voice.
            items = self.tw_currVoices.selectedItems()
            try:
                item = items[0]
            except IndexError:
                pass
            else:
                item.voice.pc()
        elif event.key() in [QtCore.Qt.Key_Delete]:
            # Delete removes the selected voices, then restores the cursor.
            currCell = [self.tw_currVoices.currentRow(), self.tw_currVoices.currentColumn()]
            items = self.tw_currVoices.selectedItems()
            self.voiceList.remove(*(item.voice for item in items))
            # BUGFIX: clamp to the last valid row index (rowCount() - 1);
            # the previous code clamped to rowCount(), one past the end.
            if currCell[0] >= self.tw_currVoices.rowCount():
                currCell[0] = self.tw_currVoices.rowCount() - 1
            self.tw_currVoices.setCurrentCell(*currCell)
| gpl-3.0 |
behrtam/xpython | exercises/dnd-character/dnd_character_test.py | 1 | 2692 | import unittest
from dnd_character import Character, modifier
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.1.0
class DndCharacterTest(unittest.TestCase):
    """Exercism D&D character tests.

    Covers the ``modifier`` score-to-modifier table and sanity checks on
    randomly rolled ``Character`` abilities.
    """

    # modifier(score) == floor((score - 10) / 2), spot-checked over 3..18.
    def test_ability_modifier_for_score_3_is_n4(self):
        self.assertEqual(modifier(3), -4)

    def test_ability_modifier_for_score_4_is_n3(self):
        self.assertEqual(modifier(4), -3)

    def test_ability_modifier_for_score_5_is_n3(self):
        self.assertEqual(modifier(5), -3)

    def test_ability_modifier_for_score_6_is_n2(self):
        self.assertEqual(modifier(6), -2)

    def test_ability_modifier_for_score_7_is_n2(self):
        self.assertEqual(modifier(7), -2)

    def test_ability_modifier_for_score_8_is_n1(self):
        self.assertEqual(modifier(8), -1)

    def test_ability_modifier_for_score_9_is_n1(self):
        self.assertEqual(modifier(9), -1)

    def test_ability_modifier_for_score_10_is_0(self):
        self.assertEqual(modifier(10), 0)

    def test_ability_modifier_for_score_11_is_0(self):
        self.assertEqual(modifier(11), 0)

    def test_ability_modifier_for_score_12_is_1(self):
        self.assertEqual(modifier(12), 1)

    def test_ability_modifier_for_score_13_is_1(self):
        self.assertEqual(modifier(13), 1)

    def test_ability_modifier_for_score_14_is_2(self):
        self.assertEqual(modifier(14), 2)

    def test_ability_modifier_for_score_15_is_2(self):
        self.assertEqual(modifier(15), 2)

    def test_ability_modifier_for_score_16_is_3(self):
        self.assertEqual(modifier(16), 3)

    def test_ability_modifier_for_score_17_is_3(self):
        self.assertEqual(modifier(17), 3)

    def test_ability_modifier_for_score_18_is_4(self):
        self.assertEqual(modifier(18), 4)

    def test_random_ability_is_within_range(self):
        # A single rolled ability score must land in [3, 18].
        score = Character().ability()
        self.assertIs(score >= 3 and score <= 18, True)

    def test_random_character_is_valid(self):
        # All six abilities must be in [3, 18] and hitpoints must be derived
        # from the constitution modifier.
        Char = Character()
        self.assertIs(Char.strength >= 3 and Char.strength <= 18, True)
        self.assertIs(Char.dexterity >= 3 and Char.dexterity <= 18, True)
        self.assertIs(Char.constitution >= 3 and Char.constitution <= 18, True)
        self.assertIs(Char.intelligence >= 3 and Char.intelligence <= 18, True)
        self.assertIs(Char.wisdom >= 3 and Char.wisdom <= 18, True)
        self.assertIs(Char.charisma >= 3 and Char.charisma <= 18, True)
        self.assertIs(Char.hitpoints == 10 + modifier(Char.constitution), True)

    def test_each_ability_is_only_calculated_once(self):
        # Guards against ``strength`` being re-rolled on each attribute access.
        Char = Character()
        self.assertIs(Char.strength == Char.strength, True)
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
| mit |
mammique/django | django/utils/translation/__init__.py | 2 | 6280 | """
Internationalization support.
"""
from __future__ import unicode_literals
from django.utils.encoding import force_text
from django.utils.functional import lazy
from django.utils import six
# Explicit public API of django.utils.translation.
__all__ = [
    'activate', 'deactivate', 'override', 'deactivate_all',
    'get_language', 'get_language_from_request',
    'get_language_info', 'get_language_bidi',
    'check_for_language', 'to_locale', 'templatize', 'string_concat',
    'gettext', 'gettext_lazy', 'gettext_noop',
    'ugettext', 'ugettext_lazy', 'ugettext_noop',
    'ngettext', 'ngettext_lazy',
    'ungettext', 'ungettext_lazy',
    'pgettext', 'pgettext_lazy',
    'npgettext', 'npgettext_lazy',
]
class TranslatorCommentWarning(SyntaxWarning):
    """Warning category for problems with translator comments.

    NOTE(review): presumably emitted by the message-extraction machinery
    (templatize / trans_real) -- confirm against those modules.
    """
    pass
# Here be dragons, so a short explanation of the logic won't hurt:
# We are trying to solve two problems: (1) access settings, in particular
# settings.USE_I18N, as late as possible, so that modules can be imported
# without having to first configure Django, and (2) if some other code creates
# a reference to one of these functions, don't break that reference when we
# replace the functions with their real counterparts (once we do access the
# settings).
class Trans(object):
    """
    The purpose of this class is to store the actual translation function upon
    receiving the first call to that function. After this is done, changes to
    USE_I18N will have no effect to which function is served upon request. If
    your tests rely on changing USE_I18N, you can delete all the functions
    from _trans.__dict__.

    Note that storing the function with setattr will have a noticeable
    performance effect, as access to the function goes the normal path,
    instead of using __getattr__.
    """

    def __getattr__(self, real_name):
        # Imported lazily so that simply importing this module does not
        # require Django settings to be configured yet.
        from django.conf import settings
        if settings.USE_I18N:
            from django.utils.translation import trans_real as trans
        else:
            from django.utils.translation import trans_null as trans
        # Cache the resolved function on the instance so later access
        # bypasses __getattr__ entirely.
        setattr(self, real_name, getattr(trans, real_name))
        return getattr(trans, real_name)
# Module-level singleton through which every translation call below is routed.
_trans = Trans()

# The Trans class is no more needed, so remove it from the namespace.
del Trans
# Thin delegating wrappers: each call is routed through ``_trans`` so that
# the real backend (trans_real or trans_null) is selected lazily on first use.

def gettext_noop(message):
    # Mark *message* for translation without translating it now.
    return _trans.gettext_noop(message)

ugettext_noop = gettext_noop


def gettext(message):
    return _trans.gettext(message)


def ngettext(singular, plural, number):
    # Plural-aware translation; *number* selects the plural form.
    return _trans.ngettext(singular, plural, number)


def ugettext(message):
    return _trans.ugettext(message)


def ungettext(singular, plural, number):
    return _trans.ungettext(singular, plural, number)


def pgettext(context, message):
    # *context* disambiguates identical source strings.
    return _trans.pgettext(context, message)


def npgettext(context, singular, plural, number):
    return _trans.npgettext(context, singular, plural, number)


# Lazy variants: evaluation is deferred until the result is used as a string.
gettext_lazy = lazy(gettext, str)
ugettext_lazy = lazy(ugettext, six.text_type)
pgettext_lazy = lazy(pgettext, six.text_type)
def lazy_number(func, resultclass, number=None, **kwargs):
    """Build a lazy (possibly plural) translation proxy.

    ``number`` is either a concrete int, in which case the plural form can be
    decided immediately, or the *name* of the placeholder key whose value --
    supplied later through the ``%`` operator -- selects the plural form.
    """
    if isinstance(number, int):
        # Plural form already determined; only the translation is deferred.
        kwargs['number'] = number
        proxy = lazy(func, resultclass)(**kwargs)
    else:
        class NumberAwareString(resultclass):
            def __mod__(self, rhs):
                if isinstance(rhs, dict) and number:
                    # ``number`` names the dict key that holds the count.
                    try:
                        number_value = rhs[number]
                    except KeyError:
                        raise KeyError('Your dictionary lacks key \'%s\'. '
                            'Please provide it, because it is required to '
                            'determine whether string is singular or plural.'
                            % number)
                else:
                    # ``rhs`` itself is the count.
                    number_value = rhs
                kwargs['number'] = number_value
                translated = func(**kwargs)
                try:
                    translated = translated % rhs
                except TypeError:
                    # String doesn't contain a placeholder for the number
                    pass
                return translated

        proxy = lazy(lambda **kwargs: NumberAwareString(), NumberAwareString)(**kwargs)
    return proxy
# Lazy plural-aware variants; see ``lazy_number`` for how ``number`` is
# interpreted (an int, or the name of the placeholder holding the count).

def ngettext_lazy(singular, plural, number=None):
    # Native-str flavour.
    return lazy_number(ngettext, str, singular=singular, plural=plural, number=number)


def ungettext_lazy(singular, plural, number=None):
    # Text (unicode) flavour.
    return lazy_number(ungettext, six.text_type, singular=singular, plural=plural, number=number)


def npgettext_lazy(context, singular, plural, number=None):
    # Contextual text flavour.
    return lazy_number(npgettext, six.text_type, context=context, singular=singular, plural=plural, number=number)
def activate(language):
    # Install *language* as the active translation (delegates to backend).
    return _trans.activate(language)


def deactivate():
    # Remove the active translation (delegates to backend).
    return _trans.deactivate()
class override(object):
    """Context manager that activates ``language`` for the duration of the
    ``with`` block and restores the previous language on exit.

    ``language=None`` deactivates all translations inside the block;
    ``deactivate=True`` deactivates (rather than restores) on exit.
    """
    def __init__(self, language, deactivate=False):
        self.language = language
        self.deactivate = deactivate
        # NOTE(review): the "old" language is captured here at construction
        # time rather than in __enter__ -- if the object is created long
        # before the ``with`` block runs, a stale language may be restored.
        # Confirm this is intended.
        self.old_language = get_language()

    def __enter__(self):
        if self.language is not None:
            activate(self.language)
        else:
            deactivate_all()

    def __exit__(self, exc_type, exc_value, traceback):
        if self.deactivate:
            deactivate()
        else:
            activate(self.old_language)
# Delegating wrappers routed through the lazily-initialised ``_trans`` backend.

def get_language():
    return _trans.get_language()


def get_language_bidi():
    # Whether the active language is written right-to-left.
    return _trans.get_language_bidi()


def check_for_language(lang_code):
    return _trans.check_for_language(lang_code)


def to_locale(language):
    # Convert a language name to a locale name (mapping defined by backend).
    return _trans.to_locale(language)


def get_language_from_request(request, check_path=False):
    return _trans.get_language_from_request(request, check_path)


def get_language_from_path(path):
    return _trans.get_language_from_path(path)


def templatize(src, origin=None):
    return _trans.templatize(src, origin)


def deactivate_all():
    return _trans.deactivate_all()
def _string_concat(*strings):
    """
    Lazy variant of string concatenation, needed for translations that are
    constructed from multiple parts.
    """
    return ''.join(force_text(piece) for piece in strings)

string_concat = lazy(_string_concat, six.text_type)
def get_language_info(lang_code):
    """Return the LANG_INFO entry for *lang_code*, raising KeyError if unknown."""
    from django.conf.locale import LANG_INFO
    if lang_code not in LANG_INFO:
        raise KeyError("Unknown language code %r." % lang_code)
    return LANG_INFO[lang_code]
| bsd-3-clause |
LarryHillyer/PoolHost | PoolHost/env/Lib/site-packages/django/core/management/commands/runserver.py | 64 | 6371 | from __future__ import unicode_literals
import errno
import os
import re
import socket
import sys
from datetime import datetime
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.core.servers.basehttp import get_internal_wsgi_application, run
from django.utils import autoreload, six
from django.utils.encoding import force_text, get_system_encoding
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
class Command(BaseCommand):
    """Management command implementing ``manage.py runserver``."""

    help = "Starts a lightweight Web server for development."

    # Validation is called explicitly each time the server is reloaded.
    requires_system_checks = False
    leave_locale_alone = True
    default_port = '8000'

    def add_arguments(self, parser):
        """Register runserver's command-line arguments."""
        parser.add_argument(
            'addrport', nargs='?',
            help='Optional port number, or ipaddr:port'
        )
        parser.add_argument(
            '--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
            help='Tells Django to use an IPv6 address.',
        )
        parser.add_argument(
            '--nothreading', action='store_false', dest='use_threading', default=True,
            help='Tells Django to NOT use threading.',
        )
        parser.add_argument(
            '--noreload', action='store_false', dest='use_reloader', default=True,
            help='Tells Django to NOT use the auto-reloader.',
        )

    def execute(self, *args, **options):
        if options['no_color']:
            # We rely on the environment because it's currently the only
            # way to reach WSGIRequestHandler. This seems an acceptable
            # compromise considering `runserver` runs indefinitely.
            os.environ[str("DJANGO_COLORS")] = str("nocolor")
        super(Command, self).execute(*args, **options)

    def get_handler(self, *args, **options):
        """
        Returns the default WSGI handler for the runner.
        """
        return get_internal_wsgi_application()

    def handle(self, *args, **options):
        """Validate the address/port argument and start the server."""
        # CLEANUP: removed a redundant local ``from django.conf import
        # settings`` that shadowed the module-level import already in scope.
        if not settings.DEBUG and not settings.ALLOWED_HOSTS:
            raise CommandError('You must set settings.ALLOWED_HOSTS if DEBUG is False.')
        self.use_ipv6 = options['use_ipv6']
        if self.use_ipv6 and not socket.has_ipv6:
            raise CommandError('Your Python does not support IPv6.')
        self._raw_ipv6 = False
        if not options['addrport']:
            self.addr = ''
            self.port = self.default_port
        else:
            # IDIOM: call match() on the already-compiled pattern rather
            # than routing it back through re.match().
            m = naiveip_re.match(options['addrport'])
            if m is None:
                raise CommandError('"%s" is not a valid port number '
                                   'or address:port pair.' % options['addrport'])
            self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
            if not self.port.isdigit():
                raise CommandError("%r is not a valid port number." % self.port)
            if self.addr:
                if _ipv6:
                    # Strip the surrounding brackets from "[::1]"-style input.
                    self.addr = self.addr[1:-1]
                    self.use_ipv6 = True
                    self._raw_ipv6 = True
                elif self.use_ipv6 and not _fqdn:
                    raise CommandError('"%s" is not a valid IPv6 address.' % self.addr)
        if not self.addr:
            self.addr = '::1' if self.use_ipv6 else '127.0.0.1'
            self._raw_ipv6 = self.use_ipv6
        self.run(**options)

    def run(self, **options):
        """
        Runs the server, using the autoreloader if needed
        """
        use_reloader = options['use_reloader']

        if use_reloader:
            autoreload.main(self.inner_run, None, options)
        else:
            self.inner_run(None, **options)

    def inner_run(self, *args, **options):
        """Perform system checks, print the banner, and serve until interrupted."""
        # If an exception was silenced in ManagementUtility.execute in order
        # to be raised in the child process, raise it now.
        autoreload.raise_last_exception()

        threading = options['use_threading']
        # 'shutdown_message' is a stealth option.
        shutdown_message = options.get('shutdown_message', '')
        quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'

        self.stdout.write("Performing system checks...\n\n")
        self.check(display_num_errors=True)
        # Need to check migrations here, so can't use the
        # requires_migrations_check attribute.
        self.check_migrations()
        now = datetime.now().strftime('%B %d, %Y - %X')
        if six.PY2:
            now = now.decode(get_system_encoding())
        self.stdout.write(now)
        self.stdout.write((
            "Django version %(version)s, using settings %(settings)r\n"
            "Starting development server at http://%(addr)s:%(port)s/\n"
            "Quit the server with %(quit_command)s.\n"
        ) % {
            "version": self.get_version(),
            "settings": settings.SETTINGS_MODULE,
            "addr": '[%s]' % self.addr if self._raw_ipv6 else self.addr,
            "port": self.port,
            "quit_command": quit_command,
        })

        try:
            handler = self.get_handler(*args, **options)
            run(self.addr, int(self.port), handler,
                ipv6=self.use_ipv6, threading=threading)
        except socket.error as e:
            # Use helpful error messages instead of ugly tracebacks.
            ERRORS = {
                errno.EACCES: "You don't have permission to access that port.",
                errno.EADDRINUSE: "That port is already in use.",
                errno.EADDRNOTAVAIL: "That IP address can't be assigned to.",
            }
            try:
                error_text = ERRORS[e.errno]
            except KeyError:
                error_text = force_text(e)
            self.stderr.write("Error: %s" % error_text)
            # Need to use an OS exit because sys.exit doesn't work in a thread
            os._exit(1)
        except KeyboardInterrupt:
            if shutdown_message:
                self.stdout.write(shutdown_message)
            sys.exit(0)


# Kept for backward compatibility
BaseRunserverCommand = Command
| gpl-3.0 |
Qalthos/ansible | lib/ansible/modules/network/aci/aci_tenant_ep_retention_policy.py | 19 | 10848 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_tenant_ep_retention_policy
short_description: Manage End Point (EP) retention protocol policies (fv:EpRetPol)
description:
- Manage End Point (EP) retention protocol policies on Cisco ACI fabrics.
version_added: '2.4'
options:
tenant:
description:
- The name of an existing tenant.
type: str
aliases: [ tenant_name ]
epr_policy:
description:
- The name of the end point retention policy.
type: str
aliases: [ epr_name, name ]
bounce_age:
description:
- Bounce entry aging interval in seconds.
- Accepted values range between C(150) and C(65535); 0 is used for infinite.
- The APIC defaults to C(630) when unset during creation.
type: int
bounce_trigger:
description:
- Determines if the bounce entries are installed by RARP Flood or COOP Protocol.
- The APIC defaults to C(coop) when unset during creation.
type: str
choices: [ coop, flood ]
hold_interval:
description:
- Hold interval in seconds.
- Accepted values range between C(5) and C(65535).
- The APIC defaults to C(300) when unset during creation.
type: int
local_ep_interval:
description:
- Local end point aging interval in seconds.
- Accepted values range between C(120) and C(65535); 0 is used for infinite.
- The APIC defaults to C(900) when unset during creation.
type: int
remote_ep_interval:
description:
- Remote end point aging interval in seconds.
- Accepted values range between C(120) and C(65535); 0 is used for infinite.
- The APIC defaults to C(300) when unset during creation.
type: int
move_frequency:
description:
- Move frequency per second.
- Accepted values range between C(0) and C(65535); 0 is used for none.
- The APIC defaults to C(256) when unset during creation.
type: int
description:
description:
- Description for the End point rentention policy.
type: str
aliases: [ descr ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
seealso:
- module: aci_tenant
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(fv:EpRetPol).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Swetha Chunduri (@schunduri)
'''
EXAMPLES = r'''
- name: Add a new EPR policy
aci_tenant_ep_retention_policy:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
epr_policy: EPRPol1
bounce_age: 630
hold_interval: 300
local_ep_interval: 900
remote_ep_interval: 300
move_frequency: 256
description: test
state: present
delegate_to: localhost
- name: Remove an EPR policy
aci_tenant_ep_retention_policy:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
epr_policy: EPRPol1
state: absent
delegate_to: localhost
- name: Query an EPR policy
aci_tenant_ep_retention_policy:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
epr_policy: EPRPol1
state: query
delegate_to: localhost
register: query_result
- name: Query all EPR policies
aci_tenant_ep_retention_policy:
host: apic
username: admin
password: SomeSecretPassword
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
# Translate the module's user-facing bounce_trigger choices into the values
# the APIC expects for fvEpRetPol's bounceTrig attribute.
# BUGFIX: the argument spec in main() offers choices ['coop', 'flood'], so
# the keys here must match those choices.  The previous key 'rarp' was
# unreachable and made bounce_trigger='flood' raise a KeyError.
BOUNCE_TRIG_MAPPING = dict(
    coop='protocol',
    flood='rarp-flood',
)
def main():
    """Ansible entry point: validate parameters, then create, delete or
    query the tenant's End Point retention policy (fv:EpRetPol)."""
    argument_spec = aci_argument_spec()
    argument_spec.update(
        tenant=dict(type='str', aliases=['tenant_name']),  # Not required for querying all objects
        epr_policy=dict(type='str', aliases=['epr_name', 'name']),  # Not required for querying all objects
        bounce_age=dict(type='int'),
        bounce_trigger=dict(type='str', choices=['coop', 'flood']),
        hold_interval=dict(type='int'),
        local_ep_interval=dict(type='int'),
        remote_ep_interval=dict(type='int'),
        description=dict(type='str', aliases=['descr']),
        move_frequency=dict(type='int'),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ['state', 'absent', ['epr_policy', 'tenant']],
            ['state', 'present', ['epr_policy', 'tenant']],
        ],
    )

    epr_policy = module.params['epr_policy']

    # For each interval below, 0 means "infinite"/"none" and is translated
    # into the corresponding APIC sentinel string after range validation.
    bounce_age = module.params['bounce_age']
    if bounce_age is not None and bounce_age != 0 and bounce_age not in range(150, 65536):
        module.fail_json(msg="The bounce_age must be a value of 0 or between 150 and 65535")
    if bounce_age == 0:
        bounce_age = 'infinite'

    bounce_trigger = module.params['bounce_trigger']
    if bounce_trigger is not None:
        # Map the user-facing choice to the APIC's bounceTrig value.
        bounce_trigger = BOUNCE_TRIG_MAPPING[bounce_trigger]

    description = module.params['description']

    hold_interval = module.params['hold_interval']
    if hold_interval is not None and hold_interval not in range(5, 65536):
        module.fail_json(msg="The hold_interval must be a value between 5 and 65535")

    local_ep_interval = module.params['local_ep_interval']
    if local_ep_interval is not None and local_ep_interval != 0 and local_ep_interval not in range(120, 65536):
        module.fail_json(msg="The local_ep_interval must be a value of 0 or between 120 and 65535")
    if local_ep_interval == 0:
        local_ep_interval = "infinite"

    move_frequency = module.params['move_frequency']
    if move_frequency is not None and move_frequency not in range(65536):
        module.fail_json(msg="The move_frequency must be a value between 0 and 65535")
    if move_frequency == 0:
        move_frequency = "none"

    remote_ep_interval = module.params['remote_ep_interval']
    # BUGFIX: 0 is documented as "infinite" (and converted just below) but
    # the range check previously rejected it, unlike local_ep_interval.
    if remote_ep_interval is not None and remote_ep_interval != 0 and remote_ep_interval not in range(120, 65536):
        module.fail_json(msg="The remote_ep_interval must be a value of 0 or between 120 and 65535")
    if remote_ep_interval == 0:
        remote_ep_interval = "infinite"

    state = module.params['state']
    tenant = module.params['tenant']

    aci = ACIModule(module)
    aci.construct_url(
        root_class=dict(
            aci_class='fvTenant',
            aci_rn='tn-{0}'.format(tenant),
            module_object=tenant,
            target_filter={'name': tenant},
        ),
        subclass_1=dict(
            aci_class='fvEpRetPol',
            aci_rn='epRPol-{0}'.format(epr_policy),
            module_object=epr_policy,
            target_filter={'name': epr_policy},
        ),
    )
    aci.get_existing()

    if state == 'present':
        aci.payload(
            aci_class='fvEpRetPol',
            class_config=dict(
                name=epr_policy,
                descr=description,
                bounceAgeIntvl=bounce_age,
                bounceTrig=bounce_trigger,
                holdIntvl=hold_interval,
                localEpAgeIntvl=local_ep_interval,
                remoteEpAgeIntvl=remote_ep_interval,
                moveFreq=move_frequency,
            ),
        )

        aci.get_diff(aci_class='fvEpRetPol')

        aci.post_config()

    elif state == 'absent':
        aci.delete_config()

    aci.exit_json()


if __name__ == "__main__":
    main()
| gpl-3.0 |
sumeetsk/rank1bandits | envs/SpikeEnv.py | 1 | 1529 | import numpy as np
class SpikeEnv:
    """Rank-1 stochastic bandit environment with a single "spike".

    Row arm K//2 and column arm L//2 have their Bernoulli means raised by
    ``gapu``/``gapv`` above the base levels, so (K//2, L//2) is the unique
    optimal row-column pair.  The reward of action (i, j) is ut[i] * vt[j]
    where ut, vt are the Bernoulli draws from the last ``randomize()`` call.
    """

    def __init__(self, K=4, L=4, baseu=0.5, gapu=0.4, basev=0.5, gapv=0.4):
        self.K = K
        self.L = L
        # Mean reward vectors; the middle entry carries the spike.
        self.ubar = baseu * np.ones(K)
        self.ubar[K // 2] += gapu
        self.vbar = basev * np.ones(L)
        self.vbar[L // 2] += gapv
        # Latest Bernoulli realizations (all zeros until randomize() is called).
        self.ut = np.zeros(K)
        self.vt = np.zeros(L)

    def num_rows(self):
        return self.K

    def num_cols(self):
        return self.L

    def randomize(self):
        # Draw fresh Bernoulli row and column variables.
        # BUGFIX: dtype=int replaces np.int, an alias removed in NumPy >= 1.24.
        self.ut = np.array(np.random.uniform(size=self.K) < self.ubar, dtype=int)
        self.vt = np.array(np.random.uniform(size=self.L) < self.vbar, dtype=int)

    def reward(self, action):
        # Realized reward of action (row-column pair).
        (i, j) = tuple(action)
        return self.ut[i] * self.vt[j]

    def regret(self, action):
        # Realized regret of action relative to the best (by mean) pair.
        (i, j) = tuple(action)
        return self.ut[np.argmax(self.ubar)] * self.vt[np.argmax(self.vbar)] - self.ut[i] * self.vt[j]

    def pregret(self, action):
        # Pseudo-regret of action: gap between optimal and chosen mean rewards.
        (i, j) = tuple(action)
        return np.amax(self.ubar) * np.amax(self.vbar) - self.ubar[i] * self.vbar[j]

    def plot(self):
        # Plot row and column mean-reward vectors.
        # BUGFIX: ``plt`` was referenced but never imported anywhere in the
        # module; import locally so the rest of the class has no hard
        # dependency on matplotlib.
        import matplotlib.pyplot as plt
        fig, (left, right) = plt.subplots(ncols=2, figsize=(14, 4))
        left.plot(self.ubar)
        right.plot(self.vbar)
| gpl-3.0 |
noroutine/ansible | test/units/mock/procenv.py | 141 | 2636 | # (c) 2016, Matt Davis <mdavis@ansible.com>
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import json
from contextlib import contextmanager
from io import BytesIO, StringIO
from ansible.compat.tests import unittest
from ansible.module_utils.six import PY3
from ansible.module_utils._text import to_bytes
@contextmanager
def swap_stdin_and_argv(stdin_data='', argv_data=tuple()):
    """
    context manager that temporarily masks the test runner's values for stdin and argv
    """
    saved_stdin, saved_argv = sys.stdin, sys.argv
    if PY3:
        fake_stream = StringIO(stdin_data)
        # Modules reading the raw byte stream go through .buffer on py3.
        fake_stream.buffer = BytesIO(to_bytes(stdin_data))
    else:
        fake_stream = BytesIO(to_bytes(stdin_data))
    sys.stdin = fake_stream
    sys.argv = argv_data
    try:
        yield
    finally:
        sys.stdin = saved_stdin
        sys.argv = saved_argv
@contextmanager
def swap_stdout():
    """
    context manager that temporarily replaces stdout for tests that need to verify output
    """
    saved_stdout = sys.stdout
    # Text stream on py3, byte stream on py2.
    fake_stream = StringIO() if PY3 else BytesIO()
    sys.stdout = fake_stream
    try:
        yield fake_stream
    finally:
        sys.stdout = saved_stdout
class ModuleTestCase(unittest.TestCase):
    """TestCase base that feeds ANSIBLE_MODULE_ARGS to the module via stdin."""

    def setUp(self, module_args=None):
        if module_args is None:
            module_args = {}
        payload = json.dumps(dict(ANSIBLE_MODULE_ARGS=module_args))
        # unittest doesn't have a clean place to use a context manager,
        # so we have to enter/exit manually
        self.stdin_swap = swap_stdin_and_argv(stdin_data=payload)
        self.stdin_swap.__enter__()

    def tearDown(self):
        # unittest doesn't have a clean place to use a context manager,
        # so we have to enter/exit manually
        self.stdin_swap.__exit__(None, None, None)
| gpl-3.0 |
nowls/gnuradio | docs/doxygen/doxyxml/base.py | 333 | 6794 | #
# Copyright 2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
A base class is created.
Classes based upon this are used to make more user-friendly interfaces
to the doxygen xml docs than the generated classes provide.
"""
import os
import pdb
from xml.parsers.expat import ExpatError
from generated import compound
class Base(object):
    # NOTE: StandardError exists only on Python 2; this module targets py2.

    class Duplicate(StandardError):
        """Marker/exception for member names that occur more than once."""
        pass

    class NoSuchMember(StandardError):
        """Raised when a requested member cannot be found."""
        pass

    class ParsingError(StandardError):
        """Raised when the doxygen XML cannot be parsed."""
        pass
    def __init__(self, parse_data, top=None):
        """Wrap doxygen parse data.

        ``top`` is the root object that owns the shared refid table and the
        xml path; when omitted, this instance becomes the root itself.
        """
        # Lazy-parse state flags.
        self._parsed = False
        self._error = False
        self._parse_data = parse_data
        self._members = []
        # Caches populated on demand (see _get_dict_members / in_category).
        self._dict_members = {}
        self._in_category = {}
        self._data = {}
        if top is not None:
            self._xml_path = top._xml_path
        # Set up holder of references
        else:
            top = self
            self._refs = {}
            self._xml_path = parse_data
        self.top = top
@classmethod
def from_refid(cls, refid, top=None):
""" Instantiate class from a refid rather than parsing object. """
# First check to see if its already been instantiated.
if top is not None and refid in top._refs:
return top._refs[refid]
# Otherwise create a new instance and set refid.
inst = cls(None, top=top)
inst.refid = refid
inst.add_ref(inst)
return inst
@classmethod
def from_parse_data(cls, parse_data, top=None):
refid = getattr(parse_data, 'refid', None)
if refid is not None and top is not None and refid in top._refs:
return top._refs[refid]
inst = cls(parse_data, top=top)
if refid is not None:
inst.refid = refid
inst.add_ref(inst)
return inst
def add_ref(self, obj):
if hasattr(obj, 'refid'):
self.top._refs[obj.refid] = obj
mem_classes = []
def get_cls(self, mem):
for cls in self.mem_classes:
if cls.can_parse(mem):
return cls
raise StandardError(("Did not find a class for object '%s'." \
% (mem.get_name())))
def convert_mem(self, mem):
try:
cls = self.get_cls(mem)
converted = cls.from_parse_data(mem, self.top)
if converted is None:
raise StandardError('No class matched this object.')
self.add_ref(converted)
return converted
except StandardError, e:
print e
@classmethod
def includes(cls, inst):
return isinstance(inst, cls)
@classmethod
def can_parse(cls, obj):
return False
def _parse(self):
self._parsed = True
def _get_dict_members(self, cat=None):
"""
For given category a dictionary is returned mapping member names to
members of that category. For names that are duplicated the name is
mapped to None.
"""
self.confirm_no_error()
if cat not in self._dict_members:
new_dict = {}
for mem in self.in_category(cat):
if mem.name() not in new_dict:
new_dict[mem.name()] = mem
else:
new_dict[mem.name()] = self.Duplicate
self._dict_members[cat] = new_dict
return self._dict_members[cat]
def in_category(self, cat):
self.confirm_no_error()
if cat is None:
return self._members
if cat not in self._in_category:
self._in_category[cat] = [mem for mem in self._members
if cat.includes(mem)]
return self._in_category[cat]
def get_member(self, name, cat=None):
self.confirm_no_error()
# Check if it's in a namespace or class.
bits = name.split('::')
first = bits[0]
rest = '::'.join(bits[1:])
member = self._get_dict_members(cat).get(first, self.NoSuchMember)
# Raise any errors that are returned.
if member in set([self.NoSuchMember, self.Duplicate]):
raise member()
if rest:
return member.get_member(rest, cat=cat)
return member
def has_member(self, name, cat=None):
try:
mem = self.get_member(name, cat=cat)
return True
except self.NoSuchMember:
return False
def data(self):
self.confirm_no_error()
return self._data
def members(self):
self.confirm_no_error()
return self._members
def process_memberdefs(self):
mdtss = []
for sec in self._retrieved_data.compounddef.sectiondef:
mdtss += sec.memberdef
# At the moment we lose all information associated with sections.
# Sometimes a memberdef is in several sectiondef.
# We make sure we don't get duplicates here.
uniques = set([])
for mem in mdtss:
converted = self.convert_mem(mem)
pair = (mem.name, mem.__class__)
if pair not in uniques:
uniques.add(pair)
self._members.append(converted)
def retrieve_data(self):
filename = os.path.join(self._xml_path, self.refid + '.xml')
try:
self._retrieved_data = compound.parse(filename)
except ExpatError:
print('Error in xml in file %s' % filename)
self._error = True
self._retrieved_data = None
def check_parsed(self):
if not self._parsed:
self._parse()
def confirm_no_error(self):
self.check_parsed()
if self._error:
raise self.ParsingError()
def error(self):
self.check_parsed()
return self._error
def name(self):
# first see if we can do it without processing.
if self._parse_data is not None:
return self._parse_data.name
self.check_parsed()
return self._retrieved_data.compounddef.name
| gpl-3.0 |
munyirik/python | cpython/Lib/multiprocessing/heap.py | 12 | 8325 | #
# Module which supports allocation of memory from an mmap
#
# multiprocessing/heap.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
import bisect
import mmap
import os
import sys
import tempfile
import threading
from . import context
from . import reduction
from . import util
__all__ = ['BufferWrapper']
#
# Inheritable class which wraps an mmap, and from which blocks can be allocated
#
if sys.platform == 'win32':
    import _winapi

    class Arena(object):
        """Windows arena: an anonymous mmap identified by a unique tagname,
        so child processes can reopen it by name after pickling."""

        # Process-wide generator of candidate tagname suffixes.
        _rand = tempfile._RandomNameSequence()

        def __init__(self, size):
            self.size = size
            # Try up to 100 random tagnames until we create a brand-new map.
            for i in range(100):
                name = 'pym-%d-%s' % (os.getpid(), next(self._rand))
                buf = mmap.mmap(-1, size, tagname=name)
                if _winapi.GetLastError() == 0:
                    break
                # We have reopened a preexisting mmap.
                buf.close()
            else:
                raise FileExistsError('Cannot find name for new mmap')
            self.name = name
            self.buffer = buf
            self._state = (self.size, self.name)

        def __getstate__(self):
            # Only picklable while spawning a child process.
            context.assert_spawning(self)
            return self._state

        def __setstate__(self, state):
            # Reopen the existing map by tagname in the child process.
            self.size, self.name = self._state = state
            self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
            # XXX Temporarily preventing buildbot failures while determining
            # XXX the correct long-term fix. See issue 23060
            #assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS

else:

    class Arena(object):
        """POSIX arena: an mmap backed by an unlinked temporary file, so the
        fd can be passed to child processes via fd duplication."""

        def __init__(self, size, fd=-1):
            self.size = size
            self.fd = fd
            if fd == -1:
                # Create and immediately unlink the backing file; the fd
                # keeps it alive until this Arena is finalized.
                self.fd, name = tempfile.mkstemp(
                    prefix='pym-%d-'%os.getpid(), dir=util.get_temp_dir())
                os.unlink(name)
                util.Finalize(self, os.close, (self.fd,))
                with open(self.fd, 'wb', closefd=False) as f:
                    # Pre-fill the file with zeros, 1 MiB at a time, so the
                    # mmap below has real backing storage of the right size.
                    bs = 1024 * 1024
                    if size >= bs:
                        zeros = b'\0' * bs
                        for _ in range(size // bs):
                            f.write(zeros)
                        del zeros
                    f.write(b'\0' * (size % bs))
                    assert f.tell() == size
            self.buffer = mmap.mmap(self.fd, self.size)

    def reduce_arena(a):
        """Pickle helper: ship the fd to the child via DupFd."""
        if a.fd == -1:
            raise ValueError('Arena is unpicklable because '
                             'forking was enabled when it was created')
        return rebuild_arena, (a.size, reduction.DupFd(a.fd))

    def rebuild_arena(size, dupfd):
        """Unpickle helper: rebuild the Arena around the duplicated fd."""
        return Arena(size, dupfd.detach())

    reduction.register(Arena, reduce_arena)
#
# Class allowing allocation of chunks of memory from arenas
#
class Heap(object):
    """Best-fit allocator carving (arena, start, stop) blocks out of
    mmap-backed Arena objects.

    Free blocks are indexed twice: by length (for best-fit lookup) and by
    their start/stop offsets (so freed neighbours can be coalesced).  All
    bookkeeping is guarded by self._lock; see free() for the async-GC
    caveat.  Not fork-safe: malloc() reinitializes after a fork.
    """

    # Allocation granularity in bytes; must be a power of 2.
    _alignment = 8

    def __init__(self, size=mmap.PAGESIZE):
        self._lastpid = os.getpid()       # used to detect use after fork
        self._lock = threading.Lock()
        self._size = size                 # size hint for the next new arena
        self._lengths = []                # sorted lengths of free blocks
        self._len_to_seq = {}             # length -> list of free blocks
        self._start_to_block = {}         # (arena, start) -> free block
        self._stop_to_block = {}          # (arena, stop) -> free block
        self._allocated_blocks = set()
        self._arenas = []
        # list of pending blocks to free - see free() comment below
        self._pending_free_blocks = []
    @staticmethod
    def _roundup(n, alignment):
        # alignment must be a power of 2
        mask = alignment - 1
        return (n + mask) & ~mask
    def _malloc(self, size):
        # returns a large enough block -- it might be much larger
        i = bisect.bisect_left(self._lengths, size)
        if i == len(self._lengths):
            # No free block fits: create a new arena.  Doubling the size
            # hint keeps the number of arenas logarithmic in total usage.
            length = self._roundup(max(self._size, size), mmap.PAGESIZE)
            self._size *= 2
            util.info('allocating a new mmap of length %d', length)
            arena = Arena(length)
            self._arenas.append(arena)
            return (arena, 0, length)
        else:
            # Take the smallest free block that fits and deregister it
            # from all three indexes.
            length = self._lengths[i]
            seq = self._len_to_seq[length]
            block = seq.pop()
            if not seq:
                del self._len_to_seq[length], self._lengths[i]
        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]
        return block
    def _free(self, block):
        # free location and try to merge with neighbours
        (arena, start, stop) = block
        try:
            prev_block = self._stop_to_block[(arena, start)]
        except KeyError:
            pass
        else:
            # A free block ends exactly where ours starts: absorb it.
            start, _ = self._absorb(prev_block)
        try:
            next_block = self._start_to_block[(arena, stop)]
        except KeyError:
            pass
        else:
            # A free block starts exactly where ours ends: absorb it.
            _, stop = self._absorb(next_block)
        block = (arena, start, stop)
        length = stop - start
        try:
            self._len_to_seq[length].append(block)
        except KeyError:
            self._len_to_seq[length] = [block]
            bisect.insort(self._lengths, length)
        self._start_to_block[(arena, start)] = block
        self._stop_to_block[(arena, stop)] = block
    def _absorb(self, block):
        # deregister this block so it can be merged with a neighbour
        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]
        length = stop - start
        seq = self._len_to_seq[length]
        seq.remove(block)
        if not seq:
            del self._len_to_seq[length]
            self._lengths.remove(length)
        return start, stop
    def _free_pending_blocks(self):
        # Free all the blocks in the pending list - called with the lock held.
        while True:
            try:
                block = self._pending_free_blocks.pop()
            except IndexError:
                break
            self._allocated_blocks.remove(block)
            self._free(block)
    def free(self, block):
        # free a block returned by malloc()
        # Since free() can be called asynchronously by the GC, it could happen
        # that it's called while self._lock is held: in that case,
        # self._lock.acquire() would deadlock (issue #12352). To avoid that, a
        # trylock is used instead, and if the lock can't be acquired
        # immediately, the block is added to a list of blocks to be freed
        # synchronously sometimes later from malloc() or free(), by calling
        # _free_pending_blocks() (appending and retrieving from a list is not
        # strictly thread-safe but under cPython it's atomic thanks to the GIL).
        assert os.getpid() == self._lastpid
        if not self._lock.acquire(False):
            # can't acquire the lock right now, add the block to the list of
            # pending blocks to free
            self._pending_free_blocks.append(block)
        else:
            # we hold the lock
            try:
                self._free_pending_blocks()
                self._allocated_blocks.remove(block)
                self._free(block)
            finally:
                self._lock.release()
    def malloc(self, size):
        # return a block of right size (possibly rounded up)
        assert 0 <= size < sys.maxsize
        if os.getpid() != self._lastpid:
            self.__init__()                     # reinitialize after fork
        with self._lock:
            self._free_pending_blocks()
            size = self._roundup(max(size,1), self._alignment)
            (arena, start, stop) = self._malloc(size)
            new_stop = start + size
            if new_stop < stop:
                # Give back the unused tail of the (possibly larger) block.
                self._free((arena, new_stop, stop))
            block = (arena, start, new_stop)
            self._allocated_blocks.add(block)
            return block
#
# Class representing a chunk of an mmap -- can be inherited by child process
#
class BufferWrapper(object):
    """Wrapper around a chunk of an mmap -- inheritable by child processes."""

    # One process-wide heap shared by every BufferWrapper instance.
    _heap = Heap()

    def __init__(self, size):
        """Reserve *size* bytes from the shared heap.

        The block is handed back to the heap automatically when this
        wrapper is garbage collected.
        """
        assert 0 <= size < sys.maxsize
        blk = BufferWrapper._heap.malloc(size)
        self._state = (blk, size)
        util.Finalize(self, BufferWrapper._heap.free, args=(blk,))

    def create_memoryview(self):
        """Return a zero-copy view of exactly the requested bytes."""
        blk, length = self._state
        arena, begin, _stop = blk
        return memoryview(arena.buffer)[begin:begin + length]
| bsd-3-clause |
imply/chuu | third_party/protobuf/python/google/protobuf/internal/cpp_message.py | 223 | 23539 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains helper functions used to create protocol message classes from
Descriptor objects at runtime backed by the protocol buffer C++ API.
"""
__author__ = 'petar@google.com (Petar Petrov)'
import copy_reg
import operator
from google.protobuf.internal import _net_proto2___python
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import message
# Short private aliases for the label/type enum values exposed by the C++
# wrapper module; used throughout the property/extension helpers below.
_LABEL_REPEATED = _net_proto2___python.LABEL_REPEATED
_LABEL_OPTIONAL = _net_proto2___python.LABEL_OPTIONAL
_CPPTYPE_MESSAGE = _net_proto2___python.CPPTYPE_MESSAGE
_TYPE_MESSAGE = _net_proto2___python.TYPE_MESSAGE
def GetDescriptorPool():
  """Creates a new DescriptorPool C++ object.

  Returns:
    A fresh, empty C++ descriptor pool (distinct from the module-level _pool).
  """
  return _net_proto2___python.NewCDescriptorPool()
# Module-level descriptor pool shared by the lookup helpers below.
_pool = GetDescriptorPool()
def GetFieldDescriptor(full_field_name):
  """Searches for a field descriptor given a full field name.

  Args:
    full_field_name: Fully qualified name, e.g. 'pkg.Message.field'.

  Returns:
    The C++ field descriptor found in the shared module-level pool.
  """
  return _pool.FindFieldByName(full_field_name)
def BuildFile(content):
  """Registers a new proto file in the underlying C++ descriptor pool.

  Args:
    content: Serialized FileDescriptorProto content for the file.
  """
  _net_proto2___python.BuildFile(content)
def GetExtensionDescriptor(full_extension_name):
  """Searches for extension descriptor given a full field name.

  Args:
    full_extension_name: Fully qualified extension name.

  Returns:
    The C++ extension descriptor from the shared module-level pool.
  """
  return _pool.FindExtensionByName(full_extension_name)
def NewCMessage(full_message_name):
  """Creates a new C++ protocol message by its name.

  Args:
    full_message_name: Fully qualified message type name.

  Returns:
    A new, empty C++-backed message instance.
  """
  return _net_proto2___python.NewCMessage(full_message_name)
def ScalarProperty(cdescriptor):
  """Returns a read/write scalar property for the given descriptor.

  Reads and writes are delegated straight to the instance's underlying
  C++ message via GetScalar/SetScalar.
  """
  def _get(self):
    return self._cmsg.GetScalar(cdescriptor)
  def _set(self, value):
    self._cmsg.SetScalar(cdescriptor, value)
  return property(_get, _set)
def CompositeProperty(cdescriptor, message_type):
  """Returns a read-only Python property for the given composite field.

  The wrapper for the sub-message is created on first access and cached
  in the instance's _composite_fields dict.
  """
  def _get(self):
    cache = self._composite_fields
    sub = cache.get(cdescriptor.name, None)
    if sub is None:
      sub = message_type._concrete_class(
          __cmessage=self._cmsg.NewSubMessage(cdescriptor))
      cache[cdescriptor.name] = sub
    return sub
  return property(_get)
class RepeatedScalarContainer(object):
  """Container for repeated scalar fields.

  List-like view over a repeated scalar field of the underlying C++
  message.  Mutating operations that have no direct C++ equivalent
  (insert, remove, item assignment) work by fetching the whole Python
  list via a full slice, editing it, and assigning it back.
  """
  __slots__ = ['_message', '_cfield_descriptor', '_cmsg']
  def __init__(self, msg, cfield_descriptor):
    self._message = msg
    self._cmsg = msg._cmsg
    self._cfield_descriptor = cfield_descriptor
  def append(self, value):
    """Appends one scalar value to the field."""
    self._cmsg.AddRepeatedScalar(
        self._cfield_descriptor, value)
  def extend(self, sequence):
    """Appends every element of *sequence*."""
    for element in sequence:
      self.append(element)
  def insert(self, key, value):
    """Inserts *value* at index *key* (read-modify-write of the full list)."""
    values = self[slice(None, None, None)]
    values.insert(key, value)
    self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)
  def remove(self, value):
    """Removes the first occurrence of *value*."""
    values = self[slice(None, None, None)]
    values.remove(value)
    self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)
  def __setitem__(self, key, value):
    values = self[slice(None, None, None)]
    values[key] = value
    self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)
  def __getitem__(self, key):
    # A full slice returns the whole field as a plain Python list.
    return self._cmsg.GetRepeatedScalar(self._cfield_descriptor, key)
  def __delitem__(self, key):
    self._cmsg.DeleteRepeatedField(self._cfield_descriptor, key)
  def __len__(self):
    return len(self[slice(None, None, None)])
  def __eq__(self, other):
    if self is other:
      return True
    # Python 2 only: operator.isSequenceType was removed in Python 3.
    if not operator.isSequenceType(other):
      raise TypeError(
          'Can only compare repeated scalar fields against sequences.')
    # We are presumably comparing against some other sequence type.
    return other == self[slice(None, None, None)]
  def __ne__(self, other):
    return not self == other
  def __hash__(self):
    raise TypeError('unhashable object')
  def sort(self, *args, **kwargs):
    # Maintain compatibility with the previous interface.
    if 'sort_function' in kwargs:
      kwargs['cmp'] = kwargs.pop('sort_function')
    self._cmsg.AssignRepeatedScalar(self._cfield_descriptor,
                                    sorted(self, *args, **kwargs))
def RepeatedScalarProperty(cdescriptor):
  """Returns a Python property the given repeated scalar field.

  The getter lazily creates and caches a RepeatedScalarContainer; the
  setter always raises, since repeated fields cannot be assigned.
  """
  def _get(self):
    cache = self._composite_fields
    container = cache.get(cdescriptor.name, None)
    if container is None:
      container = RepeatedScalarContainer(self, cdescriptor)
      cache[cdescriptor.name] = container
    return container
  def _set(self, unused_value):
    raise AttributeError('Assignment not allowed to repeated field '
                         '"%s" in protocol message object.' % cdescriptor.name)
  return property(
      _get, _set,
      doc='Magic attribute generated for "%s" proto field.' % cdescriptor.name)
class RepeatedCompositeContainer(object):
  """Container for repeated composite fields.

  List-like view over a repeated message field.  Elements handed out are
  Python wrappers around C++ sub-messages; the wrappers keep *_message*
  alive as their owner.
  """
  __slots__ = ['_message', '_subclass', '_cfield_descriptor', '_cmsg']
  def __init__(self, msg, cfield_descriptor, subclass):
    self._message = msg
    self._cmsg = msg._cmsg
    self._subclass = subclass
    self._cfield_descriptor = cfield_descriptor
  def add(self, **kwargs):
    """Appends a new empty sub-message and returns its wrapper."""
    cmessage = self._cmsg.AddMessage(self._cfield_descriptor)
    return self._subclass(__cmessage=cmessage, __owner=self._message, **kwargs)
  def extend(self, elem_seq):
    """Extends by appending the given sequence of elements of the same type
    as this one, copying each individual message.
    """
    for message in elem_seq:
      self.add().MergeFrom(message)
  def remove(self, value):
    # TODO(protocol-devel): This is inefficient as it needs to generate a
    # message pointer for each message only to do index().  Move this to a C++
    # extension function.
    self.__delitem__(self[slice(None, None, None)].index(value))
  def MergeFrom(self, other):
    """Appends a copy of every message in *other*."""
    for message in other[:]:
      self.add().MergeFrom(message)
  def __getitem__(self, key):
    # Integer keys return one wrapper; slices return a list of wrappers.
    cmessages = self._cmsg.GetRepeatedMessage(
        self._cfield_descriptor, key)
    subclass = self._subclass
    if not isinstance(cmessages, list):
      return subclass(__cmessage=cmessages, __owner=self._message)
    return [subclass(__cmessage=m, __owner=self._message) for m in cmessages]
  def __delitem__(self, key):
    self._cmsg.DeleteRepeatedField(
        self._cfield_descriptor, key)
  def __len__(self):
    return self._cmsg.FieldLength(self._cfield_descriptor)
  def __eq__(self, other):
    """Compares the current instance with another one."""
    if self is other:
      return True
    if not isinstance(other, self.__class__):
      raise TypeError('Can only compare repeated composite fields against '
                      'other repeated composite fields.')
    messages = self[slice(None, None, None)]
    other_messages = other[slice(None, None, None)]
    return messages == other_messages
  def __hash__(self):
    raise TypeError('unhashable object')
  def sort(self, cmp=None, key=None, reverse=False, **kwargs):
    """Sorts the field in place without copying messages.

    Python 2 only (uses the cmp= parameter of list.sort and relies on
    range() returning a list).  Computes the sorted order of indexes,
    then applies it as a transposition of C++ elements.
    """
    # Maintain compatibility with the old interface.
    if cmp is None and 'sort_function' in kwargs:
      cmp = kwargs.pop('sort_function')
    # The cmp function, if provided, is passed the results of the key function,
    # so we only need to wrap one of them.
    if key is None:
      index_key = self.__getitem__
    else:
      index_key = lambda i: key(self[i])
    # Sort the list of current indexes by the underlying object.
    indexes = range(len(self))
    indexes.sort(cmp=cmp, key=index_key, reverse=reverse)
    # Apply the transposition.
    for dest, src in enumerate(indexes):
      if dest == src:
        continue
      self._cmsg.SwapRepeatedFieldElements(self._cfield_descriptor, dest, src)
      # Don't swap the same value twice.
      indexes[src] = src
def RepeatedCompositeProperty(cdescriptor, message_type):
  """Returns a Python property for the given repeated composite field.

  The getter lazily creates and caches a RepeatedCompositeContainer;
  the setter always raises, since repeated fields cannot be assigned.
  """
  def _get(self):
    cache = self._composite_fields
    container = cache.get(cdescriptor.name, None)
    if container is None:
      container = RepeatedCompositeContainer(
          self, cdescriptor, message_type._concrete_class)
      cache[cdescriptor.name] = container
    return container
  def _set(self, unused_value):
    raise AttributeError('Assignment not allowed to repeated field '
                         '"%s" in protocol message object.' % cdescriptor.name)
  return property(
      _get, _set,
      doc='Magic attribute generated for "%s" proto field.' % cdescriptor.name)
class ExtensionDict(object):
  """Extension dictionary added to each protocol message.

  Keyed by FieldDescriptor objects.  Scalar optional extensions are
  read/written directly; repeated and message-typed extensions get
  lazily-created handle objects cached in self._values.
  """
  def __init__(self, msg):
    self._message = msg
    self._cmsg = msg._cmsg
    self._values = {}      # FieldDescriptor -> cached value/handle
  def __setitem__(self, extension, value):
    """Direct assignment; only valid for optional scalar extensions."""
    # Imported here to avoid a circular import with descriptor.py.
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))
    cdescriptor = extension._cdescriptor
    if (cdescriptor.label != _LABEL_OPTIONAL or
        cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
      raise TypeError('Extension %r is repeated and/or a composite type.' % (
          extension.full_name,))
    self._cmsg.SetScalar(cdescriptor, value)
    self._values[extension] = value
  def __getitem__(self, extension):
    """Returns the extension value, creating and caching a handle for
    repeated or message-typed extensions on first access."""
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))
    cdescriptor = extension._cdescriptor
    if (cdescriptor.label != _LABEL_REPEATED and
        cdescriptor.cpp_type != _CPPTYPE_MESSAGE):
      # Optional scalar: read straight from the C++ message.
      return self._cmsg.GetScalar(cdescriptor)
    ext = self._values.get(extension, None)
    if ext is not None:
      return ext
    ext = self._CreateNewHandle(extension)
    self._values[extension] = ext
    return ext
  def ClearExtension(self, extension):
    """Clears the extension field and drops any cached handle."""
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))
    self._cmsg.ClearFieldByDescriptor(extension._cdescriptor)
    if extension in self._values:
      del self._values[extension]
  def HasExtension(self, extension):
    """Returns True if the extension field is set on the message."""
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))
    return self._cmsg.HasFieldByDescriptor(extension._cdescriptor)
  def _FindExtensionByName(self, name):
    """Tries to find a known extension with the specified name.

    Args:
      name: Extension full name.

    Returns:
      Extension field descriptor.
    """
    return self._message._extensions_by_name.get(name, None)
  def _CreateNewHandle(self, extension):
    """Builds the wrapper/handle object for a non-scalar extension."""
    cdescriptor = extension._cdescriptor
    if (cdescriptor.label != _LABEL_REPEATED and
        cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
      # Optional sub-message extension.
      cmessage = self._cmsg.NewSubMessage(cdescriptor)
      return extension.message_type._concrete_class(__cmessage=cmessage)
    if cdescriptor.label == _LABEL_REPEATED:
      if cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
        return RepeatedCompositeContainer(
            self._message, cdescriptor, extension.message_type._concrete_class)
      else:
        return RepeatedScalarContainer(self._message, cdescriptor)
    # This shouldn't happen!
    assert False
    return None
def NewMessage(bases, message_descriptor, dictionary):
  """Creates a new protocol message *class*.

  Populates *dictionary* in place (nested-extension attributes, enum
  values, C++ field descriptors and __slots__) before the metaclass
  builds the class, and returns the unchanged *bases* tuple.
  """
  _AddClassAttributesForNestedExtensions(message_descriptor, dictionary)
  _AddEnumValues(message_descriptor, dictionary)
  _AddDescriptors(message_descriptor, dictionary)
  return bases
def InitMessage(message_descriptor, cls):
  """Constructs a new message instance (called before instance's __init__).

  Attaches __init__, the standard message methods, and extension
  constants to *cls*, and registers pickling support.
  """
  cls._extensions_by_name = {}
  _AddInitMethod(message_descriptor, cls)
  _AddMessageMethods(message_descriptor, cls)
  _AddPropertiesForExtensions(message_descriptor, cls)
  # Make instances picklable via their __getstate__ dict.
  copy_reg.pickle(cls, lambda obj: (cls, (), obj.__getstate__()))
def _AddDescriptors(message_descriptor, dictionary):
  """Sets up a new protocol message class dictionary.

  Args:
    message_descriptor: A Descriptor instance describing this message type.
    dictionary: Class dictionary to which we'll add a '__slots__' entry.
  """
  # Map each field name to its C++ field descriptor.
  cdescriptors = {}
  for field in message_descriptor.fields:
    cdescriptors[field.name] = GetFieldDescriptor(field.full_name)
  dictionary['__descriptors'] = cdescriptors
  # One slot per field plus the fixed bookkeeping attributes.
  dictionary['__slots__'] = list(cdescriptors.iterkeys()) + [
      '_cmsg', '_owner', '_composite_fields', 'Extensions', '_HACK_REFCOUNTS']
def _AddEnumValues(message_descriptor, dictionary):
  """Sets class-level attributes for all enum fields defined in this message.

  Args:
    message_descriptor: Descriptor object for this message type.
    dictionary: Class dictionary that should be populated.
  """
  for enum_desc in message_descriptor.enum_types:
    # Expose the enum type itself...
    dictionary[enum_desc.name] = enum_type_wrapper.EnumTypeWrapper(enum_desc)
    # ...and each of its values as a plain integer constant.
    for value_desc in enum_desc.values:
      dictionary[value_desc.name] = value_desc.number
def _AddClassAttributesForNestedExtensions(message_descriptor, dictionary):
  """Adds class attributes for the nested extensions."""
  for name, field in message_descriptor.extensions_by_name.iteritems():
    # Extension names must not collide with anything already in the dict.
    assert name not in dictionary
    dictionary[name] = field
def _AddInitMethod(message_descriptor, cls):
  """Adds an __init__ method to cls.

  Also attaches one property per field plus NAME_FIELD_NUMBER constants
  to the class itself.
  """
  # Create and attach message field properties to the message class.
  # This can be done just once per message class, since property setters and
  # getters are passed the message instance.
  # This makes message instantiation extremely fast, and at the same time it
  # doesn't require the creation of property objects for each message instance,
  # which saves a lot of memory.
  for field in message_descriptor.fields:
    field_cdescriptor = cls.__descriptors[field.name]
    if field.label == _LABEL_REPEATED:
      if field.cpp_type == _CPPTYPE_MESSAGE:
        value = RepeatedCompositeProperty(field_cdescriptor, field.message_type)
      else:
        value = RepeatedScalarProperty(field_cdescriptor)
    elif field.cpp_type == _CPPTYPE_MESSAGE:
      value = CompositeProperty(field_cdescriptor, field.message_type)
    else:
      value = ScalarProperty(field_cdescriptor)
    setattr(cls, field.name, value)
    # Attach a constant with the field number.
    constant_name = field.name.upper() + '_FIELD_NUMBER'
    setattr(cls, constant_name, field.number)
  def Init(self, **kwargs):
    """Message constructor."""
    # '__cmessage' wraps an existing C++ message instead of creating one.
    cmessage = kwargs.pop('__cmessage', None)
    if cmessage:
      self._cmsg = cmessage
    else:
      self._cmsg = NewCMessage(message_descriptor.full_name)
    # Keep a reference to the owner, as the owner keeps a reference to the
    # underlying protocol buffer message.
    owner = kwargs.pop('__owner', None)
    if owner:
      self._owner = owner
    if message_descriptor.is_extendable:
      self.Extensions = ExtensionDict(self)
    else:
      # Reference counting in the C++ code is broken and depends on
      # the Extensions reference to keep this object alive during unit
      # tests (see b/4856052).  Remove this once b/4945904 is fixed.
      self._HACK_REFCOUNTS = self
    self._composite_fields = {}
    # Remaining keyword arguments are initial field values.
    for field_name, field_value in kwargs.iteritems():
      field_cdescriptor = self.__descriptors.get(field_name, None)
      if not field_cdescriptor:
        raise ValueError('Protocol message has no "%s" field.' % field_name)
      if field_cdescriptor.label == _LABEL_REPEATED:
        if field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
          # NOTE: rebinds field_name to the container before filling it.
          field_name = getattr(self, field_name)
          for val in field_value:
            field_name.add().MergeFrom(val)
        else:
          getattr(self, field_name).extend(field_value)
      elif field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
        getattr(self, field_name).MergeFrom(field_value)
      else:
        setattr(self, field_name, field_value)
  # Scrub module/doc metadata before installing as the class constructor.
  Init.__module__ = None
  Init.__doc__ = None
  cls.__init__ = Init
def _IsMessageSetExtension(field):
  """Checks if a field is a message set extension.

  True when *field* is an optional message-typed extension of a
  containing type that uses message_set_wire_format and whose message
  type equals its extension scope.
  """
  if not field.is_extension:
    return False
  if not field.containing_type.has_options:
    return False
  if not field.containing_type.GetOptions().message_set_wire_format:
    return False
  return (field.type == _TYPE_MESSAGE and
          field.message_type == field.extension_scope and
          field.label == _LABEL_OPTIONAL)
def _AddMessageMethods(message_descriptor, cls):
  """Adds the methods to a protocol message class.

  Every function defined in this body is attached to *cls* via the
  locals() loop near the end; RegisterExtension and FromString are
  attached explicitly as static methods.
  """
  if message_descriptor.is_extendable:
    def ClearExtension(self, extension):
      self.Extensions.ClearExtension(extension)
    def HasExtension(self, extension):
      return self.Extensions.HasExtension(extension)
  def HasField(self, field_name):
    return self._cmsg.HasField(field_name)
  def ClearField(self, field_name):
    """Clears one field, detaching any cached composite wrapper first."""
    child_cmessage = None
    if field_name in self._composite_fields:
      child_field = self._composite_fields[field_name]
      del self._composite_fields[field_name]
      child_cdescriptor = self.__descriptors[field_name]
      # TODO(anuraag): Support clearing repeated message fields as well.
      if (child_cdescriptor.label != _LABEL_REPEATED and
          child_cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
        child_field._owner = None
        child_cmessage = child_field._cmsg
    if child_cmessage is not None:
      self._cmsg.ClearField(field_name, child_cmessage)
    else:
      self._cmsg.ClearField(field_name)
  def Clear(self):
    """Clears all fields, releasing cached sub-message wrappers."""
    cmessages_to_release = []
    for field_name, child_field in self._composite_fields.iteritems():
      child_cdescriptor = self.__descriptors[field_name]
      # TODO(anuraag): Support clearing repeated message fields as well.
      if (child_cdescriptor.label != _LABEL_REPEATED and
          child_cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
        child_field._owner = None
        cmessages_to_release.append((child_cdescriptor, child_field._cmsg))
    self._composite_fields.clear()
    self._cmsg.Clear(cmessages_to_release)
  def IsInitialized(self, errors=None):
    """True if all required fields are set; optionally collects errors."""
    if self._cmsg.IsInitialized():
      return True
    if errors is not None:
      errors.extend(self.FindInitializationErrors());
    return False
  def SerializeToString(self):
    if not self.IsInitialized():
      raise message.EncodeError(
          'Message %s is missing required fields: %s' % (
          self._cmsg.full_name, ','.join(self.FindInitializationErrors())))
    return self._cmsg.SerializeToString()
  def SerializePartialToString(self):
    # Like SerializeToString but without the required-fields check.
    return self._cmsg.SerializePartialToString()
  def ParseFromString(self, serialized):
    self.Clear()
    self.MergeFromString(serialized)
  def MergeFromString(self, serialized):
    byte_size = self._cmsg.MergeFromString(serialized)
    if byte_size < 0:
      raise message.DecodeError('Unable to merge from string.')
    return byte_size
  def MergeFrom(self, msg):
    if not isinstance(msg, cls):
      raise TypeError(
          "Parameter to MergeFrom() must be instance of same class: "
          "expected %s got %s." % (cls.__name__, type(msg).__name__))
    self._cmsg.MergeFrom(msg._cmsg)
  def CopyFrom(self, msg):
    self._cmsg.CopyFrom(msg._cmsg)
  def ByteSize(self):
    return self._cmsg.ByteSize()
  def SetInParent(self):
    return self._cmsg.SetInParent()
  def ListFields(self):
    """Returns (descriptor, value) pairs for all set fields, sorted by
    field number."""
    all_fields = []
    field_list = self._cmsg.ListFields()
    fields_by_name = cls.DESCRIPTOR.fields_by_name
    for is_extension, field_name in field_list:
      if is_extension:
        extension = cls._extensions_by_name[field_name]
        all_fields.append((extension, self.Extensions[extension]))
      else:
        field_descriptor = fields_by_name[field_name]
        all_fields.append(
            (field_descriptor, getattr(self, field_name)))
    all_fields.sort(key=lambda item: item[0].number)
    return all_fields
  def FindInitializationErrors(self):
    return self._cmsg.FindInitializationErrors()
  def __str__(self):
    return self._cmsg.DebugString()
  def __eq__(self, other):
    if self is other:
      return True
    if not isinstance(other, self.__class__):
      return False
    return self.ListFields() == other.ListFields()
  def __ne__(self, other):
    return not self == other
  def __hash__(self):
    raise TypeError('unhashable object')
  def __unicode__(self):
    # Lazy import to prevent circular import when text_format imports this file.
    from google.protobuf import text_format
    return text_format.MessageToString(self, as_utf8=True).decode('utf-8')
  # Attach the local methods to the message class.
  for key, value in locals().copy().iteritems():
    if key not in ('key', 'value', '__builtins__', '__name__', '__doc__'):
      setattr(cls, key, value)
  # Static methods:
  def RegisterExtension(extension_handle):
    extension_handle.containing_type = cls.DESCRIPTOR
    cls._extensions_by_name[extension_handle.full_name] = extension_handle
    if _IsMessageSetExtension(extension_handle):
      # MessageSet extension.  Also register under type name.
      cls._extensions_by_name[
          extension_handle.message_type.full_name] = extension_handle
  cls.RegisterExtension = staticmethod(RegisterExtension)
  def FromString(string):
    msg = cls()
    msg.MergeFromString(string)
    return msg
  cls.FromString = staticmethod(FromString)
def _AddPropertiesForExtensions(message_descriptor, cls):
  """Adds properties for all fields in this protocol message type."""
  for name, field in message_descriptor.extensions_by_name.iteritems():
    # Mirror the generated-code convention: NAME_FIELD_NUMBER constants.
    setattr(cls, name.upper() + '_FIELD_NUMBER', field.number)
| bsd-3-clause |
n4hy/gnuradio | grc/gui/FlowGraph.py | 16 | 19013 | """
Copyright 2007-2011 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from Constants import SCROLL_PROXIMITY_SENSITIVITY, SCROLL_DISTANCE
import Actions
import Colors
import Utils
from Element import Element
import pygtk
pygtk.require('2.0')
import gtk
import random
import Messages
class FlowGraph(Element):
    """
    FlowGraph is the data structure to store graphical signal blocks,
    graphical inputs and outputs,
    and the connections between inputs and outputs.
    """

    def __init__(self):
        """
        FlowGraph contructor.
        Create a list for signal blocks and connections. Connect mouse handlers.
        """
        Element.__init__(self)
        #when is the flow graph selected? (used by keyboard event handler)
        self.is_selected = lambda: bool(self.get_selected_elements())
        #important vars dealing with mouse event tracking
        self.element_moved = False
        self.mouse_pressed = False
        self.unselect()
        self.press_coor = (0, 0)
        #selected ports
        self._old_selected_port = None
        self._new_selected_port = None
        #context menu
        self._context_menu = gtk.Menu()
        for action in [
            Actions.BLOCK_CUT,
            Actions.BLOCK_COPY,
            Actions.BLOCK_PASTE,
            Actions.ELEMENT_DELETE,
            Actions.BLOCK_ROTATE_CCW,
            Actions.BLOCK_ROTATE_CW,
            Actions.BLOCK_ENABLE,
            Actions.BLOCK_DISABLE,
            Actions.BLOCK_PARAM_MODIFY,
        ]: self._context_menu.append(action.create_menu_item())

    ###########################################################################
    # Access Drawing Area
    ###########################################################################
    #thin delegation wrappers around the gtk drawing area widget
    def get_drawing_area(self): return self.drawing_area
    def queue_draw(self): self.get_drawing_area().queue_draw()
    def get_size(self): return self.get_drawing_area().get_size_request()
    def set_size(self, *args): self.get_drawing_area().set_size_request(*args)
    def get_scroll_pane(self): return self.drawing_area.get_parent()
    def get_ctrl_mask(self): return self.drawing_area.ctrl_mask
    def new_pixmap(self, *args): return self.get_drawing_area().new_pixmap(*args)

    def add_new_block(self, key, coor=None):
        """
        Add a block of the given key to this flow graph.
        @param key the block key
        @param coor an optional coordinate or None for random
        """
        id = self._get_unique_id(key)
        #calculate the position coordinate
        h_adj = self.get_scroll_pane().get_hadjustment()
        v_adj = self.get_scroll_pane().get_vadjustment()
        #random placement within the middle half of the visible page
        if coor is None: coor = (
            int(random.uniform(.25, .75)*h_adj.page_size + h_adj.get_value()),
            int(random.uniform(.25, .75)*v_adj.page_size + v_adj.get_value()),
        )
        #get the new block
        block = self.get_new_block(key)
        block.set_coordinate(coor)
        block.set_rotation(0)
        block.get_param('id').set_value(id)
        Actions.ELEMENT_CREATE()

    ###########################################################################
    # Copy Paste
    ###########################################################################
    def copy_to_clipboard(self):
        """
        Copy the selected blocks and connections into the clipboard.
        @return the clipboard
        """
        #get selected blocks
        blocks = self.get_selected_blocks()
        if not blocks: return None
        #calc x and y min
        x_min, y_min = blocks[0].get_coordinate()
        for block in blocks:
            x, y = block.get_coordinate()
            x_min = min(x, x_min)
            y_min = min(y, y_min)
        #get connections between selected blocks
        connections = filter(
            lambda c: c.get_source().get_parent() in blocks and c.get_sink().get_parent() in blocks,
            self.get_connections(),
        )
        #clipboard holds the top-left anchor plus exported block/connection data
        clipboard = (
            (x_min, y_min),
            [block.export_data() for block in blocks],
            [connection.export_data() for connection in connections],
        )
        return clipboard

    def paste_from_clipboard(self, clipboard):
        """
        Paste the blocks and connections from the clipboard.
        @param clipboard the nested data of blocks, connections
        """
        selected = set()
        (x_min, y_min), blocks_n, connections_n = clipboard
        old_id2block = dict()
        #recalc the position
        h_adj = self.get_scroll_pane().get_hadjustment()
        v_adj = self.get_scroll_pane().get_vadjustment()
        x_off = h_adj.get_value() - x_min + h_adj.page_size/4
        y_off = v_adj.get_value() - y_min + v_adj.page_size/4
        #create blocks
        for block_n in blocks_n:
            block_key = block_n.find('key')
            #the 'options' block represents the flow graph itself; never paste it
            if block_key == 'options': continue
            block = self.get_new_block(block_key)
            selected.add(block)
            #set params
            params_n = block_n.findall('param')
            for param_n in params_n:
                param_key = param_n.find('key')
                param_value = param_n.find('value')
                #setup id parameter
                if param_key == 'id':
                    old_id2block[param_value] = block
                    #if the block id is not unique, get a new block id
                    if param_value in [block.get_id() for block in self.get_blocks()]:
                        param_value = self._get_unique_id(param_value)
                #set value to key
                block.get_param(param_key).set_value(param_value)
            #move block to offset coordinate
            block.move((x_off, y_off))
        #update before creating connections
        self.update()
        #create connections (source/sink looked up via the pre-paste block ids)
        for connection_n in connections_n:
            source = old_id2block[connection_n.find('source_block_id')].get_source(connection_n.find('source_key'))
            sink = old_id2block[connection_n.find('sink_block_id')].get_sink(connection_n.find('sink_key'))
            self.connect(source, sink)
        #set all pasted elements selected
        for block in selected: selected = selected.union(set(block.get_connections()))
        self._selected_elements = list(selected)

    ###########################################################################
    # Modify Selected
    ###########################################################################
    def type_controller_modify_selected(self, direction):
        """
        Change the registered type controller for the selected signal blocks.
        @param direction +1 or -1
        @return true for change
        """
        return any([sb.type_controller_modify(direction) for sb in self.get_selected_blocks()])

    def port_controller_modify_selected(self, direction):
        """
        Change port controller for the selected signal blocks.
        @param direction +1 or -1
        @return true for changed
        """
        return any([sb.port_controller_modify(direction) for sb in self.get_selected_blocks()])

    def enable_selected(self, enable):
        """
        Enable/disable the selected blocks.
        @param enable true to enable
        @return true if changed
        """
        changed = False
        for selected_block in self.get_selected_blocks():
            if selected_block.get_enabled() != enable:
                selected_block.set_enabled(enable)
                changed = True
        return changed

    def move_selected(self, delta_coordinate):
        """
        Move the element and by the change in coordinates.
        @param delta_coordinate the change in coordinates
        """
        for selected_block in self.get_selected_blocks():
            selected_block.move(delta_coordinate)
            self.element_moved = True

    def rotate_selected(self, rotation):
        """
        Rotate the selected blocks by multiples of 90 degrees.
        @param rotation the rotation in degrees
        @return true if changed, otherwise false.
        """
        if not self.get_selected_blocks(): return False
        #initialize min and max coordinates
        min_x, min_y = self.get_selected_block().get_coordinate()
        max_x, max_y = self.get_selected_block().get_coordinate()
        #rotate each selected block, and find min/max coordinate
        for selected_block in self.get_selected_blocks():
            selected_block.rotate(rotation)
            #update the min/max coordinate
            x, y = selected_block.get_coordinate()
            min_x, min_y = min(min_x, x), min(min_y, y)
            max_x, max_y = max(max_x, x), max(max_y, y)
        #calculate center point of slected blocks
        ctr_x, ctr_y = (max_x + min_x)/2, (max_y + min_y)/2
        #rotate the blocks around the center point
        for selected_block in self.get_selected_blocks():
            x, y = selected_block.get_coordinate()
            x, y = Utils.get_rotated_coordinate((x - ctr_x, y - ctr_y), rotation)
            selected_block.set_coordinate((x + ctr_x, y + ctr_y))
        return True

    def remove_selected(self):
        """
        Remove selected elements
        @return true if changed.
        """
        changed = False
        for selected_element in self.get_selected_elements():
            self.remove_element(selected_element)
            changed = True
        return changed

    def draw(self, gc, window):
        """
        Draw the background and grid if enabled.
        Draw all of the elements in this flow graph onto the pixmap.
        Draw the pixmap to the drawable window of this flow graph.
        """
        W,H = self.get_size()
        #draw the background
        gc.set_foreground(Colors.FLOWGRAPH_BACKGROUND_COLOR)
        window.draw_rectangle(gc, True, 0, 0, W, H)
        #draw multi select rectangle
        if self.mouse_pressed and (not self.get_selected_elements() or self.get_ctrl_mask()):
            #coordinates
            x1, y1 = self.press_coor
            x2, y2 = self.get_coordinate()
            #calculate top-left coordinate and width/height
            x, y = int(min(x1, x2)), int(min(y1, y2))
            w, h = int(abs(x1 - x2)), int(abs(y1 - y2))
            #draw
            gc.set_foreground(Colors.HIGHLIGHT_COLOR)
            window.draw_rectangle(gc, True, x, y, w, h)
            gc.set_foreground(Colors.BORDER_COLOR)
            window.draw_rectangle(gc, False, x, y, w, h)
        #draw blocks on top of connections
        for element in self.get_connections() + self.get_blocks():
            element.draw(gc, window)
        #draw selected blocks on top of selected connections
        for selected_element in self.get_selected_connections() + self.get_selected_blocks():
            selected_element.draw(gc, window)

    def update_selected(self):
        """
        Remove deleted elements from the selected elements list.
        Update highlighting so only the selected are highlighted.
        """
        selected_elements = self.get_selected_elements()
        elements = self.get_elements()
        #remove deleted elements
        for selected in selected_elements:
            if selected in elements: continue
            selected_elements.remove(selected)
        #drop stale port references whose parent block was deleted
        if self._old_selected_port and self._old_selected_port.get_parent() not in elements:
            self._old_selected_port = None
        if self._new_selected_port and self._new_selected_port.get_parent() not in elements:
            self._new_selected_port = None
        #update highlighting
        for element in elements:
            element.set_highlighted(element in selected_elements)

    def update(self):
        """
        Call the top level rewrite and validate.
        Call the top level create labels and shapes.
        """
        self.rewrite()
        self.validate()
        self.create_labels()
        self.create_shapes()

    ##########################################################################
    ## Get Selected
    ##########################################################################
    def unselect(self):
        """
        Set selected elements to an empty set.
        """
        self._selected_elements = []

    def what_is_selected(self, coor, coor_m=None):
        """
        What is selected?
        At the given coordinate, return the elements found to be selected.
        If coor_m is unspecified, return a list of only the first element found to be selected:
        Iterate though the elements backwards since top elements are at the end of the list.
        If an element is selected, place it at the end of the list so that is is drawn last,
        and hence on top. Update the selected port information.
        @param coor the coordinate of the mouse click
        @param coor_m the coordinate for multi select
        @return the selected blocks and connections or an empty list
        """
        selected_port = None
        selected = set()
        #check the elements
        for element in reversed(self.get_elements()):
            selected_element = element.what_is_selected(coor, coor_m)
            if not selected_element: continue
            #update the selected port information
            if selected_element.is_port():
                if not coor_m: selected_port = selected_element
                #select the port's parent block, not the port itself
                selected_element = selected_element.get_parent()
            selected.add(selected_element)
            #place at the end of the list
            self.get_elements().remove(element)
            self.get_elements().append(element)
            #single select mode, break
            if not coor_m: break
        #update selected ports
        self._old_selected_port = self._new_selected_port
        self._new_selected_port = selected_port
        return list(selected)

    def get_selected_connections(self):
        """
        Get a group of selected connections.
        @return sub set of connections in this flow graph
        """
        selected = set()
        for selected_element in self.get_selected_elements():
            if selected_element.is_connection(): selected.add(selected_element)
        return list(selected)

    def get_selected_blocks(self):
        """
        Get a group of selected blocks.
        @return sub set of blocks in this flow graph
        """
        selected = set()
        for selected_element in self.get_selected_elements():
            if selected_element.is_block(): selected.add(selected_element)
        return list(selected)

    def get_selected_block(self):
        """
        Get the selected block when a block or port is selected.
        @return a block or None
        """
        return self.get_selected_blocks() and self.get_selected_blocks()[0] or None

    def get_selected_elements(self):
        """
        Get the group of selected elements.
        @return sub set of elements in this flow graph
        """
        return self._selected_elements

    def get_selected_element(self):
        """
        Get the selected element.
        @return a block, port, or connection or None
        """
        return self.get_selected_elements() and self.get_selected_elements()[0] or None

    def update_selected_elements(self):
        """
        Update the selected elements.
        The update behavior depends on the state of the mouse button.
        When the mouse button pressed the selection will change when
        the control mask is set or the new selection is not in the current group.
        When the mouse button is released the selection will change when
        the mouse has moved and the control mask is set or the current group is empty.
        Attempt to make a new connection if the old and ports are filled.
        If the control mask is set, merge with the current elements.
        """
        selected_elements = None
        if self.mouse_pressed:
            new_selections = self.what_is_selected(self.get_coordinate())
            #update the selections if the new selection is not in the current selections
            #allows us to move entire selected groups of elements
            if self.get_ctrl_mask() or not (
                new_selections and new_selections[0] in self.get_selected_elements()
            ): selected_elements = new_selections
        else: #called from a mouse release
            if not self.element_moved and (not self.get_selected_elements() or self.get_ctrl_mask()):
                selected_elements = self.what_is_selected(self.get_coordinate(), self.press_coor)
        #this selection and the last were ports, try to connect them
        if self._old_selected_port and self._new_selected_port and \
        self._old_selected_port is not self._new_selected_port:
            try:
                self.connect(self._old_selected_port, self._new_selected_port)
                Actions.ELEMENT_CREATE()
            except: Messages.send_fail_connection()
            self._old_selected_port = None
            self._new_selected_port = None
            return
        #update selected elements
        if selected_elements is None: return
        old_elements = set(self.get_selected_elements())
        self._selected_elements = list(set(selected_elements))
        new_elements = set(self.get_selected_elements())
        #if ctrl, set the selected elements to the union - intersection of old and new
        if self.get_ctrl_mask():
            self._selected_elements = list(
                set.union(old_elements, new_elements) - set.intersection(old_elements, new_elements)
            )
        Actions.ELEMENT_SELECT()

    ##########################################################################
    ## Event Handlers
    ##########################################################################
    def handle_mouse_context_press(self, coordinate, event):
        """
        The context mouse button was pressed:
        If no elements were selected, perform re-selection at this coordinate.
        Then, show the context menu at the mouse click location.
        """
        selections = self.what_is_selected(coordinate)
        if not set(selections).intersection(self.get_selected_elements()):
            self.set_coordinate(coordinate)
            self.mouse_pressed = True
            self.update_selected_elements()
            self.mouse_pressed = False
        self._context_menu.popup(None, None, None, event.button, event.time)

    def handle_mouse_selector_press(self, double_click, coordinate):
        """
        The selector mouse button was pressed:
        Find the selected element. Attempt a new connection if possible.
        Open the block params window on a double click.
        Update the selection state of the flow graph.
        """
        self.press_coor = coordinate
        self.set_coordinate(coordinate)
        self.time = 0
        self.mouse_pressed = True
        #a double click clears the selection before re-selecting
        if double_click: self.unselect()
        self.update_selected_elements()
        #double click detected, bring up params dialog if possible
        if double_click and self.get_selected_block():
            self.mouse_pressed = False
            Actions.BLOCK_PARAM_MODIFY()

    def handle_mouse_selector_release(self, coordinate):
        """
        The selector mouse button was released:
        Update the state, handle motion (dragging).
        And update the selected flowgraph elements.
        """
        self.set_coordinate(coordinate)
        self.time = 0
        self.mouse_pressed = False
        if self.element_moved:
            Actions.BLOCK_MOVE()
            self.element_moved = False
        self.update_selected_elements()

    def handle_mouse_motion(self, coordinate):
        """
        The mouse has moved, respond to mouse dragging.
        Move a selected element to the new coordinate.
        Auto-scroll the scroll bars at the boundaries.
        """
        #to perform a movement, the mouse must be pressed, no pending events
        if gtk.events_pending() or not self.mouse_pressed: return
        #perform autoscrolling
        width, height = self.get_size()
        x, y = coordinate
        h_adj = self.get_scroll_pane().get_hadjustment()
        v_adj = self.get_scroll_pane().get_vadjustment()
        #handle the horizontal and vertical adjustments symmetrically
        for pos, length, adj, adj_val, adj_len in (
            (x, width, h_adj, h_adj.get_value(), h_adj.page_size),
            (y, height, v_adj, v_adj.get_value(), v_adj.page_size),
        ):
            #scroll if we moved near the border
            if pos-adj_val > adj_len-SCROLL_PROXIMITY_SENSITIVITY and adj_val+SCROLL_DISTANCE < length-adj_len:
                adj.set_value(adj_val+SCROLL_DISTANCE)
                adj.emit('changed')
            elif pos-adj_val < SCROLL_PROXIMITY_SENSITIVITY:
                adj.set_value(adj_val-SCROLL_DISTANCE)
                adj.emit('changed')
        #remove the connection if selected in drag event
        if len(self.get_selected_elements()) == 1 and self.get_selected_element().is_connection():
            Actions.ELEMENT_DELETE()
        #move the selected elements and record the new coordinate
        X, Y = self.get_coordinate()
        if not self.get_ctrl_mask(): self.move_selected((int(x - X), int(y - Y)))
        self.set_coordinate((x, y))
        #queue draw for animation
        self.queue_draw()
| gpl-3.0 |
tompecina/petition | petition/data/models.py | 1 | 2394 | # -*- coding: utf-8 -*-
#
# petition/data/models.py
#
# Copyright (C) 2011-16 Tomáš Pecina <tomas@pecina.cz>
#
# This file is part of petition.pecina.cz, a web-based petition
# application.
#
# This application is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Petition(models.Model):
    """A petition document that visitors can sign."""
    # owner/creator of the petition (legacy FK syntax, pre-Django-2.0)
    user = models.ForeignKey(User)
    # short unique identifier (presumably used in URLs -- TODO confirm against views)
    name = models.CharField(max_length=30, unique=True)
    # optional dedicated domain serving this petition
    domain = models.CharField(max_length=255, blank=True)
    # optional contact address
    email = models.EmailField(blank=True)
    # whether signing is closed
    # NOTE(review): no default declared; relies on the old implicit
    # BooleanField default -- confirm a default=False is intended
    closed = models.BooleanField()
    # full display title of the petition
    longname = models.CharField(max_length=255)
    keywords = models.CharField(max_length=255, blank=True)
    # custom per-petition stylesheet
    css = models.TextField(blank=True)
    # the petition text itself
    text = models.TextField()
    counter = models.IntegerField()
    # creation time, set automatically on insert
    timestamp = models.DateTimeField(auto_now_add=True, db_index=True)

    def __str__(self):
        return self.longname
class Signature(models.Model):
    """One signature on a petition, with per-field privacy flags."""
    # petition this signature belongs to (legacy FK syntax, pre-Django-2.0)
    petition = models.ForeignKey(Petition)
    # signatory's name; each *_hidden flag below controls whether the
    # corresponding field is shown publicly
    name = models.CharField(max_length=30, db_index=True)
    occupation = models.CharField(max_length=255, blank=True)
    occupation_hidden = models.BooleanField()
    address = models.CharField(max_length=255)
    address_hidden = models.BooleanField()
    birthdate = models.DateField(blank=True, null=True)
    birthdate_hidden = models.BooleanField()
    email = models.EmailField(blank=True)
    email_hidden = models.BooleanField()
    note = models.TextField(blank=True)
    note_hidden = models.BooleanField()
    # client IP the signature was submitted from
    ip = models.GenericIPAddressField()
    # domain through which the signature was submitted
    domain = models.CharField(max_length=255)
    # whether the signature was flagged/reported
    reported = models.BooleanField(default=False)
    # submission time, set automatically on insert
    timestamp = models.DateTimeField(auto_now_add=True, db_index=True)

    def __str__(self):
        return self.name
| gpl-3.0 |
sniemi/SamPy | sandbox/src2/fitsheader/fitsheader.py | 1 | 5472 | from pyfits import getheader
import pyfits
import os
class HeaderException(Exception):
    """Raised when FITS header keywords cannot be read or are invalid."""
    pass
##############
### ALFOSC ###
##############
def extractALFOSCHeader(file):
""" Extract ALFOSC keywords from a fits file """
try:
hdulist = pyfits.open(file)
hdulist.close()
if len(hdulist) == 2:
prihdr = hdulist[0].header
im1hdr = hdulist[1].header
return ['alfosc','AL',prihdr,im1hdr]
elif len(hdulist) == 3:
prihdr = hdulist[0].header
im1hdr = hdulist[1].header
im2hdr = hdulist[2].header
return ['alfosc','AL',prihdr,im1hdr,im2hdr]
else:
return ['ERROR']
# Error
except Exception, e:
raise HeaderException(e)
##############
### NOTCAM ###
##############
def extractNOTCAMHeader(file):
""" Extract NOTCAM keywords from a fits file """
try:
hdulist = pyfits.open(file)
hdulist.close()
if len(hdulist) > 0:
prihdr = hdulist[0].header
a = ['notcam','NC',prihdr]
for i in range(1, len(hdulist)):
a.append(hdulist[i].header)
return a
else:
return ['ERROR']
# Error
except Exception, e:
raise HeaderException(e)
#############
### MOSCA ###
#############
def extractMOSCAHeader(file):
""" Extract MOSCA keywords from fits header """
try:
hdulist = pyfits.open(file)
hdulist.close()
if len(hdulist) > 0:
prihdr = hdulist[0].header
a = ['mosca','MO',prihdr]
for i in range(1, len(hdulist)):
a.append(hdulist[i].header)
return a
else:
return ['ERROR']
# Error
except Exception, e:
raise HeaderException(e)
###############
### STANCAM ###
###############
def extractSTANCAMHeader(file):
""" Extract StanCam keywords from fits header """
try:
hdulist = pyfits.open(file)
hdulist.close()
if len(hdulist) > 0:
prihdr = hdulist[0].header
a = ['stancam','ST',prihdr]
for i in range(1, len(hdulist)):
a.append(hdulist[i].header)
return a
else:
return ['ERROR']
# Error
except Exception, e:
raise HeaderException(e)
############
### FIES ###
############
def extractFIESHeader(file):
""" Extract FIES keywords from fits header """
try:
hdulist = pyfits.open(file)
hdulist.close()
if len(hdulist) > 0:
prihdr = hdulist[0].header
a = ['fies','FI',prihdr]
for i in range(1, len(hdulist)):
a.append(hdulist[i].header)
return a
else:
return ['ERROR']
# Error
except Exception, e:
raise HeaderException(e)
##################
### OLD ALFOSC ###
##################
def extractOldALFOSCHeader(file):
    """Extract headers for non-MEF (single-HDU) ALFOSC images.

    Returns a dict of normalized keyword values plus a 'keys' list giving
    their presentation order, or ['ERROR'] if a required keyword is
    missing or invalid.

    Fix: this function uses degrees/pi/asin from the math module, which
    were never imported anywhere in this file, so the airmass-to-altitude
    computation always raised NameError at runtime; the import is now
    added at module level.

    NOTE(review): the requireValidFloat/requireValidInt/requireValidString
    helpers are not defined in this module -- presumably they live in a
    companion module and should be imported; confirm.
    """
    try:
        hdulist = pyfits.open(file)
        hdulist.close()
        # Extract primary header unit
        ph = extractHDU(file, 0)
        # Form a proper timestamp from a float type UT (fractional hours)
        ut = requireValidFloat('UT', ph)
        hh = int(ut)
        mm = int((ut - hh) * 60)
        ss = int((((ut - hh) * 60) - mm) * 60)
        timestamp = "%02d:%02d:%02d" % (hh, mm, ss)
        date_obs = requireValidString('DATE-OBS', ph)
        fitsheader = {
            'imagetyp': ph.get('IMAGETYP', 'na').strip() or 'na',
            'exptime' : requireValidFloat('EXPTIME', ph),
            'azimuth' : '0.00',
            'austatus': 'na',
            'telfocus': requireValidInt('TELFOCUS', ph),
            'gain'    : '0.726',
            'alfltid' : requireValidInt('FILTID', ph),
            'alfltnm' : requireValidString('FILTER', ph),
            'fafltid' : requireValidInt('AFILTID', ph),
            'fafltnm' : requireValidString('AFILTER', ph),
            'fbfltid' : requireValidInt('BFILTID', ph),
            'fbfltnm' : requireValidString('BFILTER', ph),
            'rotpos'  : requireValidFloat('ROTPOS', ph),
            'apertur' : requireValidString('APERTUR', ph),
            'ra'      : '%.2f' % requireValidFloat('RA', ph),
            'decl'    : '%.2f' % requireValidFloat('DEC', ph)
        }
        fitsheader['dateobs'] = "%sT%s" % (date_obs, timestamp)
        # Calculate telescope altitude from airmass: altitude = arcsin(1/airmass)
        airmass = requireValidFloat('AIRMASS', ph)
        fitsheader['telalt'] = '%.2f' % (90 - degrees(pi/2 - asin(1/airmass)))
        # Calculate pixel scale (0.19 arcsec per unbinned pixel)
        # NOTE(review): CDELT1 is read as an int here although FITS scale
        # keywords are normally floats -- confirm against real files
        cd1_1 = requireValidInt('CDELT1', ph)
        fitsheader['pscale'] = str(cd1_1 * 0.19)
        fitsheader['instrume'] = 'alfosc'
        # Imaging mode: a real exposure taken without a grism in the beam
        if (fitsheader['exptime'] > 1.0) and (requireValidString('GRISM', ph) == 'Open_(Lyot)'):
            fitsheader['imaging'] = 1
        else:
            fitsheader['imaging'] = 0
        fitsheader['keys'] = ['dateobs','telalt','azimuth','rotpos','ra','decl','telfocus','pscale','gain',
                              'apertur','alfltid','alfltnm','fafltid','fafltnm','fbfltid','fbfltnm',
                              'imagetyp','exptime','austatus']
    except HeaderException:
        return ['ERROR']
    return fitsheader
### SUB ROUTINES ###
def extractHeader(file):
    """
    Dispatch FITS header extraction based on the two-letter instrument
    prefix of the file name. File names with an unknown prefix are
    assumed to be old-style (non-MEF) ALFOSC images.
    """
    prefix = os.path.basename(file)[:2]
    dispatch = {
        'AL': extractALFOSCHeader,
        'NC': extractNOTCAMHeader,
        'MO': extractMOSCAHeader,
        'ST': extractSTANCAMHeader,
        'FI': extractFIESHeader,
    }
    handler = dispatch.get(prefix, extractOldALFOSCHeader)
    return handler(file)
def extractHDU(file,unit):
    """ Return a header unit """
    try:
        # getheader raises IndexError when the requested HDU index
        # does not exist in the file; other errors propagate unchanged
        return getheader(file, unit)
    except IndexError:
        pass
    # only reached on the IndexError path: wrap in the module exception
    raise HeaderException("No such HDU: %s" % unit)
| bsd-2-clause |
smartforceplus/SmartForceplus | addons/mail/mail_followers.py | 19 | 12361 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import threading
from openerp.osv import osv, fields
from openerp import tools, SUPERUSER_ID
from openerp.tools.translate import _
from openerp.tools.mail import plaintext2html
class mail_followers(osv.Model):
    """ mail_followers holds the data related to the follow mechanism inside
        OpenERP. Partners can choose to follow documents (records) of any kind
        that inherits from mail.thread. Following documents allow to receive
        notifications for new messages.
        A subscription is characterized by:
            :param: res_model: model of the followed objects
            :param: res_id: ID of resource (may be 0 for every objects)
    """
    _name = 'mail.followers'
    _rec_name = 'partner_id'
    # no create/write audit columns on this technical table
    _log_access = False
    _description = 'Document Followers'
    _columns = {
        # model of the followed record (e.g. 'crm.lead')
        'res_model': fields.char('Related Document Model',
                        required=True, select=1,
                        help='Model of the followed resource'),
        # id of the followed record within res_model
        'res_id': fields.integer('Related Document ID', select=1,
                        help='Id of the followed resource'),
        # the follower itself
        'partner_id': fields.many2one('res.partner', string='Related Partner',
                        ondelete='cascade', required=True, select=1),
        'subtype_ids': fields.many2many('mail.message.subtype', string='Subtype',
            help="Message subtypes followed, meaning subtypes that will be pushed onto the user's Wall."),
    }

    #
    # Modifying followers change access rights to individual documents. As the
    # cache may contain accessible/inaccessible data, one has to refresh it.
    #
    def create(self, cr, uid, vals, context=None):
        # invalidate the ORM cache: follower changes affect record visibility
        res = super(mail_followers, self).create(cr, uid, vals, context=context)
        self.invalidate_cache(cr, uid, context=context)
        return res

    def write(self, cr, uid, ids, vals, context=None):
        # invalidate the ORM cache: follower changes affect record visibility
        res = super(mail_followers, self).write(cr, uid, ids, vals, context=context)
        self.invalidate_cache(cr, uid, context=context)
        return res

    def unlink(self, cr, uid, ids, context=None):
        # invalidate the ORM cache: follower changes affect record visibility
        res = super(mail_followers, self).unlink(cr, uid, ids, context=context)
        self.invalidate_cache(cr, uid, context=context)
        return res

    # a partner may follow a given record only once
    _sql_constraints = [('mail_followers_res_partner_res_model_id_uniq','unique(res_model,res_id,partner_id)','Error, a partner cannot follow twice the same object.')]
class mail_notification(osv.Model):
""" Class holding notifications pushed to partners. Followers and partners
added in 'contacts to notify' receive notifications. """
_name = 'mail.notification'
_rec_name = 'partner_id'
_log_access = False
_description = 'Notifications'
_columns = {
'partner_id': fields.many2one('res.partner', string='Contact',
ondelete='cascade', required=True, select=1),
'is_read': fields.boolean('Read', select=1, oldname='read'),
'starred': fields.boolean('Starred', select=1,
help='Starred message that goes into the todo mailbox'),
'message_id': fields.many2one('mail.message', string='Message',
ondelete='cascade', required=True, select=1),
}
_defaults = {
'is_read': False,
'starred': False,
}
def init(self, cr):
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('mail_notification_partner_id_read_starred_message_id',))
if not cr.fetchone():
cr.execute('CREATE INDEX mail_notification_partner_id_read_starred_message_id ON mail_notification (partner_id, is_read, starred, message_id)')
def get_partners_to_email(self, cr, uid, ids, message, context=None):
""" Return the list of partners to notify, based on their preferences.
:param browse_record message: mail.message to notify
:param list partners_to_notify: optional list of partner ids restricting
the notifications to process
"""
notify_pids = []
for notification in self.browse(cr, uid, ids, context=context):
if notification.is_read:
continue
partner = notification.partner_id
# Do not send to partners without email address defined
if not partner.email:
continue
# Do not send to partners having same email address than the author (can cause loops or bounce effect due to messy database)
if message.author_id and message.author_id.email == partner.email:
continue
# Partner does not want to receive any emails or is opt-out
if partner.notify_email == 'none':
continue
notify_pids.append(partner.id)
return notify_pids
def get_signature_footer(self, cr, uid, user_id, res_model=None, res_id=None, context=None, user_signature=True):
""" Format a standard footer for notification emails (such as pushed messages
notification or invite emails).
Format:
<p>--<br />
Administrator
</p>
<div>
<small>Sent from <a ...>Your Company</a> using <a ...>OpenERP</a>.</small>
</div>
"""
footer = ""
if not user_id:
return footer
# add user signature
user = self.pool.get("res.users").browse(cr, SUPERUSER_ID, [user_id], context=context)[0]
if user_signature:
if user.signature:
signature = user.signature
else:
signature = "--<br />%s" % user.name
footer = tools.append_content_to_html(footer, signature, plaintext=False)
# add company signature
if user.company_id.website:
website_url = ('http://%s' % user.company_id.website) if not user.company_id.website.lower().startswith(('http:', 'https:')) \
else user.company_id.website
company = "<a style='color:inherit' href='%s'>%s</a>" % (website_url, user.company_id.name)
else:
company = user.company_id.name
sent_by = _('Sent by %(company)s using %(odoo)s')
signature_company = '<br /><small>%s</small>' % (sent_by % {
'company': company,
'odoo': "<a style='color:inherit' href='https://www.odoo.com/'>Odoo</a>"
})
footer = tools.append_content_to_html(footer, signature_company, plaintext=False, container_tag='div')
return footer
def update_message_notification(self, cr, uid, ids, message_id, partner_ids, context=None):
existing_pids = set()
new_pids = set()
new_notif_ids = []
for notification in self.browse(cr, uid, ids, context=context):
existing_pids.add(notification.partner_id.id)
# update existing notifications
self.write(cr, uid, ids, {'is_read': False}, context=context)
# create new notifications
new_pids = set(partner_ids) - existing_pids
for new_pid in new_pids:
new_notif_ids.append(self.create(cr, uid, {'message_id': message_id, 'partner_id': new_pid, 'is_read': False}, context=context))
return new_notif_ids
def _notify_email(self, cr, uid, ids, message_id, force_send=False, user_signature=True, context=None):
    """Send the given mail.message by email to the partners that want it.

    :param ids: mail.notification ids being processed
    :param message_id: id of the mail.message to push
    :param bool force_send: send the created mail.mail records immediately
        (only honored for a single chunk of at most 50 recipients)
    :param bool user_signature: append the author's signature footer
    :return: True
    """
    message = self.pool['mail.message'].browse(cr, SUPERUSER_ID, message_id, context=context)

    # compute partners
    # BUG FIX: the caller's context was previously discarded (context=None),
    # dropping e.g. language/timezone information for the partner lookup.
    email_pids = self.get_partners_to_email(cr, uid, ids, message, context=context)
    if not email_pids:
        return True

    # compute email body (signature, company data)
    body_html = message.body
    # add user signature except for mail groups, where users are usually adding their own signatures already
    user_id = message.author_id and message.author_id.user_ids and message.author_id.user_ids[0] and message.author_id.user_ids[0].id or None
    signature_company = self.get_signature_footer(cr, uid, user_id, res_model=message.model, res_id=message.res_id, context=context, user_signature=(user_signature and message.model != 'mail.group'))
    if signature_company:
        body_html = tools.append_content_to_html(body_html, signature_company, plaintext=False, container_tag='div')

    # compute email references: thread under the parent message if any
    references = message.parent_id.message_id if message.parent_id else False

    # custom values: let the target model inject extra mail.mail values
    custom_values = dict()
    if message.model and message.res_id and self.pool.get(message.model) and hasattr(self.pool[message.model], 'message_get_email_values'):
        custom_values = self.pool[message.model].message_get_email_values(cr, uid, message.res_id, message, context=context)

    # create email values, batched by 50 recipients per mail.mail
    max_recipients = 50
    chunks = [email_pids[x:x + max_recipients] for x in xrange(0, len(email_pids), max_recipients)]
    email_ids = []
    for chunk in chunks:
        mail_values = {
            'mail_message_id': message.id,
            'auto_delete': True,
            'body_html': body_html,
            'recipient_ids': [(4, id) for id in chunk],
            'references': references,
        }
        mail_values.update(custom_values)
        email_ids.append(self.pool.get('mail.mail').create(cr, uid, mail_values, context=context))
    # NOTE:
    #   1. for more than 50 followers, use the queue system
    #   2. do not send emails immediately if the registry is not loaded,
    #      to prevent sending email during a simple update of the database
    #      using the command-line.
    if force_send and len(chunks) < 2 and \
            (not self.pool._init or
             getattr(threading.currentThread(), 'testing', False)):
        self.pool.get('mail.mail').send(cr, uid, email_ids, context=context)
    return True
def _notify(self, cr, uid, message_id, partners_to_notify=None, context=None,
            force_send=False, user_signature=True):
    """ Send by email the notification depending on the user preferences
        :param list partners_to_notify: optional list of partner ids restricting
            the notifications to process
        :param bool force_send: if True, the generated mail.mail is
            immediately sent after being created, as if the scheduler
            was executed for this message only.
        :param bool user_signature: if True, the generated mail.mail body is
            the body of the related mail.message with the author's signature
    """
    # Search as SUPERUSER_ID: uid may not be able to read every notification.
    # NOTE(review): with partners_to_notify=None the ('partner_id', 'in', None)
    # leaf presumably matches nothing — confirm against callers.
    notif_ids = self.search(cr, SUPERUSER_ID, [('message_id', '=', message_id), ('partner_id', 'in', partners_to_notify)], context=context)

    # update or create notifications
    new_notif_ids = self.update_message_notification(cr, SUPERUSER_ID, notif_ids, message_id, partners_to_notify, context=context)

    # mail_notify_noemail (do not send email) or no partner_ids: do not send, return
    if context and context.get('mail_notify_noemail'):
        return True

    # browse as SUPERUSER_ID because of access to res_partner not necessarily allowed
    self._notify_email(cr, SUPERUSER_ID, new_notif_ids, message_id, force_send, user_signature, context=context)
| agpl-3.0 |
raid5/simpledoge | simplecoin/views.py | 1 | 11777 | import calendar
import time
import yaml
import datetime
from itsdangerous import TimedSerializer
from flask import (current_app, request, render_template, Blueprint, abort,
jsonify, g, session, Response)
from lever import get_joined
from .models import (Transaction, OneMinuteShare, Block, Payout, Blob,
FiveMinuteShare, OneHourShare, Status, FiveMinuteReject,
OneMinuteReject, OneHourReject, DonationPercent,
BonusPayout)
from . import db, root, cache
from .utils import (compress_typ, get_typ, verify_message, get_pool_acc_rej,
get_pool_eff, last_10_shares, total_earned, total_paid,
collect_user_stats, get_adj_round_shares,
get_pool_hashrate, last_block_time, get_alerts,
last_block_found)
main = Blueprint('main', __name__)
@main.route("/")
def home():
    """Render the landing page with the latest news entries.

    Uses ``yaml.safe_load`` (plain-data loading only) and a ``with`` block so
    the file handle is always closed; the original ``yaml.load`` on an
    unclosed ``open()`` could construct arbitrary objects and leaked the fd.
    """
    with open(root + '/static/yaml/news.yaml') as news_file:
        news = yaml.safe_load(news_file)
    return render_template('home.html', news=news)
@main.route("/news")
def news():
    """Render the full news page.

    ``yaml.safe_load`` + ``with`` replace the unclosed ``yaml.load(open(...))``
    (file-handle leak; unsafe loader on YAML input).
    """
    with open(root + '/static/yaml/news.yaml') as news_file:
        news = yaml.safe_load(news_file)
    return render_template('news.html', news=news)
@main.route("/blocks")
def blocks():
    """List every block found by the pool, newest first."""
    found_blocks = db.session.query(Block).order_by(Block.height.desc())
    return render_template('blocks.html', blocks=found_blocks)
@main.route("/pool_stats")
def pool_stats():
    """Render the pool statistics page (current block, recent blocks, efficiency)."""
    block_blob = db.session.query(Blob).filter_by(key="block").first()
    # The blob stores the reward as a string; the template expects an int.
    block_blob.data['reward'] = int(block_blob.data['reward'])
    recent_blocks = db.session.query(Block).order_by(Block.height.desc()).limit(10)
    reject_total, accept_total = get_pool_acc_rej()
    efficiency = get_pool_eff()
    return render_template('pool_stats.html',
                           blocks=recent_blocks,
                           current_block=block_blob,
                           efficiency=efficiency,
                           accept_total=accept_total,
                           reject_total=reject_total)
@main.route("/get_payouts", methods=['POST'])
def get_payouts():
    """ Used by remote procedure call to retrieve a list of transactions to
    be processed. Transaction information is signed for safety. """
    s = TimedSerializer(current_app.config['rpc_signature'])
    # Validates the request signature (raises on a tampered/expired payload);
    # the deserialized value itself is intentionally unused.
    s.loads(request.data)

    # Unpaid regular payouts attached to a mature block, plus unpaid bonuses.
    payouts = (Payout.query.filter_by(transaction_id=None).
               join(Payout.block, aliased=True).filter_by(mature=True))
    bonus_payouts = BonusPayout.query.filter_by(transaction_id=None)
    # (user, amount, id) triples for both payout kinds, signed on the way out.
    pids = [(p.user, p.amount, p.id) for p in payouts]
    bids = [(p.user, p.amount, p.id) for p in bonus_payouts]
    return s.dumps([pids, bids])
@main.route("/confirm_payouts", methods=['POST'])
def confirm_transactions():
    """ Used as a response from an rpc payout system. This will either reset
    the sent status of a list of transactions upon failure on the remote side,
    or create a new CoinTransaction object and link it to the transactions to
    signify that the transaction has been processed. Both request and response
    are signed. """
    s = TimedSerializer(current_app.config['rpc_signature'])
    data = s.loads(request.data)

    # basic checking of input
    # NOTE(review): `assert` statements are stripped under `python -O`;
    # explicit checks would be more robust for request validation.
    try:
        assert len(data['coin_txid']) == 64
        assert isinstance(data['pids'], list)
        assert isinstance(data['bids'], list)
        for id in data['pids']:
            assert isinstance(id, int)
        for id in data['bids']:
            assert isinstance(id, int)
    except AssertionError:
        current_app.logger.warn("Invalid data passed to confirm", exc_info=True)
        abort(400)

    # Record the on-chain transaction, then link all confirmed payouts to it.
    coin_trans = Transaction.create(data['coin_txid'])
    db.session.flush()
    Payout.query.filter(Payout.id.in_(data['pids'])).update(
        {Payout.transaction_id: coin_trans.txid}, synchronize_session=False)
    BonusPayout.query.filter(BonusPayout.id.in_(data['bids'])).update(
        {BonusPayout.transaction_id: coin_trans.txid}, synchronize_session=False)
    db.session.commit()
    return s.dumps(True)
@main.before_request
def add_pool_stats():
    """Populate ``flask.g`` with pool-wide statistics before every request."""
    g.completed_block_shares = get_adj_round_shares()
    g.round_duration = (datetime.datetime.utcnow() - last_block_time()).total_seconds()
    g.hashrate = get_pool_hashrate()

    blobs = Blob.query.filter(Blob.key.in_(("server", "diff"))).all()

    # Worker count from the "server" blob; default to 0 when absent.
    server_blobs = [b for b in blobs if b.key == "server"]
    if server_blobs:
        g.worker_count = int(server_blobs[0].data['stratum_clients'])
    else:
        g.worker_count = 0

    # Network difficulty from the "diff" blob; -1 signals "unknown".
    diff_blobs = [b for b in blobs if b.key == "diff"]
    if diff_blobs:
        diff = float(diff_blobs[0].data['diff'])
    else:
        diff = -1
    g.average_difficulty = diff
    g.shares_to_solve = diff * (2 ** 16)
    g.total_round_shares = g.shares_to_solve * current_app.config['last_n']
    g.alerts = get_alerts()
@main.route("/close/<int:id>")
def close_alert(id):
    """Remember in the session that the user dismissed alert *id*."""
    dismissed = session.get('dismissed_alerts', [])
    dismissed.append(id)
    # Reassign so the session notices the change.
    session['dismissed_alerts'] = dismissed
    return Response('success')
@main.route("/api/pool_stats")
def pool_stats_api():
    """JSON endpoint exposing pool-wide statistics.

    Guards the shares-per-second math against zero denominators: at the very
    start of a round ``round_duration`` (and thus ``sps``) can be 0, which
    previously raised ZeroDivisionError and returned an HTTP 500.
    """
    ret = {}
    ret['hashrate'] = get_pool_hashrate()
    ret['workers'] = g.worker_count
    ret['completed_shares'] = g.completed_block_shares
    ret['total_round_shares'] = g.total_round_shares
    ret['round_duration'] = g.round_duration
    # Shares per second since the last block; 0.0 when no time has elapsed.
    if g.round_duration:
        sps = float(g.completed_block_shares) / g.round_duration
    else:
        sps = 0.0
    ret['shares_per_sec'] = sps
    ret['last_block_found'] = last_block_found()
    ret['shares_to_solve'] = g.shares_to_solve
    # With no share rate there is nothing to extrapolate from; report None
    # (JSON null) instead of dividing by zero.
    if sps:
        ret['est_sec_remaining'] = (float(g.shares_to_solve) - g.completed_block_shares) / sps
    else:
        ret['est_sec_remaining'] = None
    return jsonify(**ret)
@main.route("/stats")
def user_stats():
    """Landing page for looking up statistics by payout address."""
    title = 'User Stats - Look up statistics for a Dogecoin address'
    return render_template('stats.html', page_title=title)
@main.route("/round_summary")
def summary_page():
    """Show PPLNS share standings for the current round.

    Reads cached share/donation data and renders a sorted per-user table.
    """
    user_shares = cache.get('pplns_user_shares')
    cached_time = cache.get('pplns_cache_time')
    cached_donation = cache.get('user_donations')

    def user_match(user):
        """Donation percentage for *user*, defaulting when not cached."""
        # BUG FIX: when cached_donation was None this previously fell
        # through and implicitly returned None instead of the default.
        if cached_donation is not None and user in cached_donation:
            return cached_donation[user]
        return current_app.config['default_perc']

    if cached_time is not None:
        cached_time = cached_time.replace(second=0, microsecond=0).strftime("%Y-%m-%d %H:%M")

    if user_shares is None:
        user_list = []
    else:
        # [shares, address, hashes/min estimate, donation %] per user; the
        # "usr_" prefix (6 chars) is stripped from the cache key.
        user_list = [([shares, user, (65536 * last_10_shares(user[6:]) / 600), user_match(user[6:])]) for user, shares in user_shares.iteritems()]
        user_list = sorted(user_list, key=lambda x: x[0], reverse=True)

    current_block = db.session.query(Blob).filter_by(key="block").first()

    return render_template('round_summary.html',
                           users=user_list,
                           current_block=current_block,
                           cached_time=cached_time)
@main.route("/exc_test")
def exception():
    """Deliberately raise, so the 500 handler can be exercised manually."""
    current_app.logger.warn("Exception test!")
    raise Exception()
@main.route("/charity")
def charity_view():
    """Render the charity miners page with live hashrate and payout totals.

    Works on copies of the configured alias dicts: the original mutated the
    dicts stored in ``current_app.config['aliases']`` on every request.
    """
    charities = []
    for info in current_app.config['aliases']:
        entry = dict(info)  # shallow copy so the app config is left untouched
        entry['hashes_per_min'] = ((2 ** 16) * last_10_shares(entry['address'])) / 600
        entry['total_paid'] = total_paid(entry['address'])
        charities.append(entry)
    return render_template('charity.html', charities=charities)
@main.route("/<address>/<worker>/details/<int:gpu>")
@main.route("/<address>/details/<int:gpu>", defaults={'worker': ''})
@main.route("/<address>//details/<int:gpu>", defaults={'worker': ''})
def worker_detail(address, worker, gpu):
    """Return pretty status JSON for one GPU of a specific worker."""
    status = Status.query.filter_by(user=address, worker=worker).first()
    detail = status.pretty_json(gpu) if status else "Not available"
    return jsonify(output=detail)
@main.route("/<address>")
def user_dashboard(address=None):
    """Render the per-address statistics dashboard."""
    # Dogecoin addresses are 34 characters; anything else is not a user page.
    if len(address) != 34:
        abort(404)
    stats = collect_user_stats(address)

    # Promote this address to the front of the recently-viewed list,
    # keeping at most ten entries.
    recent = session.get('recent_users', [])
    if address in recent:
        recent.remove(address)
    recent.insert(0, address)
    session['recent_users'] = recent[:10]

    return render_template('user_stats.html', username=address, **stats)
@main.route("/api/<address>")
def address_api(address):
    """JSON dump of statistics and earning estimates for one payout address."""
    # Dogecoin addresses are 34 characters long.
    if len(address) != 34:
        abort(404)
    stats = collect_user_stats(address)
    stats['acct_items'] = get_joined(stats['acct_items'])
    # Flatten the name -> data mapping into a list of dicts carrying 'name'.
    workers = []
    for name, data in stats['workers'].iteritems():
        workers.append(data)
        workers[-1]['name'] = name
    stats['workers'] = workers
    stats['total_earned'] = float(stats['total_earned'])
    if stats['pplns_cached_time']:
        # Epoch seconds for JSON consumers.
        stats['pplns_cached_time'] = calendar.timegm(stats['pplns_cached_time'].utctimetuple())
    # Extrapolate a day of shares from the last 10 minutes (6 * 24 windows).
    day_shares = stats['last_10_shares'] * 6 * 24
    # NOTE(review): g.shares_to_solve / g.total_round_shares can be <= 0 when
    # the difficulty blob is missing — confirm upstream guarantees.
    daily_percentage = float(day_shares) / g.shares_to_solve
    donation_perc = (1 - (stats['donation_perc'] / 100.0))
    rrwd = current_app.config['reward']
    stats['daily_est'] = daily_percentage * rrwd * donation_perc
    stats['est_round_payout'] = (float(stats['round_shares']) / g.total_round_shares) * donation_perc * rrwd
    return jsonify(**stats)
@main.route("/<address>/clear")
def address_clear(address=None):
    """Drop *address* from the session's recently-viewed list."""
    if len(address) != 34:
        abort(404)

    # remove address from the recently viewed
    recent = session.get('recent_users', [])
    if address in recent:
        recent.remove(address)
    session['recent_users'] = recent[:10]

    return jsonify(recent=recent[:10])
@main.route("/<address>/stats")
@main.route("/<address>/stats/<window>")
def address_stats(address=None, window="hour"):
    """Time-series share data for an address at hour/day/month resolution."""
    # store all the raw data we've grabbed
    workers = {}

    if window == "hour":
        typ = OneMinuteShare
    elif window == "day":
        # Older one-minute samples get compressed into the workers dict.
        compress_typ(OneMinuteShare, address, workers)
        typ = FiveMinuteShare
    elif window == "month":
        compress_typ(FiveMinuteShare, address, workers)
        typ = OneHourShare
    # NOTE(review): an unrecognized window value leaves `typ` unbound and the
    # loop below raises NameError — confirm routes constrain the values.

    for m in get_typ(typ, address):
        stamp = calendar.timegm(m.time.utctimetuple())
        workers.setdefault(m.worker, {})
        workers[m.worker].setdefault(stamp, 0)
        workers[m.worker][stamp] += m.value
    step = typ.slice_seconds
    # Align the range to whole slices and drop the two newest (incomplete).
    end = ((int(time.time()) // step) * step) - (step * 2)
    start = end - typ.window.total_seconds() + (step * 2)

    if address == "pool" and '' in workers:
        # The pool aggregate is stored under the empty worker name.
        workers['Entire Pool'] = workers['']
        del workers['']

    return jsonify(start=start, end=end, step=step, workers=workers)
@main.errorhandler(Exception)
def handle_error(error):
    """Log unexpected errors and render the generic error page."""
    current_app.logger.exception(error)
    # Return the proper HTTP status code: a bare render_template here
    # previously produced the error page with a misleading 200 OK.
    return render_template("500.html"), 500
@main.route("/guides")
@main.route("/guides/")
def guides_index():
    """Index page listing all help guides."""
    return render_template("guides/index.html")
@main.route("/guides/<guide>")
def guides(guide):
    """Render a named guide page from the guides/ template directory.

    The template path is built from user input, so the name is whitelisted
    to simple identifiers (letters, digits, dash, underscore) and anything
    else 404s, instead of feeding arbitrary strings to the template loader.
    """
    import re
    if not re.match(r'^[A-Za-z0-9_-]+$', guide):
        abort(404)
    return render_template("guides/" + guide + ".html")
@main.route("/faq")
def faq():
    """Frequently-asked-questions page."""
    return render_template("faq.html")
@main.route("/set_donation/<address>", methods=['POST', 'GET'])
def set_donation(address):
    """Show and (on a signed POST) change a user's donation percentage."""
    vals = request.form
    result = ""
    if request.method == "POST":
        try:
            # The change must be authorized by a message signed with the
            # address's private key.
            verify_message(address, vals['message'], vals['signature'])
        except Exception as e:
            current_app.logger.info("Failed to validate!", exc_info=True)
            result = "An error occurred: " + str(e)
        else:
            result = "Successfully changed!"

    # Current stored preference, falling back to the configured default.
    perc = DonationPercent.query.filter_by(user=address).first()
    if not perc:
        perc = current_app.config.get('default_perc', 0)
    else:
        perc = perc.perc
    return render_template("set_donation.html", username=address, result=result,
                           perc=perc)
| mit |
frainfreeze/studying | home/python/microblog/venv/lib/python3.5/site-packages/jinja2/_identifier.py | 89 | 1726 | # generated by scripts/generate_identifier_pattern.py
pattern = '·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯'
| mit |
temasek/android_external_chromium_org_third_party_WebKit | Tools/Scripts/webkitpy/thirdparty/coverage/annotate.py | 68 | 3029 | """Source file annotation for Coverage."""
import os, re
from coverage.report import Reporter
class AnnotateReporter(Reporter):
    """Generate annotated source files showing line coverage.

    This reporter creates annotated copies of the measured source files. Each
    .py file is copied as a .py,cover file, with a left-hand margin annotating
    each line::

        > def h(x):
        -     if 0:  #pragma: no cover
        -         pass
        >     if x == 1:
        !         a = 1
        >     else:
        >         a = 2

        > h(2)

    Executed lines use '>', lines not executed use '!', lines excluded from
    consideration use '-'.

    """

    def __init__(self, coverage, ignore_errors=False):
        super(AnnotateReporter, self).__init__(coverage, ignore_errors)
        self.directory = None

    # A blank or comment-only source line.
    blank_re = re.compile(r"\s*(#|$)")
    # A line containing only 'else:' (optionally followed by a comment).
    else_re = re.compile(r"\s*else\s*:\s*(#|$)")

    def report(self, morfs, config, directory=None):
        """Run the report.

        See `coverage.report()` for arguments.

        """
        self.report_files(self.annotate_file, morfs, config, directory)

    def annotate_file(self, cu, analysis):
        """Annotate a single file.

        `cu` is the CodeUnit for the file to annotate.

        """
        if not cu.relative:
            return

        filename = cu.filename
        source = cu.source_file()
        try:
            if self.directory:
                dest_file = os.path.join(self.directory, cu.flat_rootname())
                dest_file += ".py,cover"
            else:
                dest_file = filename + ",cover"

            # `with` guarantees the output file is closed even if an
            # exception interrupts annotation (the original leaked it).
            with open(dest_file, 'w') as dest:
                statements = analysis.statements
                missing = analysis.missing
                excluded = analysis.excluded

                lineno = 0
                i = 0       # index into statements
                j = 0       # index into missing
                covered = True
                while True:
                    line = source.readline()
                    if line == '':
                        break
                    lineno += 1
                    # Advance both cursors to the current source line.
                    while i < len(statements) and statements[i] < lineno:
                        i += 1
                    while j < len(missing) and missing[j] < lineno:
                        j += 1
                    if i < len(statements) and statements[i] == lineno:
                        covered = j >= len(missing) or missing[j] > lineno
                    if self.blank_re.match(line):
                        # Two characters to stay column-aligned with the
                        # other two-character markers ('> ', '! ', '- ').
                        dest.write('  ')
                    elif self.else_re.match(line):
                        # Special logic for lines containing only 'else:'.
                        if i >= len(statements) and j >= len(missing):
                            dest.write('! ')
                        elif i >= len(statements) or j >= len(missing):
                            dest.write('> ')
                        elif statements[i] == missing[j]:
                            dest.write('! ')
                        else:
                            dest.write('> ')
                    elif lineno in excluded:
                        dest.write('- ')
                    elif covered:
                        dest.write('> ')
                    else:
                        dest.write('! ')
                    dest.write(line)
        finally:
            # The source file handle comes from the CodeUnit; always close it.
            source.close()
| bsd-3-clause |
Gabrielcarvfer/NS3 | examples/routing/simple-routing-ping6.py | 7 | 3384 | #
# Copyright (c) 2008-2009 Strasbourg University
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: David Gross <gdavid.devel@gmail.com>
# Sebastien Vincent <vincent@clarinet.u-strasbg.fr>
#
#
# Network topology:
#
# n0 r n1
# | _ |
# ====|_|====
# router
#
import ns.internet_apps
import ns.core
import ns.csma
import ns.internet
import ns.network
def main(argv):
    """Build the n0 -- r -- n1 CSMA topology and ping n1 from n0 over IPv6."""
    cmd = ns.core.CommandLine();
    cmd.Parse(argv);

    # Create nodes
    print ("Create nodes")
    n0 = ns.network.Node();
    r = ns.network.Node();
    n1 = ns.network.Node();

    # net1 = {n0, r}, net2 = {r, n1}; r routes between the two segments.
    net1 = ns.network.NodeContainer();
    net1.Add(n0);
    net1.Add(r);
    net2 = ns.network.NodeContainer();
    net2.Add(r);
    net2.Add(n1);
    # NOTE: `all` shadows the Python builtin inside this function.
    all = ns.network.NodeContainer();
    all.Add(n0);
    all.Add(r);
    all.Add(n1);

    # Create IPv6 Internet Stack
    internetv6 = ns.internet.InternetStackHelper();
    internetv6.Install(all);

    # Create channels
    csma = ns.csma.CsmaHelper();
    csma.SetChannelAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate(5000000)));
    csma.SetChannelAttribute("Delay", ns.core.TimeValue(ns.core.MilliSeconds(2)));
    d1 = csma.Install(net1);
    d2 = csma.Install(net2);

    # Create networks and assign IPv6 Addresses
    print ("Addressing")
    ipv6 = ns.internet.Ipv6AddressHelper();
    # 2001:1::/64 on the n0--r link; r (index 1) forwards and is the default route.
    ipv6.SetBase(ns.network.Ipv6Address("2001:1::"), ns.network.Ipv6Prefix(64));
    i1 = ipv6.Assign(d1);
    i1.SetForwarding(1, True);
    i1.SetDefaultRouteInAllNodes(1);
    # 2001:2::/64 on the r--n1 link; r is at index 0 on this segment.
    ipv6.SetBase(ns.network.Ipv6Address("2001:2::"), ns.network.Ipv6Prefix(64));
    i2 = ipv6.Assign(d2);
    i2.SetForwarding(0, True);
    i2.SetDefaultRouteInAllNodes(0);

    # Create a Ping6 application to send ICMPv6 echo request from n0 to n1 via r
    print ("Application")
    packetSize = 1024;
    maxPacketCount = 5;
    interPacketInterval = ns.core.Seconds(1.);
    ping6 = ns.internet_apps.Ping6Helper();
    ping6.SetLocal(i1.GetAddress(0, 1));
    ping6.SetRemote(i2.GetAddress(1, 1));
    ping6.SetAttribute("MaxPackets", ns.core.UintegerValue(maxPacketCount));
    ping6.SetAttribute("Interval", ns.core.TimeValue(interPacketInterval));
    ping6.SetAttribute("PacketSize", ns.core.UintegerValue(packetSize));
    apps = ping6.Install(ns.network.NodeContainer(net1.Get(0)));
    apps.Start(ns.core.Seconds(2.0));
    apps.Stop(ns.core.Seconds(20.0));

    # ASCII traces and pcap captures of all CSMA devices.
    print ("Tracing")
    ascii = ns.network.AsciiTraceHelper()
    csma.EnableAsciiAll(ascii.CreateFileStream("simple-routing-ping6.tr"))
    csma.EnablePcapAll("simple-routing-ping6", True)

    # Run Simulation
    ns.core.Simulator.Run()
    ns.core.Simulator.Destroy()
# Script entry point.
if __name__ == '__main__':
    import sys
    main(sys.argv)
| gpl-2.0 |
cchamberlain/npm-msys2 | node_modules/node-gyp/gyp/pylib/gyp/generator/gypsh.py | 2779 | 1665 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypsh output module
gypsh is a GYP shell. It's not really a generator per se. All it does is
fire up an interactive Python session with a few local variables set to the
variables passed to the generator. Like gypd, it's intended as a debugging
aid, to facilitate the exploration of .gyp structures after being processed
by the input module.
The expected usage is "gyp -f gypsh -D OS=desired_os".
"""
import code
import sys
# All of this stuff about generator variables was lovingly ripped from gypd.py.
# That module has a much better description of what's going on and why.
# Generator variables that gypsh deliberately leaves unexpanded: each maps to
# a '<(NAME)' placeholder below so the shell shows the raw gyp structures.
_generator_identity_variables = [
  'EXECUTABLE_PREFIX',
  'EXECUTABLE_SUFFIX',
  'INTERMEDIATE_DIR',
  'PRODUCT_DIR',
  'RULE_INPUT_ROOT',
  'RULE_INPUT_DIRNAME',
  'RULE_INPUT_EXT',
  'RULE_INPUT_NAME',
  'RULE_INPUT_PATH',
  'SHARED_INTERMEDIATE_DIR',
]

generator_default_variables = {
}

for v in _generator_identity_variables:
  generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
  """Drop into an interactive Python shell with the gyp data bound to locals.

  The processed target list/dicts and raw data are exposed as local names
  inside the interactive session for exploration and debugging.
  """
  # Renamed from `locals`: don't shadow the Python builtin of that name.
  shell_locals = {
        'target_list': target_list,
        'target_dicts': target_dicts,
        'data': data,
      }

  # Use a banner that looks like the stock Python one and like what
  # code.interact uses by default, but tack on something to indicate what
  # locals are available, and identify gypsh.
  banner='Python %s on %s\nlocals.keys() = %s\ngypsh' % \
         (sys.version, sys.platform, repr(sorted(shell_locals.keys())))

  code.interact(banner, local=shell_locals)
| artistic-2.0 |
kemalakyol48/python-for-android | python-modules/twisted/twisted/test/test_stdio.py | 56 | 14110 | # Copyright (c) 2006-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.stdio}.
"""
import os, sys, itertools
from twisted.trial import unittest
from twisted.python import filepath, log
from twisted.python.runtime import platform
from twisted.internet import error, defer, protocol, stdio, reactor
from twisted.test.test_tcp import ConnectionLostNotifyingProtocol
# A short string which is intended to appear here and nowhere else,
# particularly not in any random garbage output CPython unavoidable
# generates (such as in warning text and so forth). This is searched
# for in the output from stdio_test_lastwrite.py and if it is found at
# the end, the functionality works.
UNIQUE_LAST_WRITE_STRING = 'xyz123abc Twisted is great!'

# Skip reason for Windows hosts lacking pywin32; None means "do not skip".
skipWindowsNopywin32 = None
if platform.isWindows():
    try:
        import win32process
    except ImportError:
        skipWindowsNopywin32 = ("On windows, spawnProcess is not available "
                                "in the absence of win32process.")
class StandardIOTestProcessProtocol(protocol.ProcessProtocol):
    """
    Test helper for collecting output from a child process and notifying
    something when it exits.

    @ivar onConnection: A L{defer.Deferred} which will be called back with
    C{None} when the connection to the child process is established.

    @ivar onCompletion: A L{defer.Deferred} which will be errbacked with the
    failure associated with the child process exiting when it exits.

    @ivar onDataReceived: A L{defer.Deferred} which will be called back with
    this instance whenever C{childDataReceived} is called, or C{None} to
    suppress these callbacks.

    @ivar data: A C{dict} mapping file descriptors to strings containing all
    bytes received from the child process on each file descriptor.
    """
    onDataReceived = None

    def __init__(self):
        self.onConnection = defer.Deferred()
        self.onCompletion = defer.Deferred()
        self.data = {}

    def connectionMade(self):
        self.onConnection.callback(None)

    def childDataReceived(self, name, bytes):
        """
        Record all bytes received from the child process in the C{data}
        dictionary.  Fire C{onDataReceived} if it is not C{None}.
        """
        self.data[name] = self.data.get(name, '') + bytes
        if self.onDataReceived is not None:
            # One-shot: clear the Deferred before firing so a callback that
            # sets a new one is not immediately re-fired.
            d, self.onDataReceived = self.onDataReceived, None
            d.callback(self)

    def processEnded(self, reason):
        # `reason` is a Failure wrapping the process-exit reason.
        self.onCompletion.callback(reason)
class StandardInputOutputTestCase(unittest.TestCase):
skip = skipWindowsNopywin32
def _spawnProcess(self, proto, sibling, *args, **kw):
"""
Launch a child Python process and communicate with it using the
given ProcessProtocol.
@param proto: A L{ProcessProtocol} instance which will be connected
to the child process.
@param sibling: The basename of a file containing the Python program
to run in the child process.
@param *args: strings which will be passed to the child process on
the command line as C{argv[2:]}.
@param **kw: additional arguments to pass to L{reactor.spawnProcess}.
@return: The L{IProcessTransport} provider for the spawned process.
"""
import twisted
subenv = dict(os.environ)
subenv['PYTHONPATH'] = os.pathsep.join(
[os.path.abspath(
os.path.dirname(os.path.dirname(twisted.__file__))),
subenv.get('PYTHONPATH', '')
])
args = [sys.executable,
filepath.FilePath(__file__).sibling(sibling).path,
reactor.__class__.__module__] + list(args)
return reactor.spawnProcess(
proto,
sys.executable,
args,
env=subenv,
**kw)
def _requireFailure(self, d, callback):
def cb(result):
self.fail("Process terminated with non-Failure: %r" % (result,))
def eb(err):
return callback(err)
return d.addCallbacks(cb, eb)
def test_loseConnection(self):
"""
Verify that a protocol connected to L{StandardIO} can disconnect
itself using C{transport.loseConnection}.
"""
errorLogFile = self.mktemp()
log.msg("Child process logging to " + errorLogFile)
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, 'stdio_test_loseconn.py', errorLogFile)
def processEnded(reason):
# Copy the child's log to ours so it's more visible.
for line in file(errorLogFile):
log.msg("Child logged: " + line.rstrip())
self.failIfIn(1, p.data)
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_readConnectionLost(self):
"""
When stdin is closed and the protocol connected to it implements
L{IHalfCloseableProtocol}, the protocol's C{readConnectionLost} method
is called.
"""
errorLogFile = self.mktemp()
log.msg("Child process logging to " + errorLogFile)
p = StandardIOTestProcessProtocol()
p.onDataReceived = defer.Deferred()
def cbBytes(ignored):
d = p.onCompletion
p.transport.closeStdin()
return d
p.onDataReceived.addCallback(cbBytes)
def processEnded(reason):
reason.trap(error.ProcessDone)
d = self._requireFailure(p.onDataReceived, processEnded)
self._spawnProcess(
p, 'stdio_test_halfclose.py', errorLogFile)
return d
def test_lastWriteReceived(self):
"""
Verify that a write made directly to stdout using L{os.write}
after StandardIO has finished is reliably received by the
process reading that stdout.
"""
p = StandardIOTestProcessProtocol()
# Note: the OS X bug which prompted the addition of this test
# is an apparent race condition involving non-blocking PTYs.
# Delaying the parent process significantly increases the
# likelihood of the race going the wrong way. If you need to
# fiddle with this code at all, uncommenting the next line
# will likely make your life much easier. It is commented out
# because it makes the test quite slow.
# p.onConnection.addCallback(lambda ign: __import__('time').sleep(5))
try:
self._spawnProcess(
p, 'stdio_test_lastwrite.py', UNIQUE_LAST_WRITE_STRING,
usePTY=True)
except ValueError, e:
# Some platforms don't work with usePTY=True
raise unittest.SkipTest(str(e))
def processEnded(reason):
"""
Asserts that the parent received the bytes written by the child
immediately after the child starts.
"""
self.assertTrue(
p.data[1].endswith(UNIQUE_LAST_WRITE_STRING),
"Received %r from child, did not find expected bytes." % (
p.data,))
reason.trap(error.ProcessDone)
return self._requireFailure(p.onCompletion, processEnded)
def test_hostAndPeer(self):
"""
Verify that the transport of a protocol connected to L{StandardIO}
has C{getHost} and C{getPeer} methods.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, 'stdio_test_hostpeer.py')
def processEnded(reason):
host, peer = p.data[1].splitlines()
self.failUnless(host)
self.failUnless(peer)
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_write(self):
"""
Verify that the C{write} method of the transport of a protocol
connected to L{StandardIO} sends bytes to standard out.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, 'stdio_test_write.py')
def processEnded(reason):
self.assertEquals(p.data[1], 'ok!')
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_writeSequence(self):
"""
Verify that the C{writeSequence} method of the transport of a
protocol connected to L{StandardIO} sends bytes to standard out.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, 'stdio_test_writeseq.py')
def processEnded(reason):
self.assertEquals(p.data[1], 'ok!')
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def _junkPath(self):
junkPath = self.mktemp()
junkFile = file(junkPath, 'w')
for i in xrange(1024):
junkFile.write(str(i) + '\n')
junkFile.close()
return junkPath
def test_producer(self):
"""
Verify that the transport of a protocol connected to L{StandardIO}
is a working L{IProducer} provider.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
written = []
toWrite = range(100)
def connectionMade(ign):
if toWrite:
written.append(str(toWrite.pop()) + "\n")
proc.write(written[-1])
reactor.callLater(0.01, connectionMade, None)
proc = self._spawnProcess(p, 'stdio_test_producer.py')
p.onConnection.addCallback(connectionMade)
def processEnded(reason):
self.assertEquals(p.data[1], ''.join(written))
self.failIf(toWrite, "Connection lost with %d writes left to go." % (len(toWrite),))
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_consumer(self):
"""
Verify that the transport of a protocol connected to L{StandardIO}
is a working L{IConsumer} provider.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
junkPath = self._junkPath()
self._spawnProcess(p, 'stdio_test_consumer.py', junkPath)
def processEnded(reason):
self.assertEquals(p.data[1], file(junkPath).read())
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_normalFileStandardOut(self):
"""
If L{StandardIO} is created with a file descriptor which refers to a
normal file (ie, a file from the filesystem), L{StandardIO.write}
writes bytes to that file. In particular, it does not immediately
consider the file closed or call its protocol's C{connectionLost}
method.
"""
onConnLost = defer.Deferred()
proto = ConnectionLostNotifyingProtocol(onConnLost)
path = filepath.FilePath(self.mktemp())
self.normal = normal = path.open('w')
self.addCleanup(normal.close)
kwargs = dict(stdout=normal.fileno())
if not platform.isWindows():
# Make a fake stdin so that StandardIO doesn't mess with the *real*
# stdin.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
kwargs['stdin'] = r
connection = stdio.StandardIO(proto, **kwargs)
# The reactor needs to spin a bit before it might have incorrectly
# decided stdout is closed. Use this counter to keep track of how
# much we've let it spin. If it closes before we expected, this
# counter will have a value that's too small and we'll know.
howMany = 5
count = itertools.count()
def spin():
for value in count:
if value == howMany:
connection.loseConnection()
return
connection.write(str(value))
break
reactor.callLater(0, spin)
reactor.callLater(0, spin)
# Once the connection is lost, make sure the counter is at the
# appropriate value.
def cbLost(reason):
self.assertEquals(count.next(), howMany + 1)
self.assertEquals(
path.getContent(),
''.join(map(str, range(howMany))))
onConnLost.addCallback(cbLost)
return onConnLost
if reactor.__class__.__name__ == 'EPollReactor':
test_normalFileStandardOut.skip = (
"epoll(7) does not support normal files. See #4429. "
"This should be a todo but technical limitations prevent "
"this.")
elif platform.isWindows():
test_normalFileStandardOut.skip = (
"StandardIO does not accept stdout as an argument to Windows. "
"Testing redirection to a file is therefore harder.")
    def test_normalFileStandardOutGoodEpollError(self):
        """
        Using StandardIO with epollreactor with stdout redirected to a
        normal file fails with a comprehensible error (until it is
        supported, when #4429 is resolved). See also #2259 and #3442.
        """
        path = filepath.FilePath(self.mktemp())
        normal = path.open('w')
        fd = normal.fileno()
        self.addCleanup(normal.close)

        # StandardIO must refuse the fd up front rather than failing later
        # inside the reactor's epoll registration.
        exc = self.assertRaises(
            RuntimeError,
            stdio.StandardIO, protocol.Protocol(), stdout=fd)
        self.assertEquals(
            str(exc),
            "This reactor does not support this type of file descriptor (fd "
            "%d, mode %d) (for example, epollreactor does not support normal "
            "files. See #4429)." % (fd, os.fstat(fd).st_mode))

    # Only epollreactor refuses normal-file descriptors; elsewhere the
    # constructor succeeds and this test is meaningless.
    if reactor.__class__.__name__ != 'EPollReactor':
        test_normalFileStandardOutGoodEpollError.skip = (
            "Only epollreactor is expected to fail with stdout redirected "
            "to a normal file.")
| apache-2.0 |
dbbhattacharya/kitsune | vendor/packages/sqlalchemy/test/orm/test_naturalpks.py | 6 | 33858 | """
Primary key changing capabilities and passive/non-passive cascading updates.
"""
from sqlalchemy.test.testing import eq_, ne_, \
assert_raises, assert_raises_message
import sqlalchemy as sa
from sqlalchemy.test import testing
from sqlalchemy import Integer, String, ForeignKey, Unicode
from sqlalchemy.test.schema import Table, Column
from sqlalchemy.orm import mapper, relationship, create_session, backref
from sqlalchemy.orm.session import make_transient
from sqlalchemy.test.testing import eq_
from test.orm import _base, _fixtures
class NaturalPKTest(_base.MappedTest):
    # Exercises mutation of natural (non-surrogate) primary keys and the
    # propagation of the new value onto referencing rows: either the
    # database's ON UPDATE CASCADE does the work (passive_updates=True) or
    # the unit of work emits explicit UPDATEs (passive_updates=False).

    @classmethod
    def define_tables(cls, metadata):
        # Oracle has no ON UPDATE CASCADE; deferred FK constraints let the
        # parent and child rows be updated in any order inside one
        # transaction instead.
        if testing.against('oracle'):
            fk_args = dict(deferrable=True, initially='deferred')
        else:
            fk_args = dict(onupdate='cascade')

        users = Table('users', metadata,
                      Column('username', String(50), primary_key=True),
                      Column('fullname', String(100)),
                      test_needs_fk=True)

        addresses = Table('addresses', metadata,
                          Column('email', String(50), primary_key=True),
                          Column('username', String(50),
                                 ForeignKey('users.username', **fk_args)),
                          test_needs_fk=True)

        items = Table('items', metadata,
                      Column('itemname', String(50), primary_key=True),
                      Column('description', String(100)),
                      test_needs_fk=True)

        users_to_items = Table('users_to_items', metadata,
                               Column('username', String(50),
                                      ForeignKey('users.username', **fk_args),
                                      primary_key=True),
                               Column('itemname', String(50),
                                      ForeignKey('items.itemname', **fk_args),
                                      primary_key=True),
                               test_needs_fk=True)

    @classmethod
    def setup_classes(cls):
        class User(_base.ComparableEntity):
            pass

        class Address(_base.ComparableEntity):
            pass

        class Item(_base.ComparableEntity):
            pass

    @testing.resolve_artifact_names
    def test_entity(self):
        # A PK mutation on a flushed instance re-keys it in the identity map.
        mapper(User, users)
        sess = create_session()
        u1 = User(username='jack', fullname='jack')
        sess.add(u1)
        sess.flush()
        assert sess.query(User).get('jack') is u1

        u1.username = 'ed'
        sess.flush()

        def go():
            assert sess.query(User).get('ed') is u1
        # No SQL expected: the identity map already holds the new key.
        self.assert_sql_count(testing.db, go, 0)
        assert sess.query(User).get('jack') is None

        sess.expunge_all()
        u1 = sess.query(User).get('ed')
        eq_(User(username='ed', fullname='jack'), u1)

    @testing.resolve_artifact_names
    def test_load_after_expire(self):
        mapper(User, users)
        sess = create_session()
        u1 = User(username='jack', fullname='jack')
        sess.add(u1)
        sess.flush()
        assert sess.query(User).get('jack') is u1

        # values() renders "SET username=:username"; the execute() keyword
        # overrides that bind param, so the row changes to 'ed' behind the
        # session's back.
        users.update(values={User.username:'jack'}).execute(username='ed')

        # expire/refresh works off of primary key. the PK is gone
        # in this case so theres no way to look it up. criterion-
        # based session invalidation could solve this [ticket:911]
        sess.expire(u1)
        assert_raises(sa.orm.exc.ObjectDeletedError, getattr, u1, 'username')

        sess.expunge_all()
        assert sess.query(User).get('jack') is None
        assert sess.query(User).get('ed').fullname == 'jack'

    @testing.resolve_artifact_names
    def test_flush_new_pk_after_expire(self):
        # Assigning a new PK to a fully expired instance still flushes.
        mapper(User, users)
        sess = create_session()
        u1 = User(username='jack', fullname='jack')
        sess.add(u1)
        sess.flush()
        assert sess.query(User).get('jack') is u1

        sess.expire(u1)
        u1.username = 'ed'
        sess.flush()
        sess.expunge_all()
        assert sess.query(User).get('ed').fullname == 'jack'

    @testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
    @testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE')
    def test_onetomany_passive(self):
        self._test_onetomany(True)

    def test_onetomany_nonpassive(self):
        self._test_onetomany(False)

    @testing.resolve_artifact_names
    def _test_onetomany(self, passive_updates):
        mapper(User, users, properties={
            'addresses':relationship(Address, passive_updates=passive_updates)
        })
        mapper(Address, addresses)

        sess = create_session()
        u1 = User(username='jack', fullname='jack')
        u1.addresses.append(Address(email='jack1'))
        u1.addresses.append(Address(email='jack2'))
        sess.add(u1)
        sess.flush()

        assert sess.query(Address).get('jack1') is u1.addresses[0]

        u1.username = 'ed'
        sess.flush()
        assert u1.addresses[0].username == 'ed'

        sess.expunge_all()
        eq_([Address(username='ed'), Address(username='ed')],
            sess.query(Address).all())

        u1 = sess.query(User).get('ed')
        u1.username = 'jack'
        def go():
            sess.flush()
        if not passive_updates:
            # test passive_updates=False;
            # load addresses, update user, update 2 addresses
            self.assert_sql_count(testing.db, go, 4)
        else:
            # test passive_updates=True; update user
            self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()

        assert User(username='jack', addresses=[
            Address(username='jack'),
            Address(username='jack')]) == \
            sess.query(User).get('jack')

        # Emptying the collection NULLs the child FKs rather than cascading
        # the subsequent PK change onto them.
        u1 = sess.query(User).get('jack')
        u1.addresses = []
        u1.username = 'fred'
        sess.flush()
        sess.expunge_all()
        assert sess.query(Address).get('jack1').username is None
        u1 = sess.query(User).get('fred')
        eq_(User(username='fred', fullname='jack'), u1)

    @testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
    @testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE')
    def test_manytoone_passive(self):
        self._test_manytoone(True)

    def test_manytoone_nonpassive(self):
        self._test_manytoone(False)

    @testing.resolve_artifact_names
    def _test_manytoone(self, passive_updates):
        mapper(User, users)
        mapper(Address, addresses, properties={
            'user':relationship(User, passive_updates=passive_updates)
        })

        sess = create_session()
        a1 = Address(email='jack1')
        a2 = Address(email='jack2')
        u1 = User(username='jack', fullname='jack')
        a1.user = u1
        a2.user = u1
        sess.add(a1)
        sess.add(a2)
        sess.flush()

        u1.username = 'ed'

        def go():
            sess.flush()
        # passive: only the users row is updated; the DB cascades.
        # non-passive: users row plus two addresses rows.
        if passive_updates:
            self.assert_sql_count(testing.db, go, 1)
        else:
            self.assert_sql_count(testing.db, go, 3)

        def go():
            sess.flush()
        # Second flush must be a no-op.
        self.assert_sql_count(testing.db, go, 0)

        assert a1.username == a2.username == 'ed'
        sess.expunge_all()
        eq_([Address(username='ed'), Address(username='ed')],
            sess.query(Address).all())

    @testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
    @testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE')
    def test_onetoone_passive(self):
        self._test_onetoone(True)

    def test_onetoone_nonpassive(self):
        self._test_onetoone(False)

    @testing.resolve_artifact_names
    def _test_onetoone(self, passive_updates):
        mapper(User, users, properties={
            "address":relationship(Address, passive_updates=passive_updates,
                                   uselist=False)
        })
        mapper(Address, addresses)

        sess = create_session()
        u1 = User(username='jack', fullname='jack')
        sess.add(u1)
        sess.flush()

        a1 = Address(email='jack1')
        u1.address = a1
        sess.add(a1)
        sess.flush()

        u1.username = 'ed'

        def go():
            sess.flush()
        if passive_updates:
            # Expire the scalar relationship so the flush does not try to
            # load it; only the users UPDATE should be emitted.
            sess.expire(u1, ['address'])
            self.assert_sql_count(testing.db, go, 1)
        else:
            self.assert_sql_count(testing.db, go, 2)

        def go():
            sess.flush()
        self.assert_sql_count(testing.db, go, 0)

        sess.expunge_all()
        eq_([Address(username='ed')], sess.query(Address).all())

    @testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
    @testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE')
    def test_bidirectional_passive(self):
        self._test_bidirectional(True)

    def test_bidirectional_nonpassive(self):
        self._test_bidirectional(False)

    @testing.resolve_artifact_names
    def _test_bidirectional(self, passive_updates):
        mapper(User, users)
        mapper(Address, addresses, properties={
            'user':relationship(User, passive_updates=passive_updates,
                                backref='addresses')})

        sess = create_session()
        a1 = Address(email='jack1')
        a2 = Address(email='jack2')
        u1 = User(username='jack', fullname='jack')
        a1.user = u1
        a2.user = u1
        sess.add(a1)
        sess.add(a2)
        sess.flush()

        u1.username = 'ed'
        (ad1, ad2) = sess.query(Address).all()
        eq_([Address(username='jack'), Address(username='jack')], [ad1, ad2])

        def go():
            sess.flush()
        if passive_updates:
            self.assert_sql_count(testing.db, go, 1)
        else:
            self.assert_sql_count(testing.db, go, 3)
        eq_([Address(username='ed'), Address(username='ed')], [ad1, ad2])

        sess.expunge_all()
        eq_([Address(username='ed'), Address(username='ed')],
            sess.query(Address).all())

        u1 = sess.query(User).get('ed')
        assert len(u1.addresses) == 2  # load addresses
        u1.username = 'fred'

        def go():
            sess.flush()
        # check that the passive_updates is on on the other side
        if passive_updates:
            self.assert_sql_count(testing.db, go, 1)
        else:
            self.assert_sql_count(testing.db, go, 3)
        sess.expunge_all()
        eq_([Address(username='fred'), Address(username='fred')],
            sess.query(Address).all())

    @testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
    @testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE')
    def test_manytomany_passive(self):
        self._test_manytomany(True)

    # mysqldb executemany() of the association table fails to
    # report the correct row count
    @testing.fails_if(lambda: testing.against('mysql')
                      and not testing.against('+zxjdbc'))
    def test_manytomany_nonpassive(self):
        self._test_manytomany(False)

    @testing.resolve_artifact_names
    def _test_manytomany(self, passive_updates):
        mapper(User, users, properties={
            'items':relationship(Item, secondary=users_to_items,
                                 backref='users',
                                 passive_updates=passive_updates)})
        mapper(Item, items)

        sess = create_session()
        u1 = User(username='jack')
        u2 = User(username='fred')
        i1 = Item(itemname='item1')
        i2 = Item(itemname='item2')

        u1.items.append(i1)
        u1.items.append(i2)
        i2.users.append(u2)
        sess.add(u1)
        sess.add(u2)
        sess.flush()

        r = sess.query(Item).all()
        # ComparableEntity can't handle a comparison with the backrefs
        # involved....
        eq_(Item(itemname='item1'), r[0])
        eq_(['jack'], [u.username for u in r[0].users])
        eq_(Item(itemname='item2'), r[1])
        eq_(['jack', 'fred'], [u.username for u in r[1].users])

        u2.username='ed'
        def go():
            sess.flush()
        # Statement count of the first flush varies by backend, so it is
        # simply executed; the follow-up flush is asserted to be a no-op.
        go()
        def go():
            sess.flush()
        self.assert_sql_count(testing.db, go, 0)

        sess.expunge_all()
        r = sess.query(Item).all()
        eq_(Item(itemname='item1'), r[0])
        eq_(['jack'], [u.username for u in r[0].users])
        eq_(Item(itemname='item2'), r[1])
        eq_(['ed', 'jack'], sorted([u.username for u in r[1].users]))

        sess.expunge_all()
        u2 = sess.query(User).get(u2.username)
        u2.username='wendy'
        sess.flush()
        r = sess.query(Item).with_parent(u2).all()
        eq_(Item(itemname='item2'), r[0])
class TransientExceptionTesst(_fixtures.FixtureTest):
    # NOTE(review): "Tesst" looks like a typo, but renaming the class would
    # change the test-discovery name, so it is left as-is.
    run_inserts = None

    @testing.resolve_artifact_names
    def test_transient_exception(self):
        """An object that goes from a pk value to transient/pending
        doesn't count as a "pk" switch.

        """
        mapper(User, users)
        mapper(Address, addresses, properties={'user':relationship(User)})

        sess = create_session()
        u1 = User(id=5, name='u1')
        ad1 = Address(email_address='e1', user=u1)
        sess.add_all([u1, ad1])
        sess.flush()

        # Detach u1 and re-add it with no PK: it must INSERT as a new row,
        # not UPDATE the old id=5 row.
        make_transient(u1)
        u1.id = None
        u1.username='u2'
        sess.add(u1)
        sess.flush()

        # The address still points at the original row.
        eq_(ad1.user_id, 5)

        sess.expire_all()
        eq_(ad1.user_id, 5)
        ne_(u1.id, 5)
        ne_(u1.id, None)
        eq_(sess.query(User).count(), 2)
class ReversePKsTest(_base.MappedTest):
    """reverse the primary keys of two entities and ensure bookkeeping
    succeeds."""

    @classmethod
    def define_tables(cls, metadata):
        # Composite PK (code, status): two rows can share a code as long as
        # status differs, which is what the swap below exploits.
        Table(
            'user', metadata,
            Column('code', Integer, primary_key=True),
            Column('status', Integer, primary_key=True),
            Column('username', Unicode(50), nullable=False),
            )

    @classmethod
    def setup_classes(cls):
        class User(_base.ComparableEntity):
            def __init__(self, code, status, username):
                self.code = code
                self.status = status
                self.username = username

    @testing.resolve_artifact_names
    def test_reverse(self):
        PUBLISHED, EDITABLE, ARCHIVED = 1, 2, 3

        mapper(User, user)

        session = sa.orm.sessionmaker()()

        a_published = User(1, PUBLISHED, u'a')
        session.add(a_published)
        session.commit()

        a_editable = User(1, EDITABLE, u'a')

        session.add(a_editable)
        session.commit()

        # do the switch in both directions -
        # one or the other should raise the error
        # based on platform dictionary ordering
        a_published.status = ARCHIVED
        a_editable.status = PUBLISHED

        session.commit()
        assert session.query(User).get([1, PUBLISHED]) is a_editable
        assert session.query(User).get([1, ARCHIVED]) is a_published

        a_published.status = PUBLISHED
        a_editable.status = EDITABLE

        session.commit()
        assert session.query(User).get([1, PUBLISHED]) is a_published
        assert session.query(User).get([1, EDITABLE]) is a_editable
class SelfReferentialTest(_base.MappedTest):
    # PK cascades on a self-referential (adjacency list) table.
    # mssql, mysql don't allow
    # ON UPDATE on self-referential keys
    __unsupported_on__ = ('mssql','mysql')

    @classmethod
    def define_tables(cls, metadata):
        # Oracle: no ON UPDATE CASCADE, use deferred FKs instead.
        if testing.against('oracle'):
            fk_args = dict(deferrable=True, initially='deferred')
        else:
            fk_args = dict(onupdate='cascade')

        Table('nodes', metadata,
              Column('name', String(50), primary_key=True),
              Column('parent', String(50),
                     ForeignKey('nodes.name', **fk_args)),
              test_needs_fk=True
              )

    @classmethod
    def setup_classes(cls):
        class Node(_base.ComparableEntity):
            pass

    @testing.resolve_artifact_names
    def test_one_to_many(self):
        # Non-passive on both sides: the ORM itself updates the children's
        # FK values when the parent's PK changes.
        mapper(Node, nodes, properties={
            'children': relationship(Node,
                                     backref=sa.orm.backref('parentnode',
                                                            remote_side=nodes.c.name,
                                                            passive_updates=False),
                                     passive_updates=False)})

        sess = create_session()
        n1 = Node(name='n1')
        n1.children.append(Node(name='n11'))
        n1.children.append(Node(name='n12'))
        n1.children.append(Node(name='n13'))
        sess.add(n1)
        sess.flush()

        n1.name = 'new n1'
        sess.flush()
        eq_(n1.children[1].parent, 'new n1')
        eq_(['new n1', 'new n1', 'new n1'],
            [n.parent
             for n in sess.query(Node).filter(
                 Node.name.in_(['n11', 'n12', 'n13']))])

    @testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
    @testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE')
    def test_many_to_one_passive(self):
        self._test_many_to_one(True)

    def test_many_to_one_nonpassive(self):
        self._test_many_to_one(False)

    @testing.resolve_artifact_names
    def _test_many_to_one(self, passive):
        mapper(Node, nodes, properties={
            'parentnode':relationship(Node,
                                      remote_side=nodes.c.name,
                                      passive_updates=passive)
            }
        )

        sess = create_session()
        n1 = Node(name='n1')
        n11 = Node(name='n11', parentnode=n1)
        n12 = Node(name='n12', parentnode=n1)
        n13 = Node(name='n13', parentnode=n1)
        sess.add_all([n1, n11, n12, n13])
        sess.flush()

        n1.name = 'new n1'
        sess.flush()
        if passive:
            # In passive mode the in-memory children are stale; expire so
            # the DB-side cascaded values are re-loaded.
            sess.expire_all()
        eq_(['new n1', 'new n1', 'new n1'],
            [n.parent
             for n in sess.query(Node).filter(
                 Node.name.in_(['n11', 'n12', 'n13']))])
class NonPKCascadeTest(_base.MappedTest):
    # Same cascade scenarios as NaturalPKTest, but the FK targets a UNIQUE
    # non-primary-key column (users.username) while the PK is a surrogate id.

    @classmethod
    def define_tables(cls, metadata):
        # Oracle: no ON UPDATE CASCADE, use deferred FKs instead.
        if testing.against('oracle'):
            fk_args = dict(deferrable=True, initially='deferred')
        else:
            fk_args = dict(onupdate='cascade')

        Table('users', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('username', String(50), unique=True),
              Column('fullname', String(100)),
              test_needs_fk=True)

        Table('addresses', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('email', String(50)),
              Column('username', String(50),
                     ForeignKey('users.username', **fk_args)),
              test_needs_fk=True
              )

    @classmethod
    def setup_classes(cls):
        class User(_base.ComparableEntity):
            pass

        class Address(_base.ComparableEntity):
            pass

    @testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
    @testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE')
    def test_onetomany_passive(self):
        self._test_onetomany(True)

    def test_onetomany_nonpassive(self):
        self._test_onetomany(False)

    @testing.resolve_artifact_names
    def _test_onetomany(self, passive_updates):
        mapper(User, users, properties={
            'addresses':relationship(Address,
                                     passive_updates=passive_updates)})
        mapper(Address, addresses)

        sess = create_session()
        u1 = User(username='jack', fullname='jack')
        u1.addresses.append(Address(email='jack1'))
        u1.addresses.append(Address(email='jack2'))
        sess.add(u1)
        sess.flush()
        a1 = u1.addresses[0]

        eq_(sa.select([addresses.c.username]).execute().fetchall(),
            [('jack',), ('jack',)])

        assert sess.query(Address).get(a1.id) is u1.addresses[0]

        u1.username = 'ed'
        sess.flush()
        assert u1.addresses[0].username == 'ed'

        eq_(sa.select([addresses.c.username]).execute().fetchall(),
            [('ed',), ('ed',)])

        sess.expunge_all()
        eq_([Address(username='ed'), Address(username='ed')],
            sess.query(Address).all())

        u1 = sess.query(User).get(u1.id)
        u1.username = 'jack'
        def go():
            sess.flush()
        if not passive_updates:
            # test passive_updates=False; load addresses,
            # update user, update 2 addresses
            self.assert_sql_count(testing.db, go, 4)
        else:
            # test passive_updates=True; update user
            self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()

        assert User(username='jack',
                    addresses=[Address(username='jack'),
                               Address(username='jack')]) == \
            sess.query(User).get(u1.id)
        sess.expunge_all()

        # Removing the children NULLs their FK column rather than cascading
        # the new username onto them.
        u1 = sess.query(User).get(u1.id)
        u1.addresses = []
        u1.username = 'fred'
        sess.flush()
        sess.expunge_all()
        a1 = sess.query(Address).get(a1.id)
        eq_(a1.username, None)

        eq_(sa.select([addresses.c.username]).execute().fetchall(),
            [(None,), (None,)])

        u1 = sess.query(User).get(u1.id)
        eq_(User(username='fred', fullname='jack'), u1)
class CascadeToFKPKTest(_base.MappedTest, testing.AssertsCompiledSQL):
    """A primary key mutation cascades onto a foreign key that is itself a
    primary key."""

    @classmethod
    def define_tables(cls, metadata):
        # Oracle: no ON UPDATE CASCADE, use deferred FKs instead.
        if testing.against('oracle'):
            fk_args = dict(deferrable=True, initially='deferred')
        else:
            fk_args = dict(onupdate='cascade')

        Table('users', metadata,
              Column('username', String(50), primary_key=True),
              test_needs_fk=True)

        # addresses.username is simultaneously an FK to users and part of
        # the composite primary key (username, email).
        Table('addresses', metadata,
              Column('username', String(50),
                     ForeignKey('users.username', **fk_args),
                     primary_key=True
                     ),
              Column('email', String(50), primary_key=True),
              Column('etc', String(50)),
              test_needs_fk=True
              )

    @classmethod
    def setup_classes(cls):
        class User(_base.ComparableEntity):
            pass

        class Address(_base.ComparableEntity):
            pass

    @testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
    @testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE')
    def test_onetomany_passive(self):
        self._test_onetomany(True)

    # PG etc. need passive=True to allow PK->PK cascade
    @testing.fails_on_everything_except('sqlite', 'oracle', '+zxjdbc')
    def test_onetomany_nonpassive(self):
        self._test_onetomany(False)

    def test_o2m_change_passive(self):
        self._test_o2m_change(True)

    def test_o2m_change_nonpassive(self):
        self._test_o2m_change(False)

    @testing.resolve_artifact_names
    def _test_o2m_change(self, passive_updates):
        """Change the PK of a related entity to another.

        "on update cascade" is not involved here, so the mapper has
        to do the UPDATE itself.

        """
        mapper(User, users, properties={
            'addresses':relationship(Address,
                                     passive_updates=passive_updates)})
        mapper(Address, addresses)

        sess = create_session()
        a1 = Address(username='ed', email='ed@host1')
        u1 = User(username='ed', addresses=[a1])
        u2 = User(username='jack')

        sess.add_all([a1, u1, u2])
        sess.flush()

        a1.username = 'jack'
        sess.flush()

    def test_o2m_move_passive(self):
        self._test_o2m_move(True)

    def test_o2m_move_nonpassive(self):
        self._test_o2m_move(False)

    @testing.resolve_artifact_names
    def _test_o2m_move(self, passive_updates):
        """Move the related entity to a different collection,
        changing its PK.

        """
        mapper(User, users, properties={
            'addresses':relationship(Address,
                                     passive_updates=passive_updates)})
        mapper(Address, addresses)

        sess = create_session()
        a1 = Address(username='ed', email='ed@host1')
        u1 = User(username='ed', addresses=[a1])
        u2 = User(username='jack')

        sess.add_all([a1, u1, u2])
        sess.flush()

        u1.addresses.remove(a1)
        u2.addresses.append(a1)
        sess.flush()

    @testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE '
                      'but requires referential integrity')
    @testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
    def test_change_m2o_passive(self):
        self._test_change_m2o(True)

    @testing.fails_on_everything_except('sqlite', 'oracle', '+zxjdbc')
    def test_change_m2o_nonpassive(self):
        self._test_change_m2o(False)

    @testing.resolve_artifact_names
    def _test_change_m2o(self, passive_updates):
        mapper(User, users)
        mapper(Address, addresses, properties={
            'user':relationship(User, passive_updates=passive_updates)
        })

        sess = create_session()
        u1 = User(username='jack')
        a1 = Address(user=u1, email='foo@bar')
        sess.add_all([u1, a1])
        sess.flush()

        u1.username='edmodified'
        sess.flush()
        eq_(a1.username, 'edmodified')

        sess.expire_all()
        eq_(a1.username, 'edmodified')

    def test_move_m2o_passive(self):
        self._test_move_m2o(True)

    def test_move_m2o_nonpassive(self):
        self._test_move_m2o(False)

    @testing.resolve_artifact_names
    def _test_move_m2o(self, passive_updates):
        # tests [ticket:1856]
        mapper(User, users)
        mapper(Address, addresses, properties={
            'user':relationship(User, passive_updates=passive_updates)
        })

        sess = create_session()
        u1 = User(username='jack')
        u2 = User(username='ed')
        a1 = Address(user=u1, email='foo@bar')
        sess.add_all([u1, u2, a1])
        sess.flush()

        a1.user = u2
        sess.flush()

    @testing.resolve_artifact_names
    def test_rowswitch_doesntfire(self):
        # Delete + re-add of identical PKs becomes a "row switch": only the
        # changed non-PK column should be UPDATEd.
        mapper(User, users)
        mapper(Address, addresses, properties={
            'user':relationship(User, passive_updates=True)
        })

        sess = create_session()
        u1 = User(username='ed')
        a1 = Address(user=u1, email='ed@host1')

        sess.add(u1)
        sess.add(a1)
        sess.flush()

        sess.delete(u1)
        sess.delete(a1)

        u2 = User(username='ed')
        a2 = Address(user=u2, email='ed@host1', etc='foo')
        sess.add(u2)
        sess.add(a2)

        from sqlalchemy.test.assertsql import CompiledSQL

        # test that the primary key columns of addresses are not
        # being updated as well, since this is a row switch.
        self.assert_sql_execution(testing.db,
                                  sess.flush,
                                  CompiledSQL(
                                      "UPDATE addresses SET etc=:etc WHERE "
                                      "addresses.username = :addresses_username AND"
                                      " addresses.email = :addresses_email",
                                      {'etc': 'foo', 'addresses_username':'ed',
                                       'addresses_email':'ed@host1'} ),
                                  )

    @testing.resolve_artifact_names
    def _test_onetomany(self, passive_updates):
        """Change the PK of a related entity via foreign key cascade.

        For databases that require "on update cascade", the mapper
        has to identify the row by the new value, not the old, when
        it does the update.

        """
        mapper(User, users, properties={
            'addresses':relationship(Address,
                                     passive_updates=passive_updates)})
        mapper(Address, addresses)

        sess = create_session()
        a1, a2 = Address(username='ed', email='ed@host1'),\
                 Address(username='ed', email='ed@host2')
        u1 = User(username='ed', addresses=[a1, a2])
        sess.add(u1)
        sess.flush()
        eq_(a1.username, 'ed')
        eq_(a2.username, 'ed')
        eq_(sa.select([addresses.c.username]).execute().fetchall(),
            [('ed',), ('ed',)])

        # Change the parent PK and a child's non-FK PK column in one flush.
        u1.username = 'jack'
        a2.email='ed@host3'
        sess.flush()

        eq_(a1.username, 'jack')
        eq_(a2.username, 'jack')
        eq_(sa.select([addresses.c.username]).execute().fetchall(),
            [('jack',), ('jack', )])
class JoinedInheritanceTest(_base.MappedTest):
    """Test cascades of pk->pk/fk on joined table inh."""

    # mssql doesn't allow ON UPDATE on self-referential keys
    __unsupported_on__ = ('mssql',)

    @classmethod
    def define_tables(cls, metadata):
        # Oracle: no ON UPDATE CASCADE, use deferred FKs instead.
        if testing.against('oracle'):
            fk_args = dict(deferrable=True, initially='deferred')
        else:
            fk_args = dict(onupdate='cascade')

        Table('person', metadata,
              Column('name', String(50), primary_key=True),
              Column('type', String(50), nullable=False),
              test_needs_fk=True)

        # engineer.name is both the joined-inheritance FK/PK to person and
        # the parent of an additional FK (boss_name -> manager.name).
        Table('engineer', metadata,
              Column('name', String(50), ForeignKey('person.name', **fk_args),
                     primary_key=True),
              Column('primary_language', String(50)),
              Column('boss_name', String(50),
                     ForeignKey('manager.name', **fk_args)),
              test_needs_fk=True
              )

        Table('manager', metadata,
              Column('name', String(50),
                     ForeignKey('person.name', **fk_args),
                     primary_key=True),
              Column('paperwork', String(50)),
              test_needs_fk=True
              )

    @classmethod
    def setup_classes(cls):
        class Person(_base.ComparableEntity):
            pass

        class Engineer(Person):
            pass

        class Manager(Person):
            pass

    @testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
    @testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE')
    def test_pk_passive(self):
        self._test_pk(True)

    # PG etc. need passive=True to allow PK->PK cascade
    @testing.fails_on_everything_except('sqlite', 'oracle', '+zxjdbc')
    def test_pk_nonpassive(self):
        self._test_pk(False)

    @testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
    @testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE')
    def test_fk_passive(self):
        self._test_fk(True)

    # PG etc. need passive=True to allow PK->PK cascade
    @testing.fails_on_everything_except('sqlite', 'mysql+zxjdbc',
                                        'postgresql+zxjdbc')
    def test_fk_nonpassive(self):
        self._test_fk(False)

    @testing.resolve_artifact_names
    def _test_pk(self, passive_updates):
        # Renaming an Engineer must cascade person.name -> engineer.name.
        mapper(Person, person, polymorphic_on=person.c.type,
               polymorphic_identity='person',
               passive_updates=passive_updates)
        mapper(Engineer, engineer, inherits=Person,
               polymorphic_identity='engineer', properties={
                   'boss':relationship(Manager,
                                       primaryjoin=manager.c.name==engineer.c.boss_name,
                                       passive_updates=passive_updates
                                       )
               })
        mapper(Manager, manager, inherits=Person,
               polymorphic_identity='manager')
        sess = sa.orm.sessionmaker()()
        e1 = Engineer(name='dilbert', primary_language='java')
        sess.add(e1)
        sess.commit()
        e1.name = 'wally'
        e1.primary_language = 'c++'
        sess.commit()

    @testing.resolve_artifact_names
    def _test_fk(self, passive_updates):
        # Renaming a Manager must cascade onto engineer.boss_name as well
        # as onto the inheritance FK.
        mapper(Person, person, polymorphic_on=person.c.type,
               polymorphic_identity='person',
               passive_updates=passive_updates)
        mapper(Engineer, engineer, inherits=Person,
               polymorphic_identity='engineer', properties={
                   'boss':relationship(Manager,
                                       primaryjoin=manager.c.name==engineer.c.boss_name,
                                       passive_updates=passive_updates
                                       )
               })
        mapper(Manager, manager, inherits=Person,
               polymorphic_identity='manager')

        sess = sa.orm.sessionmaker()()

        m1 = Manager(name='dogbert', paperwork='lots')
        e1, e2 = \
            Engineer(name='dilbert', primary_language='java', boss=m1),\
            Engineer(name='wally', primary_language='c++', boss=m1)
        sess.add_all([
            e1, e2, m1
        ])
        sess.commit()

        m1.name = 'pointy haired'
        e1.primary_language = 'scala'
        e2.primary_language = 'cobol'
        sess.commit()
| bsd-3-clause |
aljoscha/flink | flink-python/pyflink/datastream/data_stream.py | 2 | 54004 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import Callable, Union, List
from pyflink.common import typeinfo, ExecutionConfig, Row
from pyflink.common.typeinfo import RowTypeInfo, Types, TypeInformation
from pyflink.common.watermark_strategy import WatermarkStrategy
from pyflink.datastream.functions import _get_python_env, FlatMapFunctionWrapper, FlatMapFunction, \
MapFunction, MapFunctionWrapper, Function, FunctionWrapper, SinkFunction, FilterFunction, \
FilterFunctionWrapper, KeySelectorFunctionWrapper, KeySelector, ReduceFunction, \
ReduceFunctionWrapper, CoMapFunction, CoFlatMapFunction, Partitioner, \
PartitionerFunctionWrapper, RuntimeContext, ProcessFunction, KeyedProcessFunction
from pyflink.datastream.utils import convert_to_python_obj
from pyflink.java_gateway import get_gateway
class DataStream(object):
"""
A DataStream represents a stream of elements of the same type. A DataStream can be transformed
into another DataStream by applying a transformation as for example:
::
>>> DataStream.map(MapFunctionImpl())
>>> DataStream.filter(FilterFunctionImpl())
"""
    def __init__(self, j_data_stream):
        """
        Creates a Python DataStream wrapping the given Java DataStream.

        :param j_data_stream: The Java DataStream object all operations delegate to.
        """
        self._j_data_stream = j_data_stream
def get_name(self) -> str:
"""
Gets the name of the current data stream. This name is used by the visualization and logging
during runtime.
:return: Name of the stream.
"""
return self._j_data_stream.getName()
def name(self, name: str) -> 'DataStream':
"""
Sets the name of the current data stream. This name is used by the visualization and logging
during runting.
:param name: Name of the stream.
:return: The named operator.
"""
self._j_data_stream.name(name)
return self
def uid(self, uid: str) -> 'DataStream':
"""
Sets an ID for this operator. The specified ID is used to assign the same operator ID across
job submissions (for example when starting a job from a savepoint).
Important: this ID needs to be unique per transformation and job. Otherwise, job submission
will fail.
:param uid: The unique user-specified ID of this transformation.
:return: The operator with the specified ID.
"""
self._j_data_stream.uid(uid)
return self
def set_uid_hash(self, uid_hash: str) -> 'DataStream':
"""
Sets an user provided hash for this operator. This will be used AS IS the create the
JobVertexID. The user provided hash is an alternative to the generated hashed, that is
considered when identifying an operator through the default hash mechanics fails (e.g.
because of changes between Flink versions).
Important: this should be used as a workaround or for trouble shooting. The provided hash
needs to be unique per transformation and job. Otherwise, job submission will fail.
Furthermore, you cannot assign user-specified hash to intermediate nodes in an operator
chain and trying so will let your job fail.
A use case for this is in migration between Flink versions or changing the jobs in a way
that changes the automatically generated hashes. In this case, providing the previous hashes
directly through this method (e.g. obtained from old logs) can help to reestablish a lost
mapping from states to their target operator.
:param uid_hash: The user provided hash for this operator. This will become the jobVertexID,
which is shown in the logs and web ui.
:return: The operator with the user provided hash.
"""
self._j_data_stream.setUidHash(uid_hash)
return self
def set_parallelism(self, parallelism: int) -> 'DataStream':
"""
Sets the parallelism for this operator.
:param parallelism: THe parallelism for this operator.
:return: The operator with set parallelism.
"""
self._j_data_stream.setParallelism(parallelism)
return self
def set_max_parallelism(self, max_parallelism: int) -> 'DataStream':
"""
Sets the maximum parallelism of this operator.
The maximum parallelism specifies the upper bound for dynamic scaling. It also defines the
number of key groups used for partitioned state.
:param max_parallelism: Maximum parallelism.
:return: The operator with set maximum parallelism.
"""
self._j_data_stream.setMaxParallelism(max_parallelism)
return self
def get_type(self) -> TypeInformation:
"""
Gets the type of the stream.
:return: The type of the DataStream.
"""
return typeinfo._from_java_type(self._j_data_stream.getType())
def get_execution_environment(self):
"""
Returns the StreamExecutionEnvironment that was used to create this DataStream.
:return: The Execution Environment.
"""
from pyflink.datastream import StreamExecutionEnvironment
return StreamExecutionEnvironment(
j_stream_execution_environment=self._j_data_stream.getExecutionEnvironment())
def get_execution_config(self) -> ExecutionConfig:
return ExecutionConfig(j_execution_config=self._j_data_stream.getExecutionConfig())
    def force_non_parallel(self) -> 'DataStream':
        """
        Sets the parallelism and maximum parallelism of this operator to one, and marks that
        this operator cannot be given a parallelism other than one.

        :return: The operator with only one parallelism.
        """
        self._j_data_stream.forceNonParallel()
        return self
    def set_buffer_timeout(self, timeout_millis: int) -> 'DataStream':
        """
        Sets the buffering timeout for data produced by this operation. The timeout defines
        how long data may linger in a partially full buffer before being sent over the
        network.

        Lower timeouts lead to lower tail latencies, but may affect throughput. Timeouts of
        1 ms still sustain high throughput, even for jobs with high parallelism.

        A value of '-1' means that the default buffer timeout should be used. A value of '0'
        indicates that no buffering should happen, and all records/events should be
        immediately sent through the network, without additional buffering.

        :param timeout_millis: The maximum time between two output flushes.
        :return: The operator with buffer timeout set.
        """
        self._j_data_stream.setBufferTimeout(timeout_millis)
        return self
    def start_new_chain(self) -> 'DataStream':
        """
        Starts a new task chain beginning at this operator. This operator will be chained
        (thread co-located for increased performance) to any previous tasks even if possible.

        :return: The operator with chaining set.
        """
        self._j_data_stream.startNewChain()
        return self
    def disable_chaining(self) -> 'DataStream':
        """
        Turns off chaining for this operator so thread co-location will not be used as an
        optimization.

        Chaining can be turned off for the whole job by
        StreamExecutionEnvironment.disableOperatorChaining(), however it is not advised for
        performance considerations.

        :return: The operator with chaining disabled.
        """
        self._j_data_stream.disableChaining()
        return self
    def slot_sharing_group(self, slot_sharing_group: str) -> 'DataStream':
        """
        Sets the slot sharing group of this operation. Parallel instances of operations that
        are in the same slot sharing group will be co-located in the same TaskManager slot,
        if possible.

        Operations inherit the slot sharing group of input operations if all input operations
        are in the same slot sharing group and no slot sharing group was explicitly specified.

        Initially an operation is in the default slot sharing group. An operation can be put
        into the default group explicitly by setting the slot sharing group to 'default'.

        :param slot_sharing_group: The slot sharing group name.
        :return: This operator.
        """
        self._j_data_stream.slotSharingGroup(slot_sharing_group)
        return self
def map(self, func: Union[Callable, MapFunction], output_type: TypeInformation = None) \
-> 'DataStream':
"""
Applies a Map transformation on a DataStream. The transformation calls a MapFunction for
each element of the DataStream. Each MapFunction call returns exactly one element. The user
can also extend RichMapFunction to gain access to other features provided by the
RichFunction interface.
Note that If user does not specify the output data type, the output data will be serialized
as pickle primitive byte array.
:param func: The MapFunction that is called for each element of the DataStream.
:param output_type: The type information of the MapFunction output data.
:return: The transformed DataStream.
"""
if not isinstance(func, MapFunction):
if callable(func):
func = MapFunctionWrapper(func) # type: ignore
else:
raise TypeError("The input must be a MapFunction or a callable function")
from pyflink.fn_execution import flink_fn_execution_pb2
j_operator, j_output_type_info = _get_one_input_stream_operator(
self,
func, # type: ignore
flink_fn_execution_pb2.UserDefinedDataStreamFunction.MAP, # type: ignore
output_type)
return DataStream(self._j_data_stream.transform(
"Map",
j_output_type_info,
j_operator
))
def flat_map(self, func: Union[Callable, FlatMapFunction],
result_type: TypeInformation = None) -> 'DataStream':
"""
Applies a FlatMap transformation on a DataStream. The transformation calls a FlatMapFunction
for each element of the DataStream. Each FlatMapFunction call can return any number of
elements including none. The user can also extend RichFlatMapFunction to gain access to
other features provided by the RichFUnction.
:param func: The FlatMapFunction that is called for each element of the DataStream.
:param result_type: The type information of output data.
:return: The transformed DataStream.
"""
if not isinstance(func, FlatMapFunction):
if callable(func):
func = FlatMapFunctionWrapper(func) # type: ignore
else:
raise TypeError("The input must be a FlatMapFunction or a callable function")
from pyflink.fn_execution import flink_fn_execution_pb2
j_operator, j_output_type_info = _get_one_input_stream_operator(
self,
func, # type: ignore
flink_fn_execution_pb2.UserDefinedDataStreamFunction.FLAT_MAP, # type: ignore
result_type)
return DataStream(self._j_data_stream.transform(
"FLAT_MAP",
j_output_type_info,
j_operator
))
    def key_by(self, key_selector: Union[Callable, KeySelector],
               key_type_info: TypeInformation = None) -> 'KeyedStream':
        """
        Creates a new KeyedStream that uses the provided key for partitioning its operator
        states.

        Internally the stream is mapped to ``Row(key, original_value)`` so that the Python
        key is available to the Java ``KeyByKeySelector``; downstream keyed operations later
        strip the key column off again.

        :param key_selector: The KeySelector to be used for extracting the key for
                             partitioning.
        :param key_type_info: The type information describing the key type. If omitted, keys
                              are serialized as pickled byte arrays.
        :return: The DataStream with partitioned state (i.e. KeyedStream).
        """
        if callable(key_selector):
            key_selector = KeySelectorFunctionWrapper(key_selector) # type: ignore
        if not isinstance(key_selector, (KeySelector, KeySelectorFunctionWrapper)):
            raise TypeError("Parameter key_selector should be a type of KeySelector.")
        output_type_info = typeinfo._from_java_type(
            self._j_data_stream.getTransformation().getOutputType())
        # When no key type is given, the key is carried as a pickled byte array and the Java
        # side must be told so it can deserialize it correctly.
        is_key_pickled_byte_array = False
        if key_type_info is None:
            key_type_info = Types.PICKLED_BYTE_ARRAY()
            is_key_pickled_byte_array = True
        intermediate_map_stream = self.map(
            lambda x: Row(key_selector.get_key(x), x), # type: ignore
            output_type=Types.ROW([key_type_info, output_type_info]))
        gateway = get_gateway()
        JKeyByKeySelector = gateway.jvm.KeyByKeySelector
        # The well-known operator name lets PythonConfigUtil recognize and fuse this
        # intermediate map during job graph generation.
        intermediate_map_stream.name(gateway.jvm.org.apache.flink.python.util.PythonConfigUtil
                                     .STREAM_KEY_BY_MAP_OPERATOR_NAME)
        key_stream = KeyedStream(
            intermediate_map_stream._j_data_stream.keyBy(
                JKeyByKeySelector(is_key_pickled_byte_array),
                key_type_info.get_java_type_info()), output_type_info,
            self)
        return key_stream
def filter(self, func: Union[Callable, FilterFunction]) -> 'DataStream':
"""
Applies a Filter transformation on a DataStream. The transformation calls a FilterFunction
for each element of the DataStream and retains only those element for which the function
returns true. Elements for which the function returns false are filtered. The user can also
extend RichFilterFunction to gain access to other features provided by the RichFunction
interface.
:param func: The FilterFunction that is called for each element of the DataStream.
:return: The filtered DataStream.
"""
class FilterFlatMap(FlatMapFunction):
def __init__(self, filter_func):
self._func = filter_func
def flat_map(self, value):
if self._func.filter(value):
yield value
if callable(func):
func = FilterFunctionWrapper(func) # type: ignore
elif not isinstance(func, FilterFunction):
raise TypeError("func must be a Callable or instance of FilterFunction.")
type_info = typeinfo._from_java_type(
self._j_data_stream.getTransformation().getOutputType())
data_stream = self.flat_map(FilterFlatMap(func), result_type=type_info)
data_stream.name("Filter")
return data_stream
def union(self, *streams: 'DataStream') -> 'DataStream':
"""
Creates a new DataStream by merging DataStream outputs of the same type with each other. The
DataStreams merged using this operator will be transformed simultaneously.
:param streams: The DataStream to union outputwith.
:return: The DataStream.
"""
j_data_streams = []
for data_stream in streams:
j_data_streams.append(data_stream._j_data_stream)
gateway = get_gateway()
JDataStream = gateway.jvm.org.apache.flink.streaming.api.datastream.DataStream
j_data_stream_arr = get_gateway().new_array(JDataStream, len(j_data_streams))
for i in range(len(j_data_streams)):
j_data_stream_arr[i] = j_data_streams[i]
j_united_stream = self._j_data_stream.union(j_data_stream_arr)
return DataStream(j_data_stream=j_united_stream)
    def connect(self, ds: 'DataStream') -> 'ConnectedStreams':
        """
        Creates a new 'ConnectedStreams' by connecting 'DataStream' outputs of (possibly)
        different types with each other. The DataStreams connected using this operator can
        be used with CoFunctions to apply joint transformations.

        :param ds: The DataStream with which this stream will be connected.
        :return: The `ConnectedStreams`.
        """
        return ConnectedStreams(self, ds)
    def shuffle(self) -> 'DataStream':
        """
        Sets the partitioning of the DataStream so that the output elements are shuffled
        uniformly randomly to the next operation.

        :return: The DataStream with shuffle partitioning set.
        """
        return DataStream(self._j_data_stream.shuffle())
def project(self, *field_indexes: int) -> 'DataStream':
"""
Initiates a Project transformation on a Tuple DataStream.
Note that only Tuple DataStreams can be projected.
:param field_indexes: The field indexes of the input tuples that are retained. The order of
fields in the output tuple corresponds to the order of field indexes.
:return: The projected DataStream.
"""
if not isinstance(self.get_type(), typeinfo.TupleTypeInfo):
raise Exception('Only Tuple DataStreams can be projected.')
gateway = get_gateway()
j_index_arr = gateway.new_array(gateway.jvm.int, len(field_indexes))
for i in range(len(field_indexes)):
j_index_arr[i] = field_indexes[i]
return DataStream(self._j_data_stream.project(j_index_arr))
    def rescale(self) -> 'DataStream':
        """
        Sets the partitioning of the DataStream so that the output elements are distributed
        evenly to a subset of instances of the next operation in a round-robin fashion.

        The subset of downstream operations to which the upstream operation sends elements
        depends on the degree of parallelism of both the upstream and downstream operation.
        For example, if the upstream operation has parallelism 2 and the downstream operation
        has parallelism 4, then one upstream operation would distribute elements to two
        downstream operations. If, on the other hand, the downstream operation has
        parallelism 2 and the upstream operation has parallelism 4, then two upstream
        operations will distribute to one downstream operation while the other two upstream
        operations will distribute to the other downstream operation.

        In cases where the parallelisms are not multiples of each other, one or several
        downstream operations will have a differing number of inputs from upstream
        operations.

        :return: The DataStream with rescale partitioning set.
        """
        return DataStream(self._j_data_stream.rescale())
    def rebalance(self) -> 'DataStream':
        """
        Sets the partitioning of the DataStream so that the output elements are distributed
        evenly to instances of the next operation in a round-robin fashion.

        :return: The DataStream with rebalance partitioning set.
        """
        return DataStream(self._j_data_stream.rebalance())
    def forward(self) -> 'DataStream':
        """
        Sets the partitioning of the DataStream so that the output elements are forwarded to
        the local sub-task of the next operation.

        :return: The DataStream with forward partitioning set.
        """
        return DataStream(self._j_data_stream.forward())
    def broadcast(self) -> 'DataStream':
        """
        Sets the partitioning of the DataStream so that the output elements are broadcast to
        every parallel instance of the next operation.

        :return: The DataStream with broadcast partitioning set.
        """
        return DataStream(self._j_data_stream.broadcast())
    def process(self, func: ProcessFunction, output_type: TypeInformation = None) -> 'DataStream':
        """
        Applies the given ProcessFunction on the input stream, thereby creating a transformed
        output stream.

        The function will be called for every element in the input streams and can produce
        zero or more output elements.

        :param func: The ProcessFunction that is called for each element in the stream.
        :param output_type: TypeInformation for the result type of the function.
        :return: The transformed DataStream.
        """
        from pyflink.fn_execution import flink_fn_execution_pb2
        j_python_data_stream_function_operator, j_output_type_info = \
            _get_one_input_stream_operator(
                self,
                func,
                flink_fn_execution_pb2.UserDefinedDataStreamFunction.PROCESS, # type: ignore
                output_type)
        return DataStream(self._j_data_stream.transform(
            "PROCESS",
            j_output_type_info,
            j_python_data_stream_function_operator))
    def assign_timestamps_and_watermarks(self, watermark_strategy: WatermarkStrategy) -> \
            'DataStream':
        """
        Assigns timestamps to the elements in the data stream and generates watermarks to
        signal event time progress. The given ``WatermarkStrategy`` is used to create a
        TimestampAssigner and WatermarkGenerator.

        :param watermark_strategy: The strategy to generate watermarks based on event
                                   timestamps.
        :return: The stream after the transformation, with assigned timestamps and
                 watermarks.
        """
        if watermark_strategy._timestamp_assigner is not None:
            # The user implemented a Python TimestampAssigner, so timestamps must be
            # extracted and watermarks generated by a custom Python operator on the Java
            # side.
            from pyflink.fn_execution import flink_fn_execution_pb2 as ffpb2
            gateway = get_gateway()
            import cloudpickle
            # Ship the Python assigner to the Java operator as a pickled function.
            serialized_func = cloudpickle.dumps(watermark_strategy._timestamp_assigner)
            JDataStreamPythonFunction = gateway.jvm.DataStreamPythonFunction
            j_data_stream_python_function = JDataStreamPythonFunction(
                bytearray(serialized_func),
                _get_python_env())
            JDataStreamPythonFunctionInfo = gateway.jvm.DataStreamPythonFunctionInfo
            j_data_stream_python_function_info = JDataStreamPythonFunctionInfo(
                j_data_stream_python_function,
                ffpb2.UserDefinedDataStreamFunction.TIMESTAMP_ASSIGNER) # type: ignore
            j_conf = gateway.jvm.org.apache.flink.configuration.Configuration()
            # The operator's output type equals the input type: only timestamps/watermarks
            # are attached, the records themselves pass through unchanged.
            j_output_type = self._j_data_stream.getType()
            j_operator = gateway.jvm\
                .org.apache.flink.streaming.api.operators.python\
                .PythonTimestampsAndWatermarksOperator(
                    j_conf,
                    j_output_type,
                    j_data_stream_python_function_info,
                    watermark_strategy._j_watermark_strategy)
            operator_name = gateway.jvm.org.apache.flink.streaming.api.operators.python\
                .PythonTimestampsAndWatermarksOperator.STREAM_TIMESTAMP_AND_WATERMARK_OPERATOR_NAME
            return DataStream(self._j_data_stream.transform(
                operator_name,
                j_output_type,
                j_operator))
        else:
            # No Python TimestampAssigner was specified, so the Java watermark strategy can
            # be applied directly.
            return DataStream(self._j_data_stream.assignTimestampsAndWatermarks(
                watermark_strategy._j_watermark_strategy))
    def partition_custom(self, partitioner: Union[Callable, Partitioner],
                         key_selector: Union[Callable, KeySelector]) -> 'DataStream':
        """
        Partitions a DataStream on the key returned by the selector, using a custom
        partitioner. This method takes the key selector to get the key to partition on, and a
        partitioner that accepts the key type.

        Note that this method works only on single field keys, i.e. the selector cannot
        return tuples of fields.

        :param partitioner: The partitioner to assign partitions to keys.
        :param key_selector: The KeySelector with which the DataStream is partitioned.
        :return: The partitioned DataStream.
        """
        if callable(key_selector):
            key_selector = KeySelectorFunctionWrapper(key_selector) # type: ignore
        if not isinstance(key_selector, (KeySelector, KeySelectorFunctionWrapper)):
            raise TypeError("Parameter key_selector should be a type of KeySelector.")
        if callable(partitioner):
            partitioner = PartitionerFunctionWrapper(partitioner) # type: ignore
        if not isinstance(partitioner, (Partitioner, PartitionerFunctionWrapper)):
            raise TypeError("Parameter partitioner should be a type of Partitioner.")
        gateway = get_gateway()
        class PartitionCustomMapFunction(MapFunction):
            """
            A wrapper class for partition_custom map function. It indicates that it is a
            partition custom operation that we need to apply PythonPartitionCustomOperator
            to run the map function.
            """
            def __init__(self):
                # The downstream parallelism; injected at runtime via a job parameter.
                self.num_partitions = None
            def open(self, runtime_context: RuntimeContext):
                self.num_partitions = int(runtime_context.get_job_parameter(
                    "NUM_PARTITIONS", "-1"))
                if self.num_partitions <= 0:
                    raise ValueError(
                        "The partition number should be a positive value, got %s"
                        % self.num_partitions)
            def map(self, value):
                # Evaluate the user partitioner in Python and attach the chosen partition
                # index so the Java side can route by it.
                partition = partitioner.partition(key_selector.get_key(value), self.num_partitions)
                return Row(partition, value)
            def __repr__(self) -> str:
                return '_Flink_PartitionCustomMapFunction'
        original_type_info = self.get_type()
        intermediate_map_stream = self.map(PartitionCustomMapFunction(),
                                           output_type=Types.ROW([Types.INT(), original_type_info]))
        # The well-known operator name lets PythonConfigUtil recognize this intermediate map
        # during job graph generation.
        intermediate_map_stream.name(
            gateway.jvm.org.apache.flink.python.util.PythonConfigUtil
            .STREAM_PARTITION_CUSTOM_MAP_OPERATOR_NAME)
        JPartitionCustomKeySelector = gateway.jvm.PartitionCustomKeySelector
        JIdParitioner = gateway.jvm.org.apache.flink.api.java.functions.IdPartitioner
        # The partition index was already computed in Python, so the Java side just routes
        # each record to the partition stored in the Row's first field.
        intermediate_map_stream = DataStream(intermediate_map_stream._j_data_stream
                                             .partitionCustom(JIdParitioner(),
                                                              JPartitionCustomKeySelector()))
        # Strip the partition index off again so downstream operators see the original data.
        values_map_stream = intermediate_map_stream.map(lambda x: x[1], original_type_info)
        values_map_stream.name(gateway.jvm.org.apache.flink.python.util.PythonConfigUtil
                               .KEYED_STREAM_VALUE_OPERATOR_NAME)
        return DataStream(values_map_stream._j_data_stream)
    def add_sink(self, sink_func: SinkFunction) -> 'DataStreamSink':
        """
        Adds the given sink to this DataStream. Only streams with sinks added will be
        executed once the StreamExecutionEnvironment.execute() method is called.

        :param sink_func: The SinkFunction object.
        :return: The closed DataStream.
        """
        return DataStreamSink(self._j_data_stream.addSink(sink_func.get_java_function()))
def execute_and_collect(self, job_execution_name: str = None, limit: int = None) \
-> Union['CloseableIterator', list]:
"""
Triggers the distributed execution of the streaming dataflow and returns an iterator over
the elements of the given DataStream.
The DataStream application is executed in the regular distributed manner on the target
environment, and the events from the stream are polled back to this application process and
thread through Flink's REST API.
The returned iterator must be closed to free all cluster resources.
:param job_execution_name: The name of the job execution.
:param limit: The limit for the collected elements.
"""
if job_execution_name is None and limit is None:
return CloseableIterator(self._j_data_stream.executeAndCollect(), self.get_type())
elif job_execution_name is not None and limit is None:
return CloseableIterator(self._j_data_stream.executeAndCollect(job_execution_name),
self.get_type())
if job_execution_name is None and limit is not None:
return list(map(lambda data: convert_to_python_obj(data, self.get_type()),
self._j_data_stream.executeAndCollect(limit)))
else:
return list(map(lambda data: convert_to_python_obj(data, self.get_type()),
self._j_data_stream.executeAndCollect(job_execution_name, limit)))
    def print(self, sink_identifier: str = None) -> 'DataStreamSink':
        """
        Writes a DataStream to the standard output stream (stdout).

        For each element of the DataStream the object string is written.

        NOTE: This will print to stdout on the machine where the code is executed, i.e. the
        Flink worker, and is not fault tolerant.

        :param sink_identifier: The string to prefix the output with.
        :return: The closed DataStream.
        """
        if sink_identifier is not None:
            j_data_stream_sink = self._align_output_type()._j_data_stream.print(sink_identifier)
        else:
            j_data_stream_sink = self._align_output_type()._j_data_stream.print()
        return DataStreamSink(j_data_stream_sink)
    def _align_output_type(self) -> 'DataStream':
        """
        Transforms the pickled python objects into strings if the output type is
        PickledByteArrayInfo, so that print() produces readable output instead of raw pickled
        bytes. Streams with any other output type are returned unchanged.
        """
        output_type_info_class = self._j_data_stream.getTransformation().getOutputType().getClass()
        if output_type_info_class.isAssignableFrom(
                Types.PICKLED_BYTE_ARRAY().get_java_type_info()
                .getClass()):
            def python_obj_to_str_map_func(value):
                # str/bytes pass through untouched; everything else is stringified.
                if not isinstance(value, (str, bytes)):
                    value = str(value)
                return value
            transformed_data_stream = DataStream(
                self.map(python_obj_to_str_map_func,
                         output_type=Types.STRING())._j_data_stream)
            return transformed_data_stream
        else:
            return self
class DataStreamSink(object):
    """
    A Stream Sink. This is used for emitting elements from a streaming topology.

    All methods are thin delegations to the wrapped Java DataStreamSink and return self to
    allow fluent chaining.
    """
    def __init__(self, j_data_stream_sink):
        """
        The constructor of DataStreamSink.

        :param j_data_stream_sink: A DataStreamSink java object.
        """
        self._j_data_stream_sink = j_data_stream_sink
    def name(self, name: str) -> 'DataStreamSink':
        """
        Sets the name of this sink. This name is used by the visualization and logging during
        runtime.

        :param name: The name of this sink.
        :return: The named sink.
        """
        self._j_data_stream_sink.name(name)
        return self
    def uid(self, uid: str) -> 'DataStreamSink':
        """
        Sets an ID for this operator. The specified ID is used to assign the same operator ID
        across job submissions (for example when starting a job from a savepoint).

        Important: this ID needs to be unique per transformation and job. Otherwise, job
        submission will fail.

        :param uid: The unique user-specified ID of this transformation.
        :return: The operator with the specified ID.
        """
        self._j_data_stream_sink.uid(uid)
        return self
    def set_uid_hash(self, uid_hash: str) -> 'DataStreamSink':
        """
        Sets a user provided hash for this operator. This will be used as-is to create the
        JobVertexID. The user provided hash is an alternative to the generated hashes, that
        is considered when identifying an operator through the default hash mechanism fails
        (e.g. because of changes between Flink versions).

        Important: this should be used as a workaround or for trouble shooting. The provided
        hash needs to be unique per transformation and job. Otherwise, job submission will
        fail. Furthermore, you cannot assign user-specified hash to intermediate nodes in an
        operator chain and trying so will let your job fail.

        A use case for this is in migration between Flink versions or changing the jobs in a
        way that changes the automatically generated hashes. In this case, providing the
        previous hashes directly through this method (e.g. obtained from old logs) can help
        to reestablish a lost mapping from states to their target operator.

        :param uid_hash: The user provided hash for this operator. This will become the
                         jobVertexID, which is shown in the logs and web ui.
        :return: The operator with the user provided hash.
        """
        self._j_data_stream_sink.setUidHash(uid_hash)
        return self
    def set_parallelism(self, parallelism: int) -> 'DataStreamSink':
        """
        Sets the parallelism for this operator.

        :param parallelism: The parallelism for this operator.
        :return: The operator with set parallelism.
        """
        self._j_data_stream_sink.setParallelism(parallelism)
        return self
    def disable_chaining(self) -> 'DataStreamSink':
        """
        Turns off chaining for this operator so thread co-location will not be used as an
        optimization.

        Chaining can be turned off for the whole job by
        StreamExecutionEnvironment.disableOperatorChaining(), however it is not advised for
        performance considerations.

        :return: The operator with chaining disabled.
        """
        self._j_data_stream_sink.disableChaining()
        return self
    def slot_sharing_group(self, slot_sharing_group: str) -> 'DataStreamSink':
        """
        Sets the slot sharing group of this operation. Parallel instances of operations that
        are in the same slot sharing group will be co-located in the same TaskManager slot,
        if possible.

        Operations inherit the slot sharing group of input operations if all input operations
        are in the same slot sharing group and no slot sharing group was explicitly specified.

        Initially an operation is in the default slot sharing group. An operation can be put
        into the default group explicitly by setting the slot sharing group to 'default'.

        :param slot_sharing_group: The slot sharing group name.
        :return: This operator.
        """
        self._j_data_stream_sink.slotSharingGroup(slot_sharing_group)
        return self
class KeyedStream(DataStream):
    """
    A KeyedStream represents a DataStream on which operator state is partitioned by key using
    a provided KeySelector. Typical operations supported by a DataStream are also possible on
    a KeyedStream, with the exception of partitioning methods such as shuffle, forward and
    keyBy.

    Reduce-style operations, such as reduce and sum work on elements that have the same key.
    """
    def __init__(self, j_keyed_stream, original_data_type_info, origin_stream: DataStream):
        """
        Constructor of KeyedStream.

        :param j_keyed_stream: A java KeyedStream object.
        :param original_data_type_info: Original data typeinfo.
        :param origin_stream: The DataStream before key by.
        """
        super(KeyedStream, self).__init__(j_data_stream=j_keyed_stream)
        self._original_data_type_info = original_data_type_info
        self._origin_stream = origin_stream
    def map(self, func: Union[Callable, MapFunction], output_type: TypeInformation = None) \
            -> 'DataStream':
        # Map operates on the original values, so first strip off the internal key column.
        return self._values().map(func, output_type)
    def flat_map(self, func: Union[Callable, FlatMapFunction], result_type: TypeInformation = None)\
            -> 'DataStream':
        # Flat map operates on the original values, so first strip off the internal key
        # column.
        return self._values().flat_map(func, result_type)
    def reduce(self, func: Union[Callable, ReduceFunction]) -> 'DataStream':
        """
        Applies a reduce transformation on the grouped data stream grouped on by the given
        key position. The `ReduceFunction` will receive input values based on the key value.
        Only input values with the same key will go to the same reducer.

        Example:
        ::

            >>> ds = env.from_collection([(1, 'a'), (2, 'a'), (3, 'a'), (4, 'b')])
            >>> ds.key_by(lambda x: x[1]).reduce(lambda a, b: (a[0] + b[0], b[1]))

        :param func: The ReduceFunction that is called for each element of the DataStream.
        :return: The transformed DataStream.
        """
        if not isinstance(func, ReduceFunction):
            if callable(func):
                func = ReduceFunctionWrapper(func)  # type: ignore
            else:
                raise TypeError("The input must be a ReduceFunction or a callable function!")
        from pyflink.fn_execution.flink_fn_execution_pb2 import UserDefinedDataStreamFunction
        j_operator, j_output_type_info = \
            _get_one_input_stream_operator(
                self, func, UserDefinedDataStreamFunction.REDUCE)  # type: ignore
        return DataStream(self._j_data_stream.transform(
            "Keyed Reduce",
            j_output_type_info,
            j_operator
        ))
    def filter(self, func: Union[Callable, FilterFunction]) -> 'DataStream':
        return self._values().filter(func)
    def add_sink(self, sink_func: SinkFunction) -> 'DataStreamSink':
        return self._values().add_sink(sink_func)
    def key_by(self, key_selector: Union[Callable, KeySelector],
               key_type_info: TypeInformation = None) -> 'KeyedStream':
        # Re-keying is applied to the stream as it was before this key_by, not on top of it.
        return self._origin_stream.key_by(key_selector, key_type_info)
    def process(self, func: KeyedProcessFunction,  # type: ignore
                output_type: TypeInformation = None) -> 'DataStream':
        """
        Applies the given KeyedProcessFunction on the input stream, thereby creating a
        transformed output stream.

        The function will be called for every element in the input streams and can produce
        zero or more output elements.

        :param func: The KeyedProcessFunction that is called for each element in the stream.
        :param output_type: TypeInformation for the result type of the function.
        :return: The transformed DataStream.
        """
        if not isinstance(func, KeyedProcessFunction):
            raise TypeError("KeyedProcessFunction is required for KeyedStream.")
        from pyflink.fn_execution import flink_fn_execution_pb2
        j_python_data_stream_function_operator, j_output_type_info = \
            _get_one_input_stream_operator(
                self,
                func,
                flink_fn_execution_pb2.UserDefinedDataStreamFunction.KEYED_PROCESS,  # type: ignore
                output_type)
        return DataStream(self._j_data_stream.transform(
            "KEYED PROCESS",
            j_output_type_info,
            j_python_data_stream_function_operator))
    def union(self, *streams) -> 'DataStream':
        return self._values().union(*streams)
    def shuffle(self) -> 'DataStream':
        raise Exception('Cannot override partitioning for KeyedStream.')
    def project(self, *field_indexes) -> 'DataStream':
        return self._values().project(*field_indexes)
    def rescale(self) -> 'DataStream':
        raise Exception('Cannot override partitioning for KeyedStream.')
    def rebalance(self) -> 'DataStream':
        raise Exception('Cannot override partitioning for KeyedStream.')
    def forward(self) -> 'DataStream':
        raise Exception('Cannot override partitioning for KeyedStream.')
    def broadcast(self) -> 'DataStream':
        raise Exception('Cannot override partitioning for KeyedStream.')
    def partition_custom(self, partitioner: Union[Callable, Partitioner],
                         key_selector: Union[Callable, KeySelector]) -> 'DataStream':
        raise Exception('Cannot override partitioning for KeyedStream.')
    def print(self, sink_identifier=None):
        # Bug fix: forward the sink identifier instead of dropping it. Previously the
        # caller-supplied prefix was silently ignored because _values().print() was called
        # without arguments. DataStream.print handles the None default itself.
        return self._values().print(sink_identifier)
    def _values(self) -> 'DataStream':
        """
        Since a python KeyedStream is in the format of Row(key_value, original_data), this is
        used for getting the original_data.
        """
        transformed_stream = super().map(lambda x: x[1], output_type=self._original_data_type_info)
        # The well-known operator name lets PythonConfigUtil recognize and fuse this
        # intermediate map during job graph generation.
        transformed_stream.name(get_gateway().jvm.org.apache.flink.python.util.PythonConfigUtil
                                .KEYED_STREAM_VALUE_OPERATOR_NAME)
        return DataStream(transformed_stream._j_data_stream)
    def set_parallelism(self, parallelism: int):
        raise Exception("Set parallelism for KeyedStream is not supported.")
    def name(self, name: str):
        raise Exception("Set name for KeyedStream is not supported.")
    def get_name(self) -> str:
        raise Exception("Get name of KeyedStream is not supported.")
    def uid(self, uid: str):
        raise Exception("Set uid for KeyedStream is not supported.")
    def set_uid_hash(self, uid_hash: str):
        raise Exception("Set uid hash for KeyedStream is not supported.")
    def set_max_parallelism(self, max_parallelism: int):
        raise Exception("Set max parallelism for KeyedStream is not supported.")
    def force_non_parallel(self):
        raise Exception("Set force non-parallel for KeyedStream is not supported.")
    def set_buffer_timeout(self, timeout_millis: int):
        raise Exception("Set buffer timeout for KeyedStream is not supported.")
    def start_new_chain(self) -> 'DataStream':
        raise Exception("Start new chain for KeyedStream is not supported.")
    def disable_chaining(self) -> 'DataStream':
        raise Exception("Disable chaining for KeyedStream is not supported.")
    def slot_sharing_group(self, slot_sharing_group: str) -> 'DataStream':
        raise Exception("Setting slot sharing group for KeyedStream is not supported.")
class ConnectedStreams(object):
"""
ConnectedStreams represent two connected streams of (possibly) different data types.
Connected streams are useful for cases where operations on one stream directly
affect the operations on the other stream, usually via shared state between the streams.
An example for the use of connected streams would be to apply rules that change over time
onto another stream. One of the connected streams has the rules, the other stream the
elements to apply the rules to. The operation on the connected stream maintains the
current set of rules in the state. It may receive either a rule update and update the state
or a data element and apply the rules in the state to the element.
The connected stream can be conceptually viewed as a union stream of an Either type, that
holds either the first stream's type or the second stream's type.
"""
    def __init__(self, stream1: DataStream, stream2: DataStream):
        """
        Creates a new ConnectedStreams from the two given DataStreams.

        :param stream1: The first connected stream.
        :param stream2: The second connected stream.
        """
        self.stream1 = stream1
        self.stream2 = stream2
def key_by(self, key_selector1: Union[Callable, KeySelector],
key_selector2: Union[Callable, KeySelector],
key_type_info: TypeInformation = None) -> 'ConnectedStreams':
"""
KeyBy operation for connected data stream. Assigns keys to the elements of
input1 and input2 using keySelector1 and keySelector2 with explicit type information
for the common key type.
:param key_selector1: The `KeySelector` used for grouping the first input.
:param key_selector2: The `KeySelector` used for grouping the second input.
:param key_type_info: The type information of the common key type.
:return: The partitioned `ConnectedStreams`
"""
ds1 = self.stream1
ds2 = self.stream2
if isinstance(self.stream1, KeyedStream):
ds1 = self.stream1._origin_stream
if isinstance(self.stream2, KeyedStream):
ds2 = self.stream2._origin_stream
return ConnectedStreams(
ds1.key_by(key_selector1, key_type_info),
ds2.key_by(key_selector2, key_type_info))
def map(self, func: CoMapFunction, output_type: TypeInformation = None) \
-> 'DataStream':
"""
Applies a CoMap transformation on a `ConnectedStreams` and maps the output to a common
type. The transformation calls a `CoMapFunction.map1` for each element of the first
input and `CoMapFunction.map2` for each element of the second input. Each CoMapFunction
call returns exactly one element.
:param func: The CoMapFunction used to jointly transform the two input DataStreams
:param output_type: `TypeInformation` for the result type of the function.
:return: The transformed `DataStream`
"""
if not isinstance(func, CoMapFunction):
raise TypeError("The input function must be a CoMapFunction!")
# get connected stream
j_connected_stream = self.stream1._j_data_stream.connect(self.stream2._j_data_stream)
from pyflink.fn_execution import flink_fn_execution_pb2
j_operator, j_output_type = _get_two_input_stream_operator(
self,
func,
flink_fn_execution_pb2.UserDefinedDataStreamFunction.CO_MAP, # type: ignore
output_type)
return DataStream(j_connected_stream.transform("Co-Map", j_output_type, j_operator))
def flat_map(self, func: CoFlatMapFunction, output_type: TypeInformation = None) \
-> 'DataStream':
"""
Applies a CoFlatMap transformation on a `ConnectedStreams` and maps the output to a
common type. The transformation calls a `CoFlatMapFunction.flatMap1` for each element
of the first input and `CoFlatMapFunction.flatMap2` for each element of the second
input. Each CoFlatMapFunction call returns any number of elements including none.
:param func: The CoFlatMapFunction used to jointly transform the two input DataStreams
:param output_type: `TypeInformation` for the result type of the function.
:return: The transformed `DataStream`
"""
if not isinstance(func, CoFlatMapFunction):
raise TypeError("The input must be a CoFlatMapFunction!")
# get connected stream
j_connected_stream = self.stream1._j_data_stream.connect(self.stream2._j_data_stream)
from pyflink.fn_execution import flink_fn_execution_pb2
j_operator, j_output_type = _get_two_input_stream_operator(
self,
func,
flink_fn_execution_pb2.UserDefinedDataStreamFunction.CO_FLAT_MAP, # type: ignore
output_type)
return DataStream(j_connected_stream.transform("Co-Flat Map", j_output_type, j_operator))
def _is_keyed_stream(self):
return isinstance(self.stream1, KeyedStream) and isinstance(self.stream2, KeyedStream)
def _get_one_input_stream_operator(data_stream: DataStream,
                                   func: Union[Function, FunctionWrapper],
                                   func_type: int,
                                   type_info: Union[TypeInformation, List] = None):
    """
    Create a Java one input stream operator.
    :param data_stream: the upstream DataStream; its output type becomes the
                        operator's input type.
    :param func: a function object that implements the Function interface.
    :param func_type: function type, supports MAP, FLAT_MAP, etc.
    :param type_info: the data type of the function output data.
    :return: A Java operator which is responsible for execution user defined python function.
    :raises TypeError: if ``func_type`` is not one of the supported function types.
    """
    gateway = get_gateway()
    import cloudpickle
    # The Python function is pickled and shipped to the Java operator as bytes.
    serialized_func = cloudpickle.dumps(func)
    j_input_types = data_stream._j_data_stream.getTransformation().getOutputType()
    if type_info is None:
        # No declared output type: fall back to opaque pickled bytes.
        output_type_info = Types.PICKLED_BYTE_ARRAY()  # type: TypeInformation
    elif isinstance(type_info, list):
        # A plain list of field types is shorthand for a Row type.
        output_type_info = RowTypeInfo(type_info)
    else:
        output_type_info = type_info
    j_output_type_info = output_type_info.get_java_type_info()
    j_data_stream_python_function = gateway.jvm.DataStreamPythonFunction(
        bytearray(serialized_func),
        _get_python_env())
    j_data_stream_python_function_info = gateway.jvm.DataStreamPythonFunctionInfo(
        j_data_stream_python_function,
        func_type)
    j_conf = gateway.jvm.org.apache.flink.configuration.Configuration()
    from pyflink.fn_execution.flink_fn_execution_pb2 import UserDefinedDataStreamFunction
    # Select the Java operator class matching the Python function type.
    if func_type == UserDefinedDataStreamFunction.REDUCE:  # type: ignore
        # set max bundle size to 1 to force synchronize process for reduce function.
        j_conf.setInteger(gateway.jvm.org.apache.flink.python.PythonOptions.MAX_BUNDLE_SIZE, 1)
        # For reduce the output type is taken from field 1 of the keyed input
        # row (presumably the value part of a (key, value) pair — confirm).
        j_output_type_info = j_input_types.getTypeAt(1)
        JDataStreamPythonFunctionOperator = gateway.jvm.PythonReduceOperator
    elif func_type == UserDefinedDataStreamFunction.MAP:  # type: ignore
        # NOTE: relies on the wrapper's __str__ to detect the internal
        # partition-custom map function.
        if str(func) == '_Flink_PartitionCustomMapFunction':
            JDataStreamPythonFunctionOperator = gateway.jvm.PythonPartitionCustomOperator
        else:
            JDataStreamPythonFunctionOperator = gateway.jvm.PythonMapOperator
    elif func_type == UserDefinedDataStreamFunction.FLAT_MAP:  # type: ignore
        JDataStreamPythonFunctionOperator = gateway.jvm.PythonFlatMapOperator
    elif func_type == UserDefinedDataStreamFunction.PROCESS:  # type: ignore
        JDataStreamPythonFunctionOperator = gateway.jvm.PythonProcessOperator
    elif func_type == UserDefinedDataStreamFunction.KEYED_PROCESS:  # type: ignore
        JDataStreamPythonFunctionOperator = gateway.jvm.PythonKeyedProcessOperator
    else:
        raise TypeError("Unsupported function type: %s" % func_type)
    j_python_function_operator = JDataStreamPythonFunctionOperator(
        j_conf,
        j_input_types,
        j_output_type_info,
        j_data_stream_python_function_info)
    return j_python_function_operator, j_output_type_info
def _get_two_input_stream_operator(connected_streams: ConnectedStreams,
                                   func: Union[Function, FunctionWrapper],
                                   func_type: int,
                                   type_info: TypeInformation):
    """
    Create a Java two input stream operator.
    :param connected_streams: the ConnectedStreams supplying both input types.
    :param func: a function object that implements the Function interface.
    :param func_type: function type, supports MAP, FLAT_MAP, etc.
    :param type_info: the data type of the function output data.
    :return: A Java operator which is responsible for execution user defined python function.
    :raises TypeError: if ``func_type`` is not CO_MAP or CO_FLAT_MAP.
    """
    gateway = get_gateway()
    import cloudpickle
    # Ship the Python function to the JVM as pickled bytes.
    serialized_func = cloudpickle.dumps(func)
    # Each input keeps its own type; the operator receives both.
    j_input_types1 = connected_streams.stream1._j_data_stream.getTransformation().getOutputType()
    j_input_types2 = connected_streams.stream2._j_data_stream.getTransformation().getOutputType()
    if type_info is None:
        # No declared output type: fall back to opaque pickled bytes.
        output_type_info = Types.PICKLED_BYTE_ARRAY()  # type: TypeInformation
    elif isinstance(type_info, list):
        # A plain list of field types is shorthand for a Row type.
        output_type_info = RowTypeInfo(type_info)
    else:
        output_type_info = type_info
    j_output_type_info = output_type_info.get_java_type_info()
    j_data_stream_python_function = gateway.jvm.DataStreamPythonFunction(
        bytearray(serialized_func),
        _get_python_env())
    j_data_stream_python_function_info = gateway.jvm.DataStreamPythonFunctionInfo(
        j_data_stream_python_function,
        func_type)
    from pyflink.fn_execution.flink_fn_execution_pb2 import UserDefinedDataStreamFunction
    # Only the two co-operators are supported here.
    if func_type == UserDefinedDataStreamFunction.CO_FLAT_MAP:  # type: ignore
        JTwoInputPythonFunctionOperator = gateway.jvm.PythonCoFlatMapOperator
    elif func_type == UserDefinedDataStreamFunction.CO_MAP:  # type: ignore
        JTwoInputPythonFunctionOperator = gateway.jvm.PythonCoMapOperator
    else:
        raise TypeError("Unsupported function type: %s" % func_type)
    j_conf = gateway.jvm.org.apache.flink.configuration.Configuration()
    # The last flag tells the Java operator whether keyed state is available
    # (both inputs keyed).
    j_python_data_stream_function_operator = JTwoInputPythonFunctionOperator(
        j_conf,
        j_input_types1,
        j_input_types2,
        j_output_type_info,
        j_data_stream_python_function_info,
        connected_streams._is_keyed_stream())
    return j_python_data_stream_function_operator, j_output_type_info
class CloseableIterator(object):
    """
    An iterator over a Java closeable iterator that is itself auto closeable.

    Supports both the iterator protocol and the context-manager protocol, so
    it can be used in a ``with`` block to guarantee the underlying Java
    iterator is closed.
    """
    def __init__(self, j_closeable_iterator, type_info: TypeInformation = None):
        self._j_closeable_iterator = j_closeable_iterator
        self._type_info = type_info

    def __iter__(self):
        return self

    def __next__(self):
        return self.next()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always release the underlying Java iterator on block exit.
        self.close()

    def next(self):
        """Return the next element, converted to a Python object."""
        if self._j_closeable_iterator.hasNext():
            return convert_to_python_obj(
                self._j_closeable_iterator.next(), self._type_info)
        raise StopIteration('No more data.')

    def close(self):
        """Close the underlying Java iterator."""
        self._j_closeable_iterator.close()
| apache-2.0 |
moves-rwth/storm | resources/3rdparty/googletest/googlemock/scripts/generator/cpp/utils.py | 19 | 1100 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic utilities for C++ parsing."""
import sys
# Set to True to see the start/end token indices.
# NOTE: this is shipped as True upstream; flip to False to silence the
# token-index output.
DEBUG = True
def ReadFile(filename, print_error=True):
  """Returns the contents of a file.

  Args:
    filename: path of the file to read.
    print_error: if True, print a message when the file cannot be read.

  Returns:
    The file contents as a string, or None if the file could not be read.
  """
  try:
    # 'with' guarantees the file is closed even if read() raises,
    # replacing the manual try/finally/close dance.
    with open(filename) as fp:
      return fp.read()
  except IOError:
    if print_error:
      print('Error reading %s: %s' % (filename, sys.exc_info()[1]))
    return None
| gpl-3.0 |
quasiben/bokeh | bokeh/models/map_plots.py | 6 | 2307 | """ Models for displaying maps in Bokeh plots.
"""
from __future__ import absolute_import
from ..core import validation
from ..core.validation.warnings import MISSING_RENDERERS, NO_DATA_RENDERERS
from ..core.validation.errors import REQUIRED_RANGE
from ..core.properties import HasProps, abstract
from ..core.properties import Enum, Float, Instance, Int, JSON, Override
from ..core.enums import MapType
from .plots import Plot
@abstract
class MapOptions(HasProps):
    """ Abstract base class for map options' models.

    Concrete subclasses (e.g. ``GMapOptions``) add provider-specific
    settings on top of the common center/zoom properties below.

    """

    lat = Float(help="""
    The latitude where the map should be centered.
    """)

    lng = Float(help="""
    The longitude where the map should be centered.
    """)

    zoom = Int(12, help="""
    The initial zoom level to use when displaying the map.
    """)
@abstract
class MapPlot(Plot):
    """ Abstract base class for map plot models.

    Serves as the common ancestor for plots rendered over a map background
    (e.g. ``GMapPlot``).

    """
class GMapOptions(MapOptions):
    """ Options for GMapPlot objects.

    Extends ``MapOptions`` with Google-Maps-specific settings.

    """

    map_type = Enum(MapType, help="""
    The `map type`_ to use for the GMapPlot.

    .. _map type: https://developers.google.com/maps/documentation/javascript/reference#MapTypeId
    """)

    styles = JSON(help="""
    A JSON array of `map styles`_ to use for the GMapPlot. Many example styles can
    `be found here`_.

    .. _map styles: https://developers.google.com/maps/documentation/javascript/reference#MapTypeStyle
    .. _be found here: https://snazzymaps.com
    """)
class GMapPlot(MapPlot):
    """ A Bokeh Plot with a `Google Map`_ displayed underneath.

    Data placed on this plot should be specified in decimal lat long coordinates e.g. 37.123, -122.404.
    It will be automatically converted into the web mercator projection to display properly over
    google maps tiles.

    .. _Google Map: https://www.google.com/maps/

    """

    # TODO (bev) map plot might not have these
    # The three overrides below deliberately turn the inherited validation
    # checks into no-ops for map plots (presumably because the map itself
    # supplies ranges/renderers — confirm against Plot's validators).
    @validation.error(REQUIRED_RANGE)
    def _check_required_range(self):
        pass

    @validation.warning(MISSING_RENDERERS)
    def _check_missing_renderers(self):
        pass

    @validation.warning(NO_DATA_RENDERERS)
    def _check_no_data_renderers(self):
        pass

    map_options = Instance(GMapOptions, help="""
    Options for displaying the plot.
    """)

    border_fill_color = Override(default="#ffffff")
| bsd-3-clause |
malemburg/pythondotorg | downloads/models.py | 6 | 8486 | from django.core.urlresolvers import reverse
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.template.loader import render_to_string
from django.utils import timezone
from markupfield.fields import MarkupField
from boxes.models import Box
from cms.models import ContentManageable, NameSlugModel
from fastly.utils import purge_url
from pages.models import Page
from .managers import ReleaseManager
# Markup language for Release.content; reStructuredText unless the Django
# settings override DEFAULT_MARKUP_TYPE.
DEFAULT_MARKUP_TYPE = getattr(settings, 'DEFAULT_MARKUP_TYPE', 'restructuredtext')
class OS(ContentManageable, NameSlugModel):
    """ OS for Python release """

    class Meta:
        verbose_name = 'Operating System'
        verbose_name_plural = 'Operating Systems'
        ordering = ('name', )

    def __str__(self):
        # Human-readable label used throughout the admin and templates.
        return self.name

    def get_absolute_url(self):
        # Canonical URL listing all downloads for this OS.
        return reverse('download:download_os_list', kwargs={'os_slug': self.slug})
class Release(ContentManageable, NameSlugModel):
    """
    A particular version release. Name field should be version number for
    example: 3.3.4 or 2.7.6
    """
    # Major-version constants used by the ``version`` field.
    PYTHON1 = 1
    PYTHON2 = 2
    PYTHON3 = 3
    PYTHON_VERSION_CHOICES = (
        (PYTHON3, 'Python 3.x.x'),
        (PYTHON2, 'Python 2.x.x'),
        (PYTHON1, 'Python 1.x.x'),
    )
    version = models.IntegerField(default=PYTHON3, choices=PYTHON_VERSION_CHOICES)
    is_latest = models.BooleanField(
        verbose_name='Is this the latest release?',
        default=False,
        db_index=True,
        help_text="Set this if this should be considered the latest release "
                  "for the major version. Previous 'latest' versions will "
                  "automatically have this flag turned off.",
    )
    is_published = models.BooleanField(
        verbose_name='Is Published?',
        default=False,
        db_index=True,
        help_text="Whether or not this should be considered a released/published version",
    )
    pre_release = models.BooleanField(
        verbose_name='Pre-release',
        default=False,
        db_index=True,
        help_text="Boolean to denote pre-release/beta/RC versions",
    )
    show_on_download_page = models.BooleanField(
        default=True,
        db_index=True,
        help_text="Whether or not to show this release on the main /downloads/ page",
    )
    release_date = models.DateTimeField(default=timezone.now)
    release_page = models.ForeignKey(Page, related_name='release', blank=True, null=True)
    release_notes_url = models.URLField('Release Notes URL', blank=True)
    content = MarkupField(default_markup_type=DEFAULT_MARKUP_TYPE, default='')

    objects = ReleaseManager()

    class Meta:
        verbose_name = 'Release'
        verbose_name_plural = 'Releases'
        ordering = ('name', )
        get_latest_by = 'release_date'

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        """Prefer the linked CMS release page when this release has no
        content of its own; otherwise use the release detail view."""
        if not self.content.raw and self.release_page:
            return self.release_page.get_absolute_url()
        else:
            return reverse('download:download_release_detail', kwargs={'release_slug': self.slug})

    def download_file_for_os(self, os_slug):
        """ Given an OS slug return the appropriate download file """
        # Only one file per OS is flagged as the supernav download button.
        try:
            file = self.files.get(os__slug=os_slug, download_button=True)
        except ReleaseFile.DoesNotExist:
            file = None
        return file

    def files_for_os(self, os_slug):
        """ Return all files for this release for a given OS """
        files = self.files.filter(os__slug=os_slug).order_by('-name')
        return files
def update_supernav():
    """Re-render the download supernav and the download-sources boxes from
    the latest published Python 2 and Python 3 releases.

    Side effects: saves the 'supernav-python-downloads' and
    'download-sources' Box rows.
    """
    try:
        latest_python2 = Release.objects.latest_python2()
    except Release.DoesNotExist:
        latest_python2 = None

    try:
        latest_python3 = Release.objects.latest_python3()
    except Release.DoesNotExist:
        latest_python3 = None

    # One entry per OS with that OS's download-button file for each major
    # version (may be None when no release/file exists).
    python_files = []
    for o in OS.objects.all():
        data = {
            'os': o,
            'python2': None,
            'python3': None,
        }

        if latest_python2:
            data['python2'] = latest_python2.download_file_for_os(o.slug)

        if latest_python3:
            data['python3'] = latest_python3.download_file_for_os(o.slug)

        python_files.append(data)

    content = render_to_string('downloads/supernav.html', {
        'latest_python2': latest_python2,
        'latest_python3': latest_python3,
        'python_files': python_files,
    })

    box = Box.objects.get(label='supernav-python-downloads')
    box.content = content
    box.save()

    # Update latest Sources box on Download landing page
    if latest_python2:
        latest_python2_source = latest_python2.download_file_for_os('source')
    else:
        latest_python2_source = None

    if latest_python3:
        latest_python3_source = latest_python3.download_file_for_os('source')
    else:
        latest_python3_source = None

    source_box = Box.objects.get(label='download-sources')
    source_content = render_to_string('downloads/download-sources-box.html', {
        'latest_python2_source': latest_python2_source,
        'latest_python3_source': latest_python3_source,
    })
    source_box.content = source_content
    source_box.save()
def update_homepage_download_box():
    """Re-render the homepage downloads box from the latest Python 2 and
    Python 3 releases and save it."""
    try:
        python2 = Release.objects.latest_python2()
    except Release.DoesNotExist:
        python2 = None

    try:
        python3 = Release.objects.latest_python3()
    except Release.DoesNotExist:
        python3 = None

    rendered = render_to_string('downloads/homepage-downloads-box.html', {
        'latest_python2': python2,
        'latest_python3': python3,
    })

    box = Box.objects.get(label='homepage-downloads')
    box.content = rendered
    box.save()
@receiver(post_save, sender=Release)
def promote_latest_release(sender, instance, **kwargs):
    """ Promote this release to be the latest if this flag is set """
    # Skip in fixtures
    if kwargs.get('raw', False):
        return

    if not instance.is_latest:
        return

    # Demote every other release of the same major version.
    Release.objects.filter(
        version=instance.version
    ).exclude(
        pk=instance.pk
    ).update(is_latest=False)
@receiver(post_save, sender=Release)
def purge_fastly_download_pages(sender, instance, **kwargs):
    """
    Purge Fastly caches so new Downloads show up more quickly
    """
    # Don't purge on fixture loads
    if kwargs.get('raw', False):
        return

    # Only purge on published instances
    if not instance.is_published:
        return

    # Purge our common pages
    common_pages = (
        '/downloads/',
        '/downloads/latest/python2/',
        '/downloads/latest/python3/',
        '/downloads/mac-osx/',
        '/downloads/source/',
        '/downloads/windows/',
    )
    for page in common_pages:
        purge_url(page)

    # Purge the release page itself
    purge_url(instance.get_absolute_url())
@receiver(post_save, sender=Release)
def update_download_supernav(sender, instance, **kwargs):
    """ Update download supernav """
    # Skip in fixtures
    if kwargs.get('raw', False):
        return

    if not instance.is_published:
        return

    update_supernav()
    update_homepage_download_box()
class ReleaseFile(ContentManageable, NameSlugModel):
    """
    Individual files in a release. If a specific OS/release combo has multiple
    versions for example Windows and MacOS 32 vs 64 bit each file needs to be
    added separately
    """
    os = models.ForeignKey(OS, related_name="releases", verbose_name='OS')
    release = models.ForeignKey(Release, related_name="files")
    description = models.TextField(blank=True)
    is_source = models.BooleanField('Is Source Distribution', default=False)
    url = models.URLField('URL', unique=True, db_index=True, help_text="Download URL")
    gpg_signature_file = models.URLField(
        'GPG SIG URL',
        blank=True,
        help_text="GPG Signature URL"
    )
    # MD5 checksum of the file (hex digest string).
    md5_sum = models.CharField('MD5 Sum', max_length=200, blank=True)
    # Size in bytes.
    filesize = models.IntegerField(default=0)
    download_button = models.BooleanField(default=False, help_text="Use for the supernav download button for this OS")

    class Meta:
        verbose_name = 'Release File'
        verbose_name_plural = 'Release Files'
        ordering = ('-release__is_published', 'release__name', 'os__name', 'name')
| apache-2.0 |
CTSRD-SOAAP/chromium-42.0.2311.135 | tools/python/google/httpd_utils.py | 182 | 7467 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A class to help start/stop a local apache http server."""
import logging
import optparse
import os
import subprocess
import sys
import time
import urllib
import google.path_utils
import google.platform_utils
class HttpdNotStarted(Exception):
  """Raised when the httpd does not respond on an expected port."""
  pass
def UrlIsAlive(url):
  """Checks to see if we get an http response from |url|.

  We poll the url 5 times with a 1 second delay. If we don't
  get a reply in that time, we give up and assume the httpd
  didn't start properly.

  Args:
    url: The URL to check.
  Return:
    True if the url is alive.
  """
  wait_time = 5
  while wait_time > 0:
    try:
      response = urllib.urlopen(url)
      # Close the connection so repeated polls don't leak sockets.
      response.close()
      # Server is up and responding.
      return True
    except IOError:
      pass
    wait_time -= 1
    # Wait a second and try again.
    time.sleep(1)
  return False
def ApacheConfigDir(start_dir):
  """Returns a path to the directory holding the Apache config files."""
  config_dir = google.path_utils.FindUpward(start_dir, 'tools', 'python',
                                            'google', 'httpd_config')
  return config_dir
def GetCygserverPath(start_dir, apache2=False):
  """Returns the path to the directory holding cygserver.exe file.

  Returns None unless apache2 is True (Apache 1.3 does not use cygserver).
  """
  if not apache2:
    return None
  return google.path_utils.FindUpward(start_dir, 'third_party',
                                      'cygwin', 'usr', 'sbin')
def StartServer(document_root=None, output_dir=None, apache2=False):
  """Starts a local server on port 8000 using the basic configuration files.

  Args:
    document_root: If present, specifies the document root for the server;
        otherwise, the filesystem's root (e.g., C:/ or /) will be used.
    output_dir: If present, specifies where to put server logs; otherwise,
        they'll be placed in the system's temp dir (e.g., $TEMP or /tmp).
    apache2: boolean if true will cause this function to configure
        for Apache 2.x as opposed to Apache 1.3.x

  Returns: the ApacheHttpd object that was created
  """
  script_dir = google.path_utils.ScriptDir()
  platform_util = google.platform_utils.PlatformUtility(script_dir)
  # Fall back to platform defaults for unspecified locations.
  if not output_dir:
    output_dir = platform_util.GetTempDirectory()
  if not document_root:
    document_root = platform_util.GetFilesystemRoot()
  apache_config_dir = ApacheConfigDir(script_dir)
  # Apache 1.3 and 2.x use different config files from the same directory.
  if apache2:
    httpd_conf_path = os.path.join(apache_config_dir, 'httpd2.conf')
  else:
    httpd_conf_path = os.path.join(apache_config_dir, 'httpd.conf')
  mime_types_path = os.path.join(apache_config_dir, 'mime.types')
  start_cmd = platform_util.GetStartHttpdCommand(output_dir,
                                                 httpd_conf_path,
                                                 mime_types_path,
                                                 document_root,
                                                 apache2=apache2)
  stop_cmd = platform_util.GetStopHttpdCommand()

  # Port 8000 is hard-coded to match the checked-in httpd config files.
  httpd = ApacheHttpd(start_cmd, stop_cmd, [8000],
                      cygserver_path=GetCygserverPath(script_dir, apache2))
  httpd.StartServer()
  return httpd
def StopServers(apache2=False):
  """Forcibly runs the platform's stop command to kill running httpd servers.

  The details depend on the behavior of the platform stop command; it is
  often implemented by killing every running httpd process, as the name of
  this function implies.

  Args:
    apache2: boolean if true will cause this function to configure
        for Apache 2.x as opposed to Apache 1.3.x
  """
  here = google.path_utils.ScriptDir()
  util = google.platform_utils.PlatformUtility(here)
  server = ApacheHttpd('', util.GetStopHttpdCommand(), [],
                       cygserver_path=GetCygserverPath(here, apache2))
  server.StopServer(force=True)
class ApacheHttpd(object):
  """Wraps starting/stopping a local Apache httpd via platform commands."""

  def __init__(self, start_command, stop_command, port_list,
               cygserver_path=None):
    """Args:
      start_command: command list to call to start the httpd
      stop_command: command list to call to stop the httpd if one has been
          started.  May kill all httpd processes running on the machine.
      port_list: list of ports expected to respond on the local machine when
          the server has been successfully started.
      cygserver_path: Path to cygserver.exe. If specified, exe will be started
          with server as well as stopped when server is stopped.
    """
    self._http_server_proc = None
    self._start_command = start_command
    self._stop_command = stop_command
    self._port_list = port_list
    self._cygserver_path = cygserver_path

  def StartServer(self):
    # Already started; nothing to do.
    if self._http_server_proc:
      return
    if self._cygserver_path:
      cygserver_exe = os.path.join(self._cygserver_path, "cygserver.exe")
      cygbin = google.path_utils.FindUpward(cygserver_exe, 'third_party',
                                            'cygwin', 'bin')
      # cygserver needs the cygwin DLLs on PATH to start.
      env = os.environ
      env['PATH'] += ";" + cygbin
      subprocess.Popen(cygserver_exe, env=env)
    logging.info('Starting http server')
    self._http_server_proc = subprocess.Popen(self._start_command)

    # Ensure that the server is running on all the desired ports.
    for port in self._port_list:
      if not UrlIsAlive('http://127.0.0.1:%s/' % str(port)):
        raise HttpdNotStarted('Failed to start httpd on port %s' % str(port))

  def StopServer(self, force=False):
    """If we started an httpd.exe process, or if force is True, call
    self._stop_command (passed in on init so it can be platform-dependent).
    This will presumably kill it, and may also kill any other httpd.exe
    processes that are running.
    """
    if force or self._http_server_proc:
      logging.info('Stopping http server')
      kill_proc = subprocess.Popen(self._stop_command,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
      logging.info('%s\n%s' % (kill_proc.stdout.read(),
                               kill_proc.stderr.read()))
      self._http_server_proc = None
      if self._cygserver_path:
        # Kill the cygserver helper we started alongside the server.
        subprocess.Popen(["taskkill.exe", "/f", "/im", "cygserver.exe"],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
def main():
  """Command-line helper for manually starting/stopping the http server."""
  parser = optparse.OptionParser()
  parser.add_option('-k', '--server', help='Server action (start|stop)')
  parser.add_option('-r', '--root', help='Document root (optional)')
  parser.add_option('-a', '--apache2', action='store_true',
      default=False, help='Starts Apache 2 instead of Apache 1.3 (default). '
                          'Ignored on Mac (apache2 is used always)')
  options, args = parser.parse_args()

  if not options.server:
    print("Usage: %s -k {start|stop} [-r document_root] [--apache2]" %
          sys.argv[0])
    return 1

  # An empty/missing root falls back to the platform default.
  document_root = options.root or None
  if options.server == 'start':
    StartServer(document_root, apache2=options.apache2)
  else:
    StopServers(apache2=options.apache2)
# Allow this module to be run directly as a command-line tool.
if '__main__' == __name__:
  sys.exit(main())
| bsd-3-clause |
savanu/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_tokenizer.py | 11 | 11155 | import sys
import os
import unittest
sys.path.insert(0, os.path.abspath(".."))
from cStringIO import StringIO
from .. import parser
from ..parser import token_types
class TokenizerTest(unittest.TestCase):
def setUp(self):
self.tokenizer = parser.Tokenizer()
def tokenize(self, input_str):
rv = []
for item in self.tokenizer.tokenize(StringIO(input_str)):
rv.append(item)
if item[0] == token_types.eof:
break
return rv
def compare(self, input_text, expected):
expected = expected + [(token_types.eof, None)]
actual = self.tokenize(input_text)
self.assertEquals(actual, expected)
def test_heading_0(self):
self.compare("""[Heading text]""",
[(token_types.paren, "["),
(token_types.string, "Heading text"),
(token_types.paren, "]")])
def test_heading_1(self):
self.compare("""[Heading [text\]]""",
[(token_types.paren, "["),
(token_types.string, "Heading [text]"),
(token_types.paren, "]")])
def test_heading_2(self):
self.compare("""[Heading #text]""",
[(token_types.paren, "["),
(token_types.string, "Heading #text"),
(token_types.paren, "]")])
def test_heading_3(self):
self.compare("""[Heading [\]text]""",
[(token_types.paren, "["),
(token_types.string, "Heading []text"),
(token_types.paren, "]")])
def test_heading_4(self):
with self.assertRaises(parser.ParseError):
self.tokenize("[Heading")
def test_heading_5(self):
self.compare("""[Heading [\]text] #comment""",
[(token_types.paren, "["),
(token_types.string, "Heading []text"),
(token_types.paren, "]")])
def test_heading_6(self):
self.compare(r"""[Heading \ttext]""",
[(token_types.paren, "["),
(token_types.string, "Heading \ttext"),
(token_types.paren, "]")])
def test_key_0(self):
self.compare("""key:value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_key_1(self):
self.compare("""key : value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_key_2(self):
self.compare("""key : val ue""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "val ue")])
def test_key_3(self):
self.compare("""key: value#comment""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_key_4(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""ke y: value""")
def test_key_5(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""key""")
def test_key_6(self):
self.compare("""key: "value\"""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_key_7(self):
self.compare("""key: 'value'""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_key_8(self):
self.compare("""key: "#value\"""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "#value")])
def test_key_9(self):
self.compare("""key: '#value\'""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "#value")])
def test_key_10(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""key: "value""")
def test_key_11(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""key: 'value""")
def test_key_12(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""key: 'value""")
def test_key_13(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""key: 'value' abc""")
def test_key_14(self):
self.compare(r"""key: \\nb""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, r"\nb")])
def test_list_0(self):
self.compare(
"""
key: []""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.list_start, "["),
(token_types.list_end, "]")])
def test_list_1(self):
self.compare(
"""
key: [a, "b"]""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.list_start, "["),
(token_types.string, "a"),
(token_types.string, "b"),
(token_types.list_end, "]")])
def test_list_2(self):
self.compare(
"""
key: [a,
b]""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.list_start, "["),
(token_types.string, "a"),
(token_types.string, "b"),
(token_types.list_end, "]")])
def test_list_3(self):
self.compare(
"""
key: [a, #b]
c]""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.list_start, "["),
(token_types.string, "a"),
(token_types.string, "c"),
(token_types.list_end, "]")])
def test_list_4(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""key: [a #b]
c]""")
def test_list_5(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""key: [a \\
c]""")
def test_list_6(self):
self.compare(
"""key: [a , b]""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.list_start, "["),
(token_types.string, "a"),
(token_types.string, "b"),
(token_types.list_end, "]")])
def test_expr_0(self):
self.compare(
"""
key:
if cond == 1: value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.group_start, None),
(token_types.ident, "if"),
(token_types.ident, "cond"),
(token_types.ident, "=="),
(token_types.number, "1"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_expr_1(self):
self.compare(
"""
key:
if cond == 1: value1
value2""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.group_start, None),
(token_types.ident, "if"),
(token_types.ident, "cond"),
(token_types.ident, "=="),
(token_types.number, "1"),
(token_types.separator, ":"),
(token_types.string, "value1"),
(token_types.string, "value2")])
def test_expr_2(self):
self.compare(
"""
key:
if cond=="1": value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.group_start, None),
(token_types.ident, "if"),
(token_types.ident, "cond"),
(token_types.ident, "=="),
(token_types.string, "1"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_expr_3(self):
self.compare(
"""
key:
if cond==1.1: value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.group_start, None),
(token_types.ident, "if"),
(token_types.ident, "cond"),
(token_types.ident, "=="),
(token_types.number, "1.1"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_expr_4(self):
self.compare(
"""
key:
if cond==1.1 and cond2 == "a": value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.group_start, None),
(token_types.ident, "if"),
(token_types.ident, "cond"),
(token_types.ident, "=="),
(token_types.number, "1.1"),
(token_types.ident, "and"),
(token_types.ident, "cond2"),
(token_types.ident, "=="),
(token_types.string, "a"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_expr_5(self):
self.compare(
"""
key:
if (cond==1.1 ): value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.group_start, None),
(token_types.ident, "if"),
(token_types.paren, "("),
(token_types.ident, "cond"),
(token_types.ident, "=="),
(token_types.number, "1.1"),
(token_types.paren, ")"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_expr_6(self):
    """Escape sequences in quoted strings are decoded (\\t becomes a tab)."""
    self.compare(
        """
key:
  if "\\ttest": value""",
        [(token_types.string, "key"),
         (token_types.separator, ":"),
         (token_types.group_start, None),
         (token_types.ident, "if"),
         # The literal backslash-t in the input is decoded to a tab character.
         (token_types.string, "\ttest"),
         (token_types.separator, ":"),
         (token_types.string, "value")])
def test_expr_7(self):
    """A digit followed by an uppercase letter is not a valid number token."""
    with self.assertRaises(parser.ParseError):
        self.tokenize(
            """
key:
  if 1A: value""")
def test_expr_8(self):
    """A digit followed by a lowercase letter is not a valid number token."""
    with self.assertRaises(parser.ParseError):
        self.tokenize(
            """
key:
  if 1a: value""")
def test_expr_9(self):
    """A number with two decimal points is rejected."""
    with self.assertRaises(parser.ParseError):
        self.tokenize(
            """
key:
  if 1.1.1: value""")
def test_expr_10(self):
    """A trailing-dot float ("1.") is accepted and kept verbatim as a number."""
    self.compare(
        """
key:
  if 1.: value""",
        [(token_types.string, "key"),
         (token_types.separator, ":"),
         (token_types.group_start, None),
         (token_types.ident, "if"),
         (token_types.number, "1."),
         (token_types.separator, ":"),
         (token_types.string, "value")])
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    unittest.main()
| mpl-2.0 |
krikru/tensorflow-opencl | tensorflow/python/kernel_tests/tensor_array_ops_test.py | 30 | 48184 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tensor_array_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_grad
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class TensorArrayTest(test.TestCase):
def testTensorArrayWriteRead(self):
    """Values of differing shapes (infer_shape=False) read back intact."""
    with self.test_session(use_gpu=True) as session:
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32,
            tensor_array_name="foo",
            size=3,
            infer_shape=False)
        # Three writes with unrelated shapes: matrix, matrix, scalar.
        w0 = ta.write(0, [[4.0, 5.0]])
        w1 = w0.write(1, [[1.0]])
        w2 = w1.write(2, -3.0)
        r0 = w2.read(0)
        r1 = w2.read(1)
        r2 = w2.read(2)
        d0, d1, d2 = session.run([r0, r1, r2])
        self.assertAllEqual([[4.0, 5.0]], d0)
        self.assertAllEqual([[1.0]], d1)
        self.assertAllEqual(-3.0, d2)
def _testTensorArrayWritePack(self, tf_dtype):
    """Writes three rows of the given dtype and checks stack() concatenates them."""
    dtype = tf_dtype.as_numpy_dtype()
    with self.test_session(use_gpu=True):
        ta = tensor_array_ops.TensorArray(
            dtype=tf_dtype, tensor_array_name="foo", size=3)
        if tf_dtype == dtypes.string:
            # In Python3, np.str is unicode, while we always want bytes
            convert = lambda x: np.asarray(x).astype("|S")
        else:
            convert = lambda x: np.asarray(x).astype(dtype)
        w0 = ta.write(0, convert([[4.0, 5.0]]))
        w1 = w0.write(1, convert([[6.0, 7.0]]))
        w2 = w1.write(2, convert([[8.0, 9.0]]))
        c0 = w2.stack()
        # stack() adds a leading dimension over the three written elements.
        self.assertAllEqual(
            convert([[[4.0, 5.0]], [[6.0, 7.0]], [[8.0, 9.0]]]), c0.eval())
def _testTensorArrayWritePackMaybeLegacy(self):
    """Exercise write/stack across every supported element dtype."""
    for element_dtype in (dtypes.float32, dtypes.float64, dtypes.int32,
                          dtypes.int64, dtypes.complex64, dtypes.complex128,
                          dtypes.string):
        self._testTensorArrayWritePack(element_dtype)
def testTensorArrayWritePack(self):
    """Public entry point for the write/stack dtype sweep."""
    self._testTensorArrayWritePackMaybeLegacy()
def _testTensorArrayWriteConcat(self, tf_dtype):
    """Writes rows of unequal leading size; concat() joins them along axis 0."""
    dtype = tf_dtype.as_numpy_dtype()
    with self.test_session(use_gpu=True):
        ta = tensor_array_ops.TensorArray(
            dtype=tf_dtype, tensor_array_name="foo", size=3, infer_shape=False)
        if tf_dtype == dtypes.string:
            # In Python3, np.str is unicode, while we always want bytes
            convert = lambda x: np.asarray(x).astype("|S")
        else:
            convert = lambda x: np.asarray(x).astype(dtype)
        # Elements have 3, 2 and 1 rows respectively; concat flattens them
        # into a single 6-row matrix.
        w0 = ta.write(0, convert([[4.0, 5.0], [104.0, 105.0], [204.0, 205.0]]))
        w1 = w0.write(1, convert([[6.0, 7.0], [106.0, 107.0]]))
        w2 = w1.write(2, convert([[8.0, 9.0]]))
        c0 = w2.concat()
        self.assertAllEqual(
            convert([[4.0, 5.0], [104.0, 105.0], [204.0, 205.0], [6.0, 7.0],
                     [106.0, 107.0], [8.0, 9.0]]), c0.eval())
def testTensorArrayWriteConcat(self):
    """Exercise write/concat across every supported element dtype."""
    for element_dtype in (dtypes.float32, dtypes.float64, dtypes.int32,
                          dtypes.int64, dtypes.complex64, dtypes.complex128,
                          dtypes.string):
        self._testTensorArrayWriteConcat(element_dtype)
def _testTensorArrayPackNotAllValuesAvailableFails(self):
    """stack() must fail if some indices were never written."""
    with self.test_session():
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32, tensor_array_name="foo", size=3)
        # Only index 0 is written; stacking should report index 1 as unwritten.
        with self.assertRaisesOpError("Could not read from TensorArray index 1 "
                                      "because it has not yet been written to."):
            ta.write(0, [[4.0, 5.0]]).stack().eval()
def testTensorArrayPackNotAllValuesAvailableFails(self):
    """Public entry point for the partial-write stack() failure check."""
    self._testTensorArrayPackNotAllValuesAvailableFails()
def _testTensorArrayUnpackRead(self, tf_dtype):
    """unstack() of rank-1/rank-2 (incl. empty-row) inputs reads back elementwise."""
    dtype = tf_dtype.as_numpy_dtype()
    with self.test_session(use_gpu=True) as session:
        ta = tensor_array_ops.TensorArray(
            dtype=tf_dtype, tensor_array_name="foo", size=3)
        if tf_dtype is dtypes.string:
            # In Python3, np.str is unicode, while we always want bytes
            convert = lambda x: np.asarray(x).astype("|S")
        else:
            convert = lambda x: np.asarray(x).astype(dtype)
        # Unpack a vector into scalars
        w0 = ta.unstack(convert([1.0, 2.0, 3.0]))
        r0 = w0.read(0)
        r1 = w0.read(1)
        r2 = w0.read(2)
        d0, d1, d2 = session.run([r0, r1, r2])
        self.assertAllEqual(convert(1.0), d0)
        self.assertAllEqual(convert(2.0), d1)
        self.assertAllEqual(convert(3.0), d2)
        ta = tensor_array_ops.TensorArray(
            dtype=tf_dtype, tensor_array_name="foo", size=3)
        # Unpack a matrix into vectors
        w1 = ta.unstack(convert([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]]))
        r0 = w1.read(0)
        r1 = w1.read(1)
        r2 = w1.read(2)
        d0, d1, d2 = session.run([r0, r1, r2])
        self.assertAllEqual(convert([1.0, 1.1]), d0)
        self.assertAllEqual(convert([2.0, 2.1]), d1)
        self.assertAllEqual(convert([3.0, 3.1]), d2)
        # Reset ta because we're going to change the shape, else shape
        # inference will throw an error.
        ta = tensor_array_ops.TensorArray(
            dtype=tf_dtype, tensor_array_name="foo", size=3)
        # Try unpacking an empty matrix, which should not cause an error.
        w2 = ta.unstack(convert([[], [], []]))
        r0 = w2.read(0)
        r1 = w2.read(1)
        r2 = w2.read(2)
        d0, d1, d2 = session.run([r0, r1, r2])
        self.assertAllEqual(convert([]), d0)
        self.assertAllEqual(convert([]), d1)
        self.assertAllEqual(convert([]), d2)
def _testTensorArrayUnpackReadMaybeLegacy(self):
    """Exercise unstack/read across every supported element dtype."""
    for element_dtype in (dtypes.float32, dtypes.float64, dtypes.int32,
                          dtypes.int64, dtypes.complex64, dtypes.complex128,
                          dtypes.string):
        self._testTensorArrayUnpackRead(element_dtype)
def testTensorArrayUnpackRead(self):
    """Public entry point for the unstack/read dtype sweep."""
    self._testTensorArrayUnpackReadMaybeLegacy()
def _testTensorArraySplitRead(self, tf_dtype):
    """split() with empty, vector and matrix inputs reads back the right slices."""
    dtype = tf_dtype.as_numpy_dtype()
    with self.test_session(use_gpu=True) as session:
        ta = tensor_array_ops.TensorArray(
            dtype=tf_dtype, tensor_array_name="foo", size=3, infer_shape=False)
        if tf_dtype == dtypes.string:
            # In Python3, np.str is unicode, while we always want bytes
            convert = lambda x: np.asarray(x).astype("|S")
        else:
            convert = lambda x: np.asarray(x).astype(dtype)
        # Split an empty vector
        lengths = constant_op.constant([0, 0, 0])
        w0 = ta.split(convert([]), lengths=lengths)
        r0 = w0.read(0)
        r1 = w0.read(1)
        r2 = w0.read(2)
        d0, d1, d2 = session.run([r0, r1, r2])
        self.assertAllEqual(convert([]), d0)
        self.assertAllEqual(convert([]), d1)
        self.assertAllEqual(convert([]), d2)
        # Split a vector
        lengths = constant_op.constant([2, 0, 1])
        w0 = ta.split(convert([1.0, 2.0, 3.0]), lengths=lengths)
        r0 = w0.read(0)
        r1 = w0.read(1)
        r2 = w0.read(2)
        d0, d1, d2 = session.run([r0, r1, r2])
        self.assertAllEqual(convert([1.0, 2.0]), d0)
        self.assertAllEqual(convert([]), d1)
        self.assertAllEqual(convert([3.0]), d2)
        # Split a matrix
        lengths = constant_op.constant([2, 0, 1])
        w0 = ta.split(
            convert([[1.0, 101.0], [2.0, 201.0], [3.0, 301.0]]), lengths=lengths)
        r0 = w0.read(0)
        r1 = w0.read(1)
        r2 = w0.read(2)
        d0, d1, d2 = session.run([r0, r1, r2])
        self.assertAllEqual(convert([[1.0, 101.0], [2.0, 201.0]]), d0)
        # The zero-length split keeps the trailing dimension: shape (0, 2).
        self.assertAllEqual(convert([]).reshape(0, 2), d1)
        self.assertAllEqual(convert([[3.0, 301.0]]), d2)
def testTensorArraySplitRead(self):
    """Exercise split/read across every supported element dtype."""
    for element_dtype in (dtypes.float32, dtypes.float64, dtypes.int32,
                          dtypes.int64, dtypes.complex64, dtypes.complex128,
                          dtypes.string):
        self._testTensorArraySplitRead(element_dtype)
def testTensorGradArrayWriteRead(self):
    """A gradient TensorArray is writable/readable independently of its source."""
    with self.test_session(use_gpu=True) as session:
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32,
            tensor_array_name="foo",
            size=3,
            infer_shape=False)
        g_ta = ta.grad("grad")
        w0 = ta.write(0, [[4.0, 5.0]])
        w1 = w0.write(1, [[1.0]])
        w2 = w1.write(2, -3.0)
        g_w0 = g_ta.write(0, [[5.0, 6.0]])
        g_w1 = g_w0.write(1, [[2.0]])
        g_w2 = g_w1.write(2, -2.0)
        r0 = w2.read(0)
        r1 = w2.read(1)
        r2 = w2.read(2)
        g_r0 = g_w2.read(0)
        g_r1 = g_w2.read(1)
        g_r2 = g_w2.read(2)
        d0, d1, d2, g_d0, g_d1, g_d2 = session.run([r0, r1, r2, g_r0, g_r1, g_r2])
        self.assertAllEqual([[4.0, 5.0]], d0)
        self.assertAllEqual([[1.0]], d1)
        self.assertAllEqual(-3.0, d2)
        self.assertAllEqual([[5.0, 6.0]], g_d0)
        self.assertAllEqual([[2.0]], g_d1)
        self.assertAllEqual(-2.0, g_d2)
def testTensorGradArrayDynamicWriteRead(self):
    """A dynamically-sized array and its grad array both report the grown size."""
    with self.test_session(use_gpu=True) as session:
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32,
            tensor_array_name="foo",
            size=0,
            dynamic_size=True,
            infer_shape=False)
        w0 = ta.write(0, [[4.0, 5.0]])
        w1 = w0.write(1, [[1.0]])
        w2 = w1.write(2, -3.0)
        g_ta = w2.grad("grad")  # Get gradient array here so we know the shape
        s = w2.size()
        g_s = g_ta.size()
        g_w0 = g_ta.write(0, [[5.0, 6.0]])
        g_w1 = g_w0.write(1, [[2.0]])
        g_w2 = g_w1.write(2, -2.0)
        r0 = w2.read(0)
        r1 = w2.read(1)
        r2 = w2.read(2)
        g_r0 = g_w2.read(0)
        g_r1 = g_w2.read(1)
        g_r2 = g_w2.read(2)
        d0, d1, d2, g_d0, g_d1, g_d2, vs, g_vs = session.run(
            [r0, r1, r2, g_r0, g_r1, g_r2, s, g_s])
        self.assertAllEqual([[4.0, 5.0]], d0)
        self.assertAllEqual([[1.0]], d1)
        self.assertAllEqual(-3.0, d2)
        self.assertAllEqual([[5.0, 6.0]], g_d0)
        self.assertAllEqual([[2.0]], g_d1)
        self.assertAllEqual(-2.0, g_d2)
        # Both arrays grew from 0 to 3 via the writes above.
        self.assertAllEqual(3, vs)
        self.assertAllEqual(3, g_vs)
def testTensorGradAccessTwiceReceiveSameObject(self):
    """grad() with the same source name twice aliases one underlying array."""
    with self.test_session(use_gpu=True) as session:
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32, tensor_array_name="foo", size=3)
        g_ta_0 = ta.grad("grad")
        g_ta_1 = ta.grad("grad")
        with ops.control_dependencies([g_ta_0.write(0, [[4.0, 5.0]]).flow]):
            # Write with one gradient handle, read with another copy of it
            r1_0 = g_ta_1.read(0)
        t_g_ta_0, t_g_ta_1, d_r1_0 = session.run(
            [g_ta_0.handle.op, g_ta_1.handle.op, r1_0])
        self.assertAllEqual(t_g_ta_0, t_g_ta_1)
        self.assertAllEqual([[4.0, 5.0]], d_r1_0)
def testTensorArrayWriteWrongIndexOrDataTypeFails(self):
    """Writes with a wrong dtype or an out-of-range index raise op errors."""
    with self.test_session(use_gpu=True):
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32, tensor_array_name="foo", size=3)
        # Test writing the wrong datatype
        with self.assertRaisesOpError(
            "TensorArray dtype is float but Op is trying to write dtype string"):
            ta.write(-1, "wrong_type_scalar").flow.eval()
        # Test writing to a negative index
        with self.assertRaisesOpError(
            "Tried to write to index -1 but array is not "
            "resizeable and size is: 3"):
            ta.write(-1, 3.0).flow.eval()
        # Test writing to too large an index (original comment said "reading",
        # but the statement below is a write).
        with self.assertRaisesOpError(
            "Tried to write to index 3 but array is not "
            "resizeable and size is: 3"):
            ta.write(3, 3.0).flow.eval()
def testTensorArrayReadWrongIndexOrDataTypeFails(self):
    """Reads with a wrong dtype, unwritten or out-of-range index raise op errors."""
    with self.test_session(use_gpu=True):
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32, tensor_array_name="foo", size=3)
        w0 = ta.write(0, [[4.0, 5.0]])
        # Test reading wrong datatype
        r0_bad = gen_data_flow_ops._tensor_array_read_v3(
            handle=w0.handle, index=0, dtype=dtypes.float64, flow_in=w0.flow)
        with self.assertRaisesOpError(
            "TensorArray dtype is float but Op requested dtype double."):
            r0_bad.eval()
        # Test reading from a different index than the one we wrote to
        r1 = w0.read(1)
        with self.assertRaisesOpError(
            "Could not read from TensorArray index 1 because "
            "it has not yet been written to."):
            r1.eval()
        # Test reading from a negative index
        with self.assertRaisesOpError(
            r"Tried to read from index -1 but array size is: 3"):
            ta.read(-1).eval()
        # Test reading from too large an index
        with self.assertRaisesOpError(
            "Tried to read from index 3 but array size is: 3"):
            ta.read(3).eval()
def testTensorArrayWriteMultipleFails(self):
    """Writing the same index twice on a non-grad array is an error."""
    with self.test_session(use_gpu=True):
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32, tensor_array_name="foo", size=3)
        with self.assertRaisesOpError(
            "Could not write to TensorArray index 2 because "
            "it has already been written to."):
            ta.write(2, 3.0).write(2, 3.0).flow.eval()
def testTensorArrayConcatIncompatibleShapesFails(self):
    """concat() rejects scalar elements and mismatched non-leading dims."""
    with self.test_session(use_gpu=True):
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32,
            tensor_array_name="foo",
            size=3,
            infer_shape=False)
        w1 = ta.write(0, 3.0)
        w2 = w1.write(1, 4.0)
        w3 = w2.write(2, [3.0])
        with self.assertRaisesOpError(
            "Concat saw a scalar shape at index 0 but requires at least vectors"):
            w3.concat().eval()
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32,
            tensor_array_name="foo",
            size=3,
            infer_shape=False)
        w1 = ta.write(0, [3.0])
        w2 = w1.write(1, [4.0])
        w3 = w2.write(2, [[3.0]])
        # Ranks differ beyond dimension 0, which concat cannot reconcile.
        with self.assertRaisesOpError(
            r"TensorArray has inconsistent shapes. Index 0 has "
            r"\(excepting dimension 0\) shape: \[\] but index 2 has \(excepting "
            r"dimension 0\) shape: \[1\]"):
            w3.concat().eval()
def testTensorArraySplitIncompatibleShapesFails(self):
    """split() validates lengths rank/sum, value rank and array size."""
    with self.test_session(use_gpu=True):
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32,
            tensor_array_name="foo",
            size=3,
            infer_shape=False)
        with self.assertRaisesOpError(
            r"Expected lengths to be a vector, received shape: \[\]"):
            lengths = array_ops.placeholder(dtypes.int64)
            ta.split([1.0, 2.0, 3.0], lengths).flow.eval(feed_dict={lengths: 1})
        with self.assertRaisesOpError(
            r"Expected sum of lengths to be equal to values.shape\[0\], "
            r"but sum of lengths is 1 and value's shape is: \[3\]"):
            ta.split([1.0, 2.0, 3.0], [1]).flow.eval()
        with self.assertRaisesOpError(
            r"Expected value to be at least a vector, but received shape: \[\]"):
            ta.split(1.0, [1]).flow.eval()
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32,
            tensor_array_name="foo",
            size=2,
            infer_shape=False)
        # lengths has one entry but the (static) array holds two elements.
        with self.assertRaisesOpError(
            r"TensorArray's size is not equal to the size of lengths "
            r"\(2 vs. 1\), and the TensorArray is not marked as "
            r"dynamically resizeable"):
            ta.split([1.0], [1]).flow.eval()
def _testTensorArrayWriteGradientAddMultipleAdds(self, dtype):
    """Gradient arrays aggregate repeated writes; source arrays reject them."""
    with self.test_session(use_gpu=True):
        ta = tensor_array_ops.TensorArray(
            dtype=dtype, tensor_array_name="foo", size=3, infer_shape=False)
        ta_grad = ta.grad("grad")
        c = lambda x: np.asarray(x, dtype=dtype.as_numpy_dtype)
        w0 = ta.write(2, c(3.0))
        w1 = w0.write(2, c(4.0))
        w0_grad = ta_grad.write(2, c(3.0))
        w1_grad = w0_grad.write(2, c(4.0))
        w2_grad = w1_grad.write(2, c(5.0))
        # Assert that aggregation works correctly
        self.assertAllEqual(c(12.00), w2_grad.read(2).eval())
        # Assert that if multiple_writes_aggregate is not enabled,
        # multiple writes raise an exception.
        with self.assertRaisesOpError(
            r"TensorArray foo_.*: Could not write to TensorArray index 2 because "
            r"it has already been written to."):
            w1.flow.eval()
        # Using differing shapes causes an exception
        wb0_grad = ta_grad.write(1, c(1.0))
        wb1_grad = wb0_grad.write(1, c([1.0]))
        with self.assertRaisesOpError(
            r"Could not aggregate to TensorArray index 1 because the "
            r"existing shape is \[\] but the new input shape is \[1\]"):
            wb1_grad.flow.eval()
def testTensorArrayWriteGradientAddMultipleAdds(self):
    """Run the gradient-aggregation check for all numeric dtypes."""
    for dtype in (dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64,
                  dtypes.complex64, dtypes.complex128):
        self._testTensorArrayWriteGradientAddMultipleAdds(dtype)
def testMultiTensorArray(self):
    """Two independently-named arrays coexist without interfering."""
    with self.test_session(use_gpu=True):
        h1 = tensor_array_ops.TensorArray(
            size=1, dtype=dtypes.float32, tensor_array_name="foo")
        w1 = h1.write(0, 4.0)
        r1 = w1.read(0)
        h2 = tensor_array_ops.TensorArray(
            size=1, dtype=dtypes.float32, tensor_array_name="bar")
        w2 = h2.write(0, 5.0)
        r2 = w2.read(0)
        r = r1 + r2
        self.assertAllClose(9.0, r.eval())
def _testTensorArrayGradientWriteReadType(self, dtype):
    """Gradients through writes/reads: duplicate reads of one index accumulate."""
    with self.test_session(use_gpu=True) as session:
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.as_dtype(dtype),
            tensor_array_name="foo",
            size=3,
            infer_shape=False)
        c = lambda x: np.array(x, dtype=dtype)
        value_0 = constant_op.constant(c([[4.0, 5.0]]))
        value_1 = constant_op.constant(c(3.0))
        w0 = ta.write(0, value_0)
        w1 = w0.write(1, value_1)
        r0 = w1.read(0)
        r1 = w1.read(1)
        r0_2 = w1.read(0)
        # Test individual components' gradients
        grad_just_r0 = gradients_impl.gradients(
            ys=[r0], xs=[value_0], grad_ys=[c([[2.0, 3.0]])])
        grad_just_r0_vals = session.run(grad_just_r0)
        self.assertAllEqual(c([[2.0, 3.0]]), grad_just_r0_vals[0])
        # Two reads of index 0: their incoming gradients must sum.
        grad_r0_r0_2 = gradients_impl.gradients(
            ys=[r0, r0_2],
            xs=[value_0],
            grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]])])
        grad_r0_r0_2_vals = session.run(grad_r0_r0_2)
        self.assertAllEqual(c([[3.0, 2.0]]), grad_r0_r0_2_vals[0])
        grad_just_r1 = gradients_impl.gradients(
            ys=[r1], xs=[value_1], grad_ys=[c(-2.0)])
        grad_just_r1_vals = session.run(grad_just_r1)
        self.assertAllEqual(c(-2.0), grad_just_r1_vals[0])
        # Test combined gradients
        grad = gradients_impl.gradients(
            ys=[r0, r0_2, r1],
            xs=[value_0, value_1],
            grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]]), c(-2.0)])
        grad_vals = session.run(grad)
        self.assertEqual(len(grad_vals), 2)
        self.assertAllEqual(c([[3.0, 2.0]]), grad_vals[0])
        self.assertAllEqual(c(-2.0), grad_vals[1])
def testTensorArrayGradientWriteRead(self):
    """Run the write/read gradient check for all numeric numpy dtypes."""
    for dtype in (np.float32, np.float64, np.int32, np.int64, np.complex64,
                  np.complex128):
        self._testTensorArrayGradientWriteReadType(dtype)
def _testTensorArrayGradientWritePackConcatAndRead(self):
    """Gradients from stack(), read(0) and concat() of one array accumulate.

    Writes two vectors, consumes the array through three different ops, and
    checks that the incoming grad_ys of all three consumers are summed into
    the gradients of the written values.
    """
    with self.test_session(use_gpu=True) as sess:
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32,
            tensor_array_name="foo",
            size=2,
            clear_after_read=False)
        value_0 = constant_op.constant([-1.0, 1.0])
        value_1 = constant_op.constant([-10.0, 10.0])
        w0 = ta.write(0, value_0)
        w1 = w0.write(1, value_1)
        p0 = w1.stack()
        r0 = w1.read(0)
        s0 = w1.concat()
        # Test gradient accumulation between read(0), pack(), and concat()
        with ops.control_dependencies([p0, r0, s0]):
            grad_r = gradients_impl.gradients(
                ys=[p0, r0, s0],
                xs=[value_0, value_1],
                grad_ys=[
                    [[2.0, 3.0], [4.0, 5.0]],  # pack gradient
                    [-0.5, 1.5],  # read(0) gradient
                    [20.0, 30.0, 40.0, 50.0]
                ])  # concat gradient
        grad_vals = sess.run(grad_r)  # 2 + 2 entries
        self.assertAllClose([2.0 - 0.5 + 20.0, 3.0 + 1.5 + 30.0], grad_vals[0])
        # Fix: use assertAllClose (not assertAllEqual) — these are sums of
        # floats, and the sibling assertion above already compares
        # approximately; exact equality is needlessly brittle here.
        self.assertAllClose([4.0 + 40.0, 5.0 + 50.0], grad_vals[1])
def testTensorArrayGradientWritePackConcatAndRead(self):
    """Public entry point for the multi-consumer gradient accumulation check."""
    self._testTensorArrayGradientWritePackConcatAndRead()
def testTensorArrayReadTwice(self):
    """Re-reading an index fails with clear_after_read=True, works when False."""
    with self.test_session(use_gpu=True):
        value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
        ta_readonce = tensor_array_ops.TensorArray(
            dtype=dtypes.float32, tensor_array_name="foo", size=2)
        w_readonce = ta_readonce.unstack(value)
        r0_readonce = w_readonce.read(0)
        with ops.control_dependencies([r0_readonce]):
            r1_readonce = w_readonce.read(0)
        with self.assertRaisesOpError(
            r"Could not read index 0 twice because it was cleared after a "
            r"previous read \(perhaps try setting clear_after_read = false\?\)"):
            r1_readonce.eval()
        ta_readtwice = tensor_array_ops.TensorArray(
            dtype=dtypes.float32,
            tensor_array_name="foo",
            size=2,
            clear_after_read=False)
        w_readtwice = ta_readtwice.unstack(value)
        r0_readtwice = w_readtwice.read(0)
        with ops.control_dependencies([r0_readtwice]):
            r1_readtwice = w_readtwice.read(0)
        self.assertAllEqual([1.0, -1.0], r1_readtwice.eval())
def _testTensorArrayGradientUnpackRead(self):
    """Gradients through unstack + repeated reads accumulate per element."""
    with self.test_session(use_gpu=True) as session:
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32,
            tensor_array_name="foo",
            size=2,
            clear_after_read=False)
        value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
        w = ta.unstack(value)
        r0 = w.read(0)
        r0_1 = w.read(0)
        r1 = w.read(1)
        # Test combined gradients + aggregation of read(0)
        grad = gradients_impl.gradients(
            ys=[r0, r0_1, r1],
            xs=[value],
            grad_ys=[[2.0, 3.0], [-1.5, 1.5], [4.0, 5.0]])
        grad_vals = session.run(grad)
        self.assertEqual(len(grad_vals), 1)
        self.assertAllEqual([[2.0 - 1.5, 3.0 + 1.5], [4.0, 5.0]], grad_vals[0])
def testTensorArrayGradientUnpackRead(self):
    """Public entry point for the unstack/read gradient aggregation check."""
    self._testTensorArrayGradientUnpackRead()
def testTensorArrayGradientSplitConcat(self):
    """split followed by concat is identity, so the gradient passes through."""
    with self.test_session(use_gpu=True) as session:
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32, tensor_array_name="foo", size=2)
        value = constant_op.constant(
            [[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
        w = ta.split(value, [2, 1])
        r = w.concat()
        # Test combined gradients
        grad = gradients_impl.gradients(
            ys=[r],
            xs=[value],
            grad_ys=[[[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]]])
        grad_vals = session.run(grad)
        self.assertEqual(len(grad_vals), 1)
        self.assertAllEqual([[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]],
                            grad_vals[0])
def _testTensorArrayGradientDynamicUnpackRead(self):
    """Gradients also flow through a dynamically-sized array's unstack/read."""
    with self.test_session(use_gpu=True) as session:
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32,
            tensor_array_name="foo",
            size=0,
            dynamic_size=True)
        value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
        w = ta.unstack(value)
        r0 = w.read(0)
        r1 = w.read(1)
        # Test combined gradients + aggregation of read(0)
        grad = gradients_impl.gradients(
            ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
        grad_vals = session.run(grad)
        self.assertEqual(len(grad_vals), 1)
        self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
def testTensorArrayGradientDynamicUnpackRead(self):
    """Public entry point for the dynamic-size unstack/read gradient check."""
    self._testTensorArrayGradientDynamicUnpackRead()
def testCloseTensorArray(self):
    """Closing a TensorArray executes without error."""
    with self.test_session(use_gpu=True) as session:
        array = tensor_array_ops.TensorArray(
            dtype=dtypes.float32, tensor_array_name="foo", size=3)
        close_op = array.close()
        session.run(close_op)
def testSizeTensorArray(self):
    """size() reports the static size the array was constructed with."""
    with self.test_session(use_gpu=True):
        array = tensor_array_ops.TensorArray(
            dtype=dtypes.float32, tensor_array_name="foo", size=3)
        size_op = array.size()
        self.assertAllEqual(3, size_op.eval())
def testWriteCloseTensorArray(self):
    """Closing an array after writes executes without error."""
    with self.test_session(use_gpu=True):
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32,
            tensor_array_name="foo",
            size=3,
            infer_shape=False)
        w0 = ta.write(0, [[4.0, 5.0]])
        w1 = w0.write(1, [3.0])
        w1.close().run()  # Expected to run without problems
def _testWhileLoopWritePackGradients(self, dynamic_size, dtype):
    """while_loop writes + stack: forward values and all gradients are exact."""
    np_dtype = dtype.as_numpy_dtype
    with self.test_session(use_gpu=True) as session:
        v0 = array_ops.identity(np.arange(3 * 5, dtype=np_dtype).reshape(3, 5))
        var = variables.Variable(np.arange(100, 105, dtype=np_dtype))
        state0 = array_ops.identity(np.array([1] * 5, dtype=np_dtype))
        ta = tensor_array_ops.TensorArray(
            dtype=dtype,
            tensor_array_name="foo",
            size=0 if dynamic_size else 3,
            dynamic_size=dynamic_size)
        time_0 = array_ops.identity(0)

        def body(time, ta_t, state):
            # One loop step: emit (v0[time] + var + state) and fold v0[time]
            # into the running state.
            sliced = array_ops.slice(
                v0, begin=array_ops.stack([time, 0]), size=[1, -1])
            sliced = array_ops.squeeze(sliced)
            out = sliced + var + state
            state += sliced
            ta_t = ta_t.write(time, out)
            return (time + 1, ta_t, state)

        (unused_0, h_final, unused_2) = control_flow_ops.while_loop(
            cond=lambda time, unused_1, unused_2: time < 3,
            body=body,
            loop_vars=(time_0, ta, state0),
            shape_invariants=(time_0.get_shape(), tensor_shape.unknown_shape(),
                              tensor_shape.unknown_shape()),
            parallel_iterations=3)
        vout = h_final.stack()
        grad_val = -np.arange(3 * 5, dtype=np_dtype).reshape(3, 5)
        v0_grad = gradients_impl.gradients([vout], [v0], [grad_val])[0]
        state0_grad = gradients_impl.gradients([vout], [state0], [grad_val])[0]
        var_grad = gradients_impl.gradients([vout], [var], [grad_val])[0]
        variables.global_variables_initializer().run()
        state0_t, var_t, v0_t, vout_t, v0_grad_t, var_grad_t, state0_grad_t = (
            session.run([state0, var, v0, vout, v0_grad, var_grad, state0_grad]))
        just_v0_grad_t, = session.run([v0_grad])
        # state = [ state0 | state0 + v0[0] | state0 + v0[0] + v0[1] ]
        # vout = [ v0[0] + var + state[0] |
        #          v0[1] + var + state[1] |
        #          v0[2] + var + state[2] ]
        #      = [ v0[0] + var + state0 |
        #          v0[1] + var + state0 + v0[0] |
        #          v0[2] + var + state0 + v0[0] + v0[1] ]
        #
        # d(vout[0])/d(v0) = [1 | 0 | 0 ]
        # d(vout[1])/d(v0) = [1 | 1 | 0 ]
        # d(vout[2])/d(v0) = [1 | 1 | 1 ]
        # d(vout)/d(var) = [1 | 1 | 1]
        # d(vout)/d(state0) = [ 1 | 1 | 1 ]
        state_per_time = np.array(
            [state0_t, state0_t + v0_t[0, :], state0_t + v0_t[0, :] + v0_t[1, :]])
        # Compare forward prop
        self.assertAllClose(v0_t + var_t + state_per_time, vout_t)
        # Compare backward prop
        expected_v0_grad_t = np.array([
            grad_val[0, :] + grad_val[1, :] + grad_val[2, :],
            grad_val[1, :] + grad_val[2, :], grad_val[2, :]
        ])
        self.assertAllEqual(expected_v0_grad_t, v0_grad_t)
        self.assertAllEqual(expected_v0_grad_t, just_v0_grad_t)
        self.assertAllClose(grad_val.sum(axis=0), var_grad_t)
        self.assertAllClose(grad_val.sum(axis=0), state0_grad_t)
def testWhileLoopWritePackGradients(self):
    """Static-size variant of the while-loop gradient check (float32 only)."""
    self._testWhileLoopWritePackGradients(
        dynamic_size=False, dtype=dtypes.float32)
    # TODO(ebrevdo): re-enable when While supports non-float32 gradients.
    # self._testWhileLoopWritePackGradients(
    #     dynamic_size=False, dtype=tf.int64)
def testWhileLoopDynamicWritePackGradients(self):
    """Dynamic-size variant of the while-loop gradient check."""
    self._testWhileLoopWritePackGradients(
        dynamic_size=True, dtype=dtypes.float32)
def testGradSerialTwoLoops(self):
    """Gradients flow through two sequential while_loops sharing one array."""
    with self.test_session():
        num_steps = 100
        acc = tensor_array_ops.TensorArray(
            dtype=dtypes.float32,
            size=num_steps,
            clear_after_read=False,
            element_shape=tensor_shape.scalar())
        i = constant_op.constant(0, name="i")
        x = constant_op.constant(2.0, name="x")
        c = lambda i, acc: i < 5

        def b(i, acc):
            # Entry 0 is x; each later entry doubles the previous one.
            x1 = control_flow_ops.cond(
                math_ops.equal(i, 0), lambda: x,
                lambda: math_ops.multiply(acc.read(i - 1), 2.0))
            return i + 1, acc.write(i, x1)

        i1, acc1 = control_flow_ops.while_loop(c, b, [i, acc])
        z = constant_op.constant(0.0)

        def fn(i, acc):
            return i + 1, acc.write(i, z)

        _, acc2 = control_flow_ops.while_loop(lambda i, acc: i < num_steps, fn,
                                              [i1, acc1])
        r = acc2.stack()
        grad = gradients_impl.gradients(r, [x])[0]
        # First loop writes x*(1,2,4,8,16); rest are zeros ⇒ d(sum)/dx = 31.
        self.assertAllClose(31.0, grad.eval())
def testSumOfTwoReadVariablesWithoutRepeatGrad(self):
    """d(a+b)/da and d(a+b)/db both equal the incoming gradient, alone or jointly."""
    with self.test_session(use_gpu=True) as session:
        a = array_ops.identity(
            np.arange(
                3 * 5, dtype=np.float32).reshape(3, 5) + 1)
        b = array_ops.identity(
            np.arange(
                3 * 5, dtype=np.float32).reshape(3, 5) + 1 + 3 * 5)
        ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
        ta = ta.write(0, a, name="write_a")
        ta = ta.write(1, b, name="write_b")
        c = (
            ta.read(
                0, name="read_a_0") +  # a + b
            ta.read(
                1, name="read_b_0"))
        g0 = -(np.arange(3 * 5, dtype=np.float32).reshape(3, 5) + 1)
        grad_a = gradients_impl.gradients([c], [a], [g0])[0]  # d(a+b)/da = 1
        grad_b = gradients_impl.gradients([c], [b], [g0])[0]  # d(a+b)/db = 1
        # Test gradients calculated individually
        grad_a_t, = session.run([grad_a])
        self.assertAllEqual(grad_a_t, g0)
        grad_b_t, = session.run([grad_b])
        self.assertAllEqual(grad_b_t, g0)
        # Test gradients calculated jointly
        joint_grad_a_t, joint_grad_b_t = session.run([grad_a, grad_b])
        self.assertAllEqual(joint_grad_a_t, g0)
        self.assertAllEqual(joint_grad_b_t, g0)
def _grad_source_for_name(self, name):
    """Return the gradient-source scope _GetGradSource derives for `name`."""
    return tensor_array_grad._GetGradSource(constant_op.constant(0, name=name))
def testGetGradSource_Invalid(self):
    """Names containing no gradients-scope component are rejected."""
    for bad_name in ("", "foo", "foo/bar"):
        with self.assertRaises(ValueError):
            self._grad_source_for_name(bad_name)
def testGetGradSource_NoEnclosingScope(self):
    """Top-level gradients scopes resolve to themselves (or the tensor name)."""
    cases = (
        ("gradients:0", "gradients"),
        ("gradients_0:0", "gradients_0"),
        ("gradients", "gradients/foo"),
        ("gradients_0", "gradients_0/foo"),
        ("gradients", "gradients/foo/bar"),
        ("gradients_0", "gradients_0/foo/bar"),
    )
    for expected, name in cases:
        self.assertEqual(expected, self._grad_source_for_name(name))
def testGetGradSource_EnclosingScope(self):
    """Gradients scopes nested under user scopes keep the enclosing prefix."""
    cases = (
        ("foo/gradients:0", "foo/gradients"),
        ("foo/gradients_0:0", "foo/gradients_0"),
        ("foo/gradients", "foo/gradients/bar"),
        ("foo/gradients_0", "foo/gradients_0/bar"),
        ("foo/bar/gradients", "foo/bar/gradients/baz"),
        ("foo/bar/gradients_0", "foo/bar/gradients_0/baz"),
    )
    for expected, name in cases:
        self.assertEqual(expected, self._grad_source_for_name(name))
def testGetGradSource_NestedUsesInnermost(self):
    """With nested gradients scopes the innermost one is the grad source."""
    self.assertEqual(
        "foo/gradients/bar/gradients_0",
        self._grad_source_for_name("foo/gradients/bar/gradients_0/baz"))
def testWriteShape(self):
    """With shape inference, reads carry the written static shape; mismatch fails."""
    with self.test_session():
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32, tensor_array_name="foo", size=3)
        c0 = constant_op.constant([4.0, 5.0])
        w0 = ta.write(0, c0)
        r0 = w0.read(0)
        self.assertAllEqual(c0.get_shape(), r0.get_shape())
        # NOTE(review): the two `ta` reassignments below are never used — the
        # writes continue from w0 — presumably intentional upstream; confirm.
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32, tensor_array_name="foo", size=3)
        c1 = constant_op.constant([6.0, 7.0])
        w1 = w0.write(1, c1)
        r0 = w1.read(0)
        r1 = w1.read(1)
        self.assertAllEqual(c0.get_shape(), r0.get_shape())
        self.assertAllEqual(c1.get_shape(), r1.get_shape())
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32, tensor_array_name="foo", size=3)
        c2 = constant_op.constant([4.0, 5.0, 6.0])
        # Writing an incompatible static shape is caught at graph build time.
        with self.assertRaises(ValueError):
            w0.write(0, c2)
def testPartlyUnknownShape(self):
    """Shape inference merges partly-known shapes across write/scatter/split."""
    with self.test_session():
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32, tensor_array_name="foo", size=6)
        c0 = array_ops.placeholder(dtypes.float32, [None, None, None, 3])
        w0 = ta.write(0, c0)
        r0 = w0.read(0)
        self.assertAllEqual([None, None, None, 3], r0.get_shape().as_list())
        c1 = array_ops.placeholder(dtypes.float32, [None, None, None, 3])
        w1 = w0.write(1, c1)
        r1 = w1.read(0)
        self.assertAllEqual([None, None, None, 3], r1.get_shape().as_list())
        # Writing less specific shape (doesn't change type.)
        c2 = array_ops.placeholder(dtypes.float32, [None, None, None, None])
        w2 = w1.write(2, c2)
        r2 = w2.read(0)
        self.assertAllEqual([None, None, None, 3], r2.get_shape().as_list())
        # Writing more specific shape in one dimension and less specific in
        # another.
        c3 = array_ops.placeholder(dtypes.float32, [None, None, 2, None])
        w3 = w2.write(3, c3)
        r3 = w3.read(0)
        self.assertAllEqual([None, None, 2, 3], r3.get_shape().as_list())
        # Writing partly defined shape using TensorArray.scatter.
        c4 = array_ops.placeholder(dtypes.float32, [2, None, 4, 2, 3])
        w4 = w3.scatter([4, 5], c4)
        r4 = w4.read(0)
        self.assertAllEqual([None, 4, 2, 3], r4.get_shape().as_list())
        # Writing fully defined shape using TensorArray.split.
        c5 = array_ops.placeholder(dtypes.float32, [10, 4, 2, 3])
        w5 = w4.split(c5, constant_op.constant([5, 5]))
        r5 = w5.read(0)
        self.assertAllEqual([5, 4, 2, 3], r5.get_shape().as_list())
def _testUnpackShape(self):
    """unstack() infers the element shape; later writes must match it."""
    with self.test_session():
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32,
            tensor_array_name="foo",
            size=0,
            dynamic_size=True,
            infer_shape=True)
        value = constant_op.constant(
            [[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
        w0 = ta.unstack(value)
        r0 = w0.read(0)
        # Each unstacked element is one row of `value`, i.e. shape (2,).
        self.assertAllEqual((2,), r0.get_shape())

        c1 = constant_op.constant([4.0, 5.0])
        w1 = w0.write(3, c1)
        r1 = w1.read(0)
        self.assertAllEqual(c1.get_shape(), r1.get_shape())

        # A write with an incompatible element shape must be rejected.
        c2 = constant_op.constant([4.0, 5.0, 6.0])
        with self.assertRaises(ValueError):
            w1.write(4, c2)
def testUnpackShape(self):
    """Public entry point for the unstack shape-inference test."""
    self._testUnpackShape()
def testSplitShape(self):
    """split() infers a static element shape only for equal-length splits."""
    with self.test_session():
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32,
            tensor_array_name="foo",
            size=0,
            dynamic_size=True,
            infer_shape=True)
        value = constant_op.constant([[1.0, -1.0], [2.0, -2.0], [3.0, -3.0]])
        w0 = ta.split(value, [1, 1, 1])
        r0 = w0.read(0)
        self.assertAllEqual((1, 2), r0.get_shape())

        ta1 = tensor_array_ops.TensorArray(
            dtype=dtypes.float32,
            tensor_array_name="foo1",
            size=0,
            dynamic_size=True,
            infer_shape=True)
        # Unequal split lengths -> the element shape cannot be inferred.
        w0 = ta1.split(value, [1, 2])
        r0 = w0.read(0)
        self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
def testWriteUnknownShape(self):
    """Writing a value with a fully-unknown shape leaves reads unknown."""
    with self.test_session():
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32,
            tensor_array_name="foo",
            size=3,
            infer_shape=True)
        c0 = array_ops.placeholder(dtypes.float32)
        w0 = ta.write(0, c0)
        r0 = w0.read(0)
        self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
def _testGradientWhenNotAllComponentsRead(self):
    """Unread TensorArray entries contribute zero gradient."""
    with self.test_session(use_gpu=True) as session:
        ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
        x = constant_op.constant([2.0, 3.0])
        w = ta.unstack(x)
        r0 = w.read(0)
        # calculate (dr0/dx0, dr0/dx1). since r0 = x0, gradients are (1, 0).
        grad_r0 = gradients_impl.gradients(ys=[r0], xs=[x], grad_ys=[1.0])
        grad_r0_vals = session.run(grad_r0)[0]
        self.assertAllEqual(grad_r0_vals, [1.0, 0.0])
def testGradientWhenNotAllComponentsRead(self):
    """Public entry point for the partial-read gradient test."""
    self._testGradientWhenNotAllComponentsRead()
def _testTensorArrayUnpackDynamic(self):
    """A dynamic TensorArray can grow past its initial size via write()."""
    with self.test_session(use_gpu=True) as sess:
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32, size=3, dynamic_size=True)
        x = constant_op.constant([1.0, 2.0, 3.0])
        w0 = ta.unstack(x)
        w1 = w0.write(3, 4.0)
        r = w1.stack()
        self.assertAllEqual(np.array([1.0, 2.0, 3.0, 4.0]), r.eval())
        # Only the three elements that came from `x` receive gradient.
        grad = gradients_impl.gradients(ys=[r], xs=[x])
        self.assertAllEqual(np.array([1.0, 1.0, 1.0]), sess.run(grad)[0])
def testTensorArrayUnpackDynamic(self):
    """Public entry point for the dynamic unstack test."""
    self._testTensorArrayUnpackDynamic()
def testTensorArraySplitDynamic(self):
    """split() into a dynamic TensorArray, then grow it and concat back."""
    with self.test_session(use_gpu=True) as sess:
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32, size=3, dynamic_size=True)
        x = constant_op.constant([1.0, 2.0, 3.0])
        w0 = ta.split(x, [1, 1, 1])
        w1 = w0.write(3, [4.0])
        r = w1.concat()
        self.assertAllEqual(np.array([1.0, 2.0, 3.0, 4.0]), r.eval())
        # Only the split elements that came from `x` receive gradient.
        grad = gradients_impl.gradients(ys=[r], xs=[x])
        self.assertAllEqual(np.array([1.0, 1.0, 1.0]), sess.run(grad)[0])
def _testTensorArrayEvalEmpty(self):
    """stack() of a zero-size array without a known element shape fails."""
    with self.test_session(use_gpu=True):
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32, size=0, dynamic_size=False, infer_shape=False)
        with self.assertRaisesOpError(
            "TensorArray has size zero, but element shape <unknown> is not fully "
            "defined. Currently only static shapes are supported when packing "
            "zero-size TensorArrays."):
            ta.stack().eval()
def testTensorArrayEvalEmpty(self):
    """Public entry point for the empty-array stack error test."""
    self._testTensorArrayEvalEmpty()
def _testTensorArrayEvalEmptyWithDefault(self):
    """A zero-size array with an inferred static shape can stack/concat."""
    with self.test_session(use_gpu=True):
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32, size=0, dynamic_size=False, infer_shape=True)
        self.assertEqual(0, ta.size().eval())
        # Don't actually perform the pack. This stores the static shape.
        ta.unstack(array_ops.zeros([0, 3, 5]))
        packed = ta.stack()
        self.assertAllEqual([0, 3, 5], packed.eval().shape)
        # Concatenating zero tensors along their first dimension gives a
        # first dimension of zero
        self.assertAllEqual([0, 5], ta.concat().eval().shape)
def testTensorArrayEvalEmptyWithDefault(self):
    """Public entry point for the empty-array-with-static-shape test."""
    self._testTensorArrayEvalEmptyWithDefault()
def testTensorArrayScatterReadAndGradients(self):
    """Gradients flow from reads back through scatter() to the source value."""
    with self.test_session(use_gpu=True) as session:
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32,
            tensor_array_name="foo",
            size=0,
            dynamic_size=True)
        indices = constant_op.constant([1, 8])
        value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])

        w = ta.scatter(indices, value)
        r0 = w.read(1)
        r1 = w.read(8)

        # Test combined gradients + aggregation of read(0)
        grad = gradients_impl.gradients(
            ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
        read_vals, grad_vals = session.run([[r0, r1], grad])

        self.assertEqual(len(read_vals), 2)
        self.assertEqual(len(grad_vals), 1)
        self.assertAllEqual([1.0, -1.0], read_vals[0])
        self.assertAllEqual([10.0, -10.0], read_vals[1])
        self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
def testTensorArrayWriteGatherAndGradients(self):
    """Gradients of gather() are zero for unread (ungathered) components."""
    with self.test_session(use_gpu=True) as session:
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.float32,
            tensor_array_name="foo",
            size=0,
            dynamic_size=True)
        values = constant_op.constant([[1.0 * x, -1.0 * x] for x in range(10)])
        indices = constant_op.constant([1, 8])

        w = ta.unstack(values)
        g = w.gather(indices)

        # Test combined gradients + aggregation of read(0)
        grad = gradients_impl.gradients(
            ys=[g], xs=[values], grad_ys=[[[2.0, 3.0], [4.0, 5.0]]])
        g_vals, grad_vals = session.run([[g], grad])

        # Gradients for 8 of the 10 unread components are zero.
        expected_grad = np.zeros((10, 2))
        expected_grad[1] = [2.0, 3.0]
        expected_grad[8] = [4.0, 5.0]

        self.assertEqual(len(g_vals), 1)
        self.assertEqual(len(grad_vals), 1)
        self.assertAllEqual([[1.0, -1.0], [8.0, -8.0]], g_vals[0])
        self.assertAllEqual(expected_grad, grad_vals[0])
def testTensorArrayGetsDeviceFromFirstWrite(self):
    """Device placement is fixed by the first write/unstack/split, not by
    the device scope active at construction time."""
    with ops.device("/gpu:1"):
        ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
    # parent device was ignored when creating the TensorArray
    self.assertEqual(ta.handle.device, "")
    self.assertEqual(ta.flow.device, "")
    with ops.device("/gpu:0"):
        # the first write sets the op's device
        ta = ta.write(0, 1.0)
    self.assertTrue("gpu:0" in ta.handle.device.lower())
    self.assertTrue("gpu:0" in ta.flow.device.lower())
    with ops.device("/gpu:1"):
        # subsequent writes do not modify the op's device
        ta = ta.write(1, 1.0)
    self.assertTrue("gpu:0" in ta.handle.device.lower())
    self.assertTrue("gpu:0" in ta.flow.device.lower())

    # The gradient TensorArray inherits the device of its source.
    ta_grad = ta.grad("grad")
    self.assertTrue("gpu:0" in ta_grad.handle.device.lower())
    self.assertTrue("gpu:0" in ta_grad.flow.device.lower())

    # Similar tests for unpack and split
    ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
    self.assertEqual(ta.handle.device, "")
    self.assertEqual(ta.flow.device, "")
    with ops.device("/gpu:0"):
        ta = ta.unstack([1.0, 2.0])
    self.assertTrue("gpu:0" in ta.handle.device.lower())
    self.assertTrue("gpu:0" in ta.flow.device.lower())
    with ops.device("/gpu:1"):
        ta = ta.unstack([1.0, 2.0])
    self.assertTrue("gpu:0" in ta.handle.device.lower())
    self.assertTrue("gpu:0" in ta.flow.device.lower())

    ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
    self.assertEqual(ta.handle.device, "")
    self.assertEqual(ta.flow.device, "")
    with ops.device("/gpu:0"):
        ta = ta.split([1.0, 2.0], [1, 1])
    self.assertTrue("gpu:0" in ta.handle.device.lower())
    self.assertTrue("gpu:0" in ta.flow.device.lower())
    with ops.device("/gpu:1"):
        ta = ta.split([1.0, 2.0], [1, 1])
    self.assertTrue("gpu:0" in ta.handle.device.lower())
    self.assertTrue("gpu:0" in ta.flow.device.lower())
def testTensorArrayGetsDeviceFromFirstWriteInWhileLoop(self):
    """A write inside a while_loop body also pins the array's device."""
    ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)

    def _body(i, ta_i):
        with ops.device("/gpu:0"):
            return i + 1, ta_i.write(i, 0.0)

    self.assertEqual(ta.handle.device, "")
    self.assertEqual(ta.flow.device, "")
    _, ta_out = control_flow_ops.while_loop(
        lambda i, ta: i < 2, _body, loop_vars=[0, ta])
    # Both the loop output and the original handle reflect the write device.
    self.assertTrue("gpu:0" in ta_out.handle.device.lower())
    self.assertTrue("gpu:0" in ta.handle.device.lower())
def testTensorArrayLazyDeviceSettingDoesNotConfuseInitialAccess(self):
    """Ops created before the first write still run against the final array."""
    with self.test_session(use_gpu=True) as session:
        ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
        self.assertEqual(ta.handle.device, "")

        # size() is created while the device is still unset...
        with ops.device("/cpu:0"):
            size = ta.size()
        # ...then the first write pins the array to gpu:0.
        with ops.device("/gpu:0"):
            ta = ta.write(0, 0.0)
        self.assertTrue("gpu:0" in ta.handle.device.lower())

        # This should use the TensorArray on /gpu:0
        size_value, _ = session.run((size, ta.flow))
        self.assertEqual(2, size_value)
def testTensorArrayIdentity(self):
    """identity() preserves dtype/size/contents and honours control deps."""
    with self.test_session() as session:
        ta0 = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2,
                                           infer_shape=False)
        ta1 = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=4,
                                           infer_shape=True)

        ta0 = ta0.write(0, 0.)
        ta1 = ta1.write(0, 1)

        v0 = variables.Variable(0)
        v1 = variables.Variable(0)

        # identity() under a control dependency must execute that dependency.
        with ops.control_dependencies([v0.assign_add(1)]):
            ta0 = ta0.identity()
        with ops.control_dependencies([v1.assign_add(1)]):
            ta1 = ta1.identity()

        read0 = ta0.read(0)
        read1 = ta1.read(0)
        size0 = ta0.size()
        size1 = ta1.size()

        # Tests correct properties on new TensorArrays.
        self.assertEqual(dtypes.float32, ta0.dtype)
        self.assertEqual(dtypes.int32, ta1.dtype)
        self.assertEqual(tensor_shape.unknown_shape(), read0.get_shape())
        self.assertEqual(tensor_shape.scalar(), read1.get_shape())

        variables.global_variables_initializer().run()

        read0_v, read1_v, size0_v, size1_v = session.run(
            (read0, read1, size0, size1))

        # Tests that the control dependencies was added and executed.
        self.assertEqual(1, v0.eval())
        self.assertEqual(1, v1.eval())

        # Tests correct TensorArray.
        self.assertEqual(read0_v, 0)
        self.assertEqual(read1_v, 1)
        self.assertEqual(size0_v, 2)
        self.assertEqual(size1_v, 4)
if __name__ == "__main__":
test.main()
| apache-2.0 |
ramramps/mkdocs | mkdocs/commands/build.py | 21 | 9619 | # coding: utf-8
from __future__ import unicode_literals
from datetime import datetime
import io
import logging
import os
from jinja2.exceptions import TemplateNotFound
import jinja2
import json
from mkdocs import nav, search, utils
from mkdocs.relative_path_ext import RelativePathExtension
import mkdocs
log = logging.getLogger(__name__)
def convert_markdown(markdown_source, config, site_navigation=None):
    """
    Convert the Markdown source file to HTML as per the config and
    site_navigation. Return a tuple of the HTML as a string, the parsed table
    of contents, and a dictionary of any metadata that was specified in the
    Markdown file.
    """
    # The relative-path extension runs alongside user-configured extensions
    # so intra-site links are rewritten relative to the current page.
    extensions = [
        RelativePathExtension(site_navigation, config['strict'])
    ] + config['markdown_extensions']

    return utils.convert_markdown(
        markdown_source=markdown_source,
        extensions=extensions,
        extension_configs=config['mdx_configs']
    )
def get_global_context(nav, config):
    """
    Given the SiteNavigation and config, generate the context which is relevant
    to app pages.
    """
    site_name = config['site_name']

    # The favicon is configured site-absolute; make it relative to the
    # current page via the nav's URL context.
    if config['site_favicon']:
        site_favicon = nav.url_context.make_relative('/' + config['site_favicon'])
    else:
        site_favicon = None

    page_description = config['site_description']

    extra_javascript = utils.create_media_urls(nav, config['extra_javascript'])
    extra_css = utils.create_media_urls(nav, config['extra_css'])

    return {
        'site_name': site_name,
        'site_author': config['site_author'],
        'favicon': site_favicon,
        'page_description': page_description,

        # Note that there's intentionally repetition here. Rather than simply
        # provide the config dictionary we instead pass everything explicitly.
        #
        # This helps ensure that we can throughly document the context that
        # gets passed to themes.
        'repo_url': config['repo_url'],
        'repo_name': config['repo_name'],
        'nav': nav,
        'base_url': nav.url_context.make_relative('/'),
        'homepage_url': nav.homepage.url,
        'site_url': config['site_url'],

        'extra_css': extra_css,
        'extra_javascript': extra_javascript,

        'include_nav': config['include_nav'],
        'include_next_prev': config['include_next_prev'],

        'copyright': config['copyright'],
        'google_analytics': config['google_analytics'],

        'mkdocs_version': mkdocs.__version__,
        'build_date_utc': datetime.utcnow(),

        'config': config
    }
def get_page_context(page, content, toc, meta, config):
    """
    Generate the page context by extending the global context and adding page
    specific variables.
    """
    # The homepage (and pages without a title) get no explicit page title;
    # the homepage alone inherits the site-wide description.
    page_title = None if page.is_homepage or page.title is None else page.title
    page_description = config['site_description'] if page.is_homepage else None

    site_url = config['site_url']
    if site_url:
        # Canonical URLs are built off the configured site URL, which must
        # end with a slash before joining the page's absolute path.
        base = site_url if site_url.endswith('/') else site_url + '/'
        canonical_url = utils.urljoin(base, page.abs_url.lstrip('/'))
    else:
        canonical_url = None

    return {
        'page_title': page_title,
        'page_description': page_description,

        'content': content,
        'toc': toc,
        'meta': meta,

        'canonical_url': canonical_url,

        'current_page': page,
        'previous_page': page.previous_page,
        'next_page': page.next_page,
    }
def build_template(template_name, env, config, site_navigation=None):
    """Render a single theme template into the site directory.

    Returns True on success and False when the theme does not provide the
    template (themes are not required to ship every optional template).
    """
    log.debug("Building template: %s", template_name)

    try:
        template = env.get_template(template_name)
    except TemplateNotFound:
        return False

    # Templates rendered without navigation get an empty context.
    if site_navigation is not None:
        context = get_global_context(site_navigation, config)
    else:
        context = {}

    output_content = template.render(context)
    output_path = os.path.join(config['site_dir'], template_name)
    utils.write_file(output_content.encode('utf-8'), output_path)
    return True
def _build_page(page, config, site_navigation, env, dump_json):
    """Render one documentation page to HTML (or JSON) in the site dir.

    Returns (html_content, table_of_contents, meta) for use by the caller
    (e.g. to feed the search index).
    """
    # Read the input file
    input_path = os.path.join(config['docs_dir'], page.input_path)
    try:
        input_content = io.open(input_path, 'r', encoding='utf-8').read()
    except IOError:
        log.error('file not found: %s', input_path)
        raise

    # Process the markdown text
    html_content, table_of_contents, meta = convert_markdown(
        markdown_source=input_content,
        config=config,
        site_navigation=site_navigation
    )

    context = get_global_context(site_navigation, config)
    context.update(get_page_context(
        page, html_content, table_of_contents, meta, config
    ))

    # Allow 'template:' override in md source files.
    if 'template' in meta:
        template = env.get_template(meta['template'][0])
    else:
        template = env.get_template('base.html')

    # Render the template.
    output_content = template.render(context)

    # Write the output file.
    output_path = os.path.join(config['site_dir'], page.output_path)
    if dump_json:
        # JSON dump mode emits the raw page data instead of rendered HTML.
        json_context = {
            'content': context['content'],
            'title': context['current_page'].title,
            'url': context['current_page'].abs_url,
            'language': 'en',
        }
        json_output = json.dumps(json_context, indent=4).encode('utf-8')
        utils.write_file(json_output, output_path.replace('.html', '.json'))
    else:
        utils.write_file(output_content.encode('utf-8'), output_path)

    return html_content, table_of_contents, meta
def build_extra_templates(extra_templates, config, site_navigation=None):
    """Render user-supplied extra templates from docs_dir into site_dir."""
    log.debug("Building extra_templates page")

    for extra_template in extra_templates:
        input_path = os.path.join(config['docs_dir'], extra_template)

        with io.open(input_path, 'r', encoding='utf-8') as template_file:
            template = jinja2.Template(template_file.read())

        if site_navigation is not None:
            context = get_global_context(site_navigation, config)
        else:
            context = {}

        output_content = template.render(context)
        output_path = os.path.join(config['site_dir'], extra_template)
        utils.write_file(output_content.encode('utf-8'), output_path)
def build_pages(config, dump_json=False):
    """
    Builds all the pages and writes them into the build directory.
    """
    site_navigation = nav.SiteNavigation(config['pages'], config['use_directory_urls'])
    # Theme dirs are searched first, then mkdocs' own bundled templates.
    loader = jinja2.FileSystemLoader(config['theme_dir'] + [config['mkdocs_templates'], ])
    env = jinja2.Environment(loader=loader)
    search_index = search.SearchIndex()

    build_template('404.html', env, config, site_navigation)

    if not build_template('search.html', env, config, site_navigation):
        log.debug("Search is enabled but the theme doesn't contain a "
                  "search.html file. Assuming the theme implements search "
                  "within a modal.")

    build_template('sitemap.xml', env, config, site_navigation)

    build_extra_templates(config['extra_templates'], config, site_navigation)

    for page in site_navigation.walk_pages():
        try:
            log.debug("Building page %s", page.input_path)
            build_result = _build_page(page, config, site_navigation, env,
                                       dump_json)
            html_content, table_of_contents, _ = build_result
            # Every page contributes entries to the site-wide search index.
            search_index.add_entry_from_context(
                page, html_content, table_of_contents)
        except Exception:
            log.error("Error building page %s", page.input_path)
            raise

    search_index = search_index.generate_search_index()
    json_output_path = os.path.join(config['site_dir'], 'mkdocs', 'search_index.json')
    utils.write_file(search_index.encode('utf-8'), json_output_path)
def build(config, live_server=False, dump_json=False, clean_site_dir=False):
    """
    Perform a full site build.
    """
    if clean_site_dir:
        log.info("Cleaning site directory")
        utils.clean_directory(config['site_dir'])
    if not live_server:
        log.info("Building documentation to directory: %s", config['site_dir'])
        if not clean_site_dir and site_directory_contains_stale_files(config['site_dir']):
            log.info("The directory contains stale files. Use --clean to remove them.")

    if dump_json:
        # JSON mode writes page data only; no media/theme assets are copied.
        build_pages(config, dump_json=True)
        return

    # Reversed as we want to take the media files from the builtin theme
    # and then from the custom theme_dir so the custom versions take
    # precedence.
    for theme_dir in reversed(config['theme_dir']):
        log.debug("Copying static assets from theme: %s", theme_dir)
        utils.copy_media_files(theme_dir, config['site_dir'])

    log.debug("Copying static assets from the docs dir.")
    utils.copy_media_files(config['docs_dir'], config['site_dir'])

    log.debug("Building markdown pages.")
    build_pages(config)
def site_directory_contains_stale_files(site_directory):
    """
    Check if the site directory contains stale files from a previous build.
    Right now the check returns true if the directory is not empty.
    A more sophisticated approach should be found to trigger only if there are
    files that won't be overwritten anyway.
    """
    # A missing directory trivially has no stale files; otherwise any
    # directory entry counts as stale.
    return bool(os.path.exists(site_directory) and os.listdir(site_directory))
| bsd-2-clause |
Samael500/flask-security | tests/test_misc.py | 3 | 6598 | # -*- coding: utf-8 -*-
"""
test_emails
~~~~~~~~~~~
Email functionality tests
"""
import pytest
from flask_security import Security
from flask_security.forms import LoginForm, RegisterForm, ConfirmRegisterForm, \
SendConfirmationForm, PasswordlessLoginForm, ForgotPasswordForm, ResetPasswordForm, \
ChangePasswordForm, TextField, PasswordField, email_required, email_validator, valid_user_email
from flask_security.utils import capture_reset_password_requests, md5, string_types
from utils import authenticate, init_app_with_options, populate_data
@pytest.mark.recoverable()
def test_async_email_task(app, client):
    """A custom send_mail_task handler is invoked instead of direct send."""
    app.mail_sent = False

    @app.security.send_mail_task
    def send_email(msg):
        app.mail_sent = True

    # Triggering a password reset must route the mail through the handler.
    client.post('/reset', data=dict(email='matt@lp.com'))
    assert app.mail_sent is True
def test_register_blueprint_flag(app, sqlalchemy_datastore):
    """With register_blueprint=False the security views must not be routed."""
    # Bug fix: previously the `Security` class itself was passed as the
    # `datastore` argument (datastore=Security); the extension expects the
    # datastore instance provided by the fixture, as in every other test.
    app.security = Security(app, datastore=sqlalchemy_datastore,
                            register_blueprint=False)
    client = app.test_client()

    # /login is served by the (unregistered) security blueprint -> 404.
    response = client.get('/login')
    assert response.status_code == 404
@pytest.mark.registerable()
@pytest.mark.recoverable()
@pytest.mark.changeable()
def test_basic_custom_forms(app, sqlalchemy_datastore):
    """Custom form subclasses replace the stock forms in every view."""
    class MyLoginForm(LoginForm):
        email = TextField('My Login Email Address Field')

    class MyRegisterForm(RegisterForm):
        email = TextField('My Register Email Address Field')

    class MyForgotPasswordForm(ForgotPasswordForm):
        email = TextField('My Forgot Email Address Field',
                          validators=[email_required, email_validator, valid_user_email])

    class MyResetPasswordForm(ResetPasswordForm):
        password = TextField('My Reset Password Field')

    class MyChangePasswordForm(ChangePasswordForm):
        password = PasswordField('My Change Password Field')

    app.security = Security(app,
                            datastore=sqlalchemy_datastore,
                            login_form=MyLoginForm,
                            register_form=MyRegisterForm,
                            forgot_password_form=MyForgotPasswordForm,
                            reset_password_form=MyResetPasswordForm,
                            change_password_form=MyChangePasswordForm)

    populate_data(app)
    client = app.test_client()

    # Each view must render its custom field label.
    response = client.get('/login')
    assert b'My Login Email Address Field' in response.data

    response = client.get('/register')
    assert b'My Register Email Address Field' in response.data

    response = client.get('/reset')
    assert b'My Forgot Email Address Field' in response.data

    # A real reset token is required to reach the reset-password form.
    with capture_reset_password_requests() as requests:
        response = client.post('/reset', data=dict(email='matt@lp.com'))
        token = requests[0]['token']

    response = client.get('/reset/' + token)
    assert b'My Reset Password Field' in response.data

    # The change-password view requires an authenticated user.
    authenticate(client)

    response = client.get('/change')
    assert b'My Change Password Field' in response.data
@pytest.mark.registerable()
@pytest.mark.confirmable()
def test_confirmable_custom_form(app, sqlalchemy_datastore):
    """Custom confirm-register and send-confirmation forms are honoured."""
    app.config['SECURITY_REGISTERABLE'] = True
    app.config['SECURITY_CONFIRMABLE'] = True

    class MyRegisterForm(ConfirmRegisterForm):
        email = TextField('My Register Email Address Field')

    class MySendConfirmationForm(SendConfirmationForm):
        email = TextField('My Send Confirmation Email Address Field')

    app.security = Security(app,
                            datastore=sqlalchemy_datastore,
                            send_confirmation_form=MySendConfirmationForm,
                            confirm_register_form=MyRegisterForm)

    client = app.test_client()

    response = client.get('/register')
    assert b'My Register Email Address Field' in response.data

    response = client.get('/confirm')
    assert b'My Send Confirmation Email Address Field' in response.data
def test_passwordless_custom_form(app, sqlalchemy_datastore):
    """A custom passwordless login form replaces the stock one."""
    app.config['SECURITY_PASSWORDLESS'] = True

    class MyPasswordlessLoginForm(PasswordlessLoginForm):
        email = TextField('My Passwordless Email Address Field')

    app.security = Security(app,
                            datastore=sqlalchemy_datastore,
                            passwordless_login_form=MyPasswordlessLoginForm)

    client = app.test_client()

    response = client.get('/login')
    assert b'My Passwordless Email Address Field' in response.data
def test_addition_identity_attributes(app, sqlalchemy_datastore):
    """Users can authenticate via any configured identity attribute."""
    init_app_with_options(app, sqlalchemy_datastore, **{
        'SECURITY_USER_IDENTITY_ATTRIBUTES': ('email', 'username')
    })
    client = app.test_client()
    # 'matt' is not an email, so the lookup must fall through to username.
    response = authenticate(client, email='matt', follow_redirects=True)
    assert b'Hello matt@lp.com' in response.data
def test_flash_messages_off(app, sqlalchemy_datastore, get_message):
    """With flash messages disabled, no LOGIN message appears in responses."""
    init_app_with_options(app, sqlalchemy_datastore, **{
        'SECURITY_FLASH_MESSAGES': False
    })
    client = app.test_client()
    response = client.get('/profile')
    assert get_message('LOGIN') not in response.data
def test_invalid_hash_scheme(app, sqlalchemy_datastore, get_message):
    """An unknown SECURITY_PASSWORD_HASH value must raise at init time."""
    with pytest.raises(ValueError):
        init_app_with_options(app, sqlalchemy_datastore, **{
            'SECURITY_PASSWORD_HASH': 'bogus'
        })
def test_change_hash_type(app, sqlalchemy_datastore):
    """Login keeps working after the configured hash scheme is switched."""
    init_app_with_options(app, sqlalchemy_datastore, **{
        'SECURITY_PASSWORD_SCHEMES': ['bcrypt', 'plaintext']
    })

    # Switch the active hash and re-initialise the extension.
    app.config['SECURITY_PASSWORD_HASH'] = 'bcrypt'
    app.config['SECURITY_PASSWORD_SALT'] = 'salty'
    app.security = Security(app, datastore=sqlalchemy_datastore, register_blueprint=False)

    client = app.test_client()

    response = client.post('/login', data=dict(email='matt@lp.com', password='password'))
    assert response.status_code == 302

    response = client.get('/logout')

    # A second login after logout must also succeed.
    response = client.post('/login', data=dict(email='matt@lp.com', password='password'))
    assert response.status_code == 302
def test_md5():
    """md5() yields a native string for byte as well as unicode input."""
    for payload in (b'hello', u'hell\xf6'):
        digest = md5(payload)
        assert isinstance(digest, string_types)
@pytest.mark.settings(password_salt=u'öööööööööööööööööööööööööööööööööö',
                      password_hash='bcrypt')
def test_password_unicode_password_salt(client):
    """A non-ASCII password salt must not break bcrypt authentication."""
    response = authenticate(client)
    assert response.status_code == 302
    response = authenticate(client, follow_redirects=True)
    assert b'Hello matt@lp.com' in response.data
| mit |
d7415/merlin | Hooks/target/book.py | 1 | 5513 | # This file is part of Merlin.
# Merlin is the Copyright (C)2008,2009,2010 of Robin K. Hansen, Elliot Rosemarine, Andreas Jacobsen.
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from sqlalchemy.exc import IntegrityError
from sqlalchemy.sql import asc
from Core.config import Config
from Core.paconf import PA
from Core.db import session
from Core.maps import Updates, Planet, Target
from Core.loadable import loadable, route, require_user
class book(loadable):
    """Book a target for attack. You should always book your targets, so someone doesn't inadvertedly piggy your attack."""
    usage = " <x:y:z> (eta|landing tick) [later]"

    @route(loadable.planet_coord+r"\s+(\d+)(?:\s+(y)\S*)?(?:\s+(l)\S*)?", access = "half")
    @require_user
    def execute(self, message, user, params):
        """Book a landing tick on a planet for the calling user.

        Regex groups: 1,3,5 = x,y,z coordinates; 6 = eta or landing tick;
        7 = the "y" override flag; 8 = the "l" (later) flag.
        """
        planet = Planet.load(*params.group(1,3,5))
        if planet is None:
            message.alert("No planet with coords %s:%s:%s" % params.group(1,3,5))
            return

        tick = Updates.current_tick()
        when = int(params.group(6))
        # A small number is read as an eta; anything larger is an absolute
        # landing tick, which must lie in the future.
        if when < PA.getint("numbers", "protection"):
            eta = when
            when += tick
        elif when <= tick:
            message.alert("Can not book targets in the past. You wanted tick %s, but current tick is %s." % (when, tick,))
            return
        else:
            eta = when - tick
        # Cap at 32767 — presumably the storage column's smallint maximum;
        # TODO(review): confirm against the Target schema.
        if when > 32767:
            when = 32767
        override = params.group(7)
        later = params.group(8)

        # Refuse to book a planet known (via intel) to be in our own alliance.
        if planet.intel and planet.alliance and planet.alliance.name == Config.get("Alliance","name"):
            message.reply("%s:%s:%s is %s in %s. Quick, launch before they notice the highlight." % (planet.x,planet.y,planet.z, planet.intel.nick or 'someone', Config.get("Alliance","name"),))
            return

        # free is the first available tick (None if fully booked); book1/book2
        # are the existing bookings blocking `when` and `when+1`, if any.
        free, book1, book2 = self.get_free_book(planet, when, later)
        if free is None:
            if later is None:
                message.reply("Target %s:%s:%s is already booked for landing tick %s by user %s" % (planet.x,planet.y,planet.z, when, book1.user.name,))
            else:
                message.reply("You cannot hit %s:%s:%s. Not even sloppy seconds. This target is more taken than your mum, amirite?" % (planet.x,planet.y,planet.z,))
            return

        # Without the override flag, list existing later bookings and bail out
        # instead of booking immediately.
        if override is None and later is None:
            books = planet.bookings.filter(Target.tick >= when).order_by(asc(Target.tick)).all()
            if len(books) >= 1:
                reply = "There are already bookings for that target after landing pt %s (eta %s). To see status on this target, do !status %s:%s:%s." % (when,eta, planet.x,planet.y,planet.z,)
                reply+= " To force booking at your desired eta/landing tick, use !book %s:%s:%s %s yes (Bookers: " %(planet.x,planet.y,planet.z, when,)
                prev=[]
                for book in books:
                    prev.append("(%s user:%s)" % (book.tick, book.user.name,))
                reply += ", ".join(prev) + ")"
                message.reply(reply)
                return

        # Pick the reply matching how far the booking slipped from `when`.
        if free == when:
            reply = "Booked landing on %s:%s:%s tick %s (eta %s) for user %s" % (planet.x,planet.y,planet.z, free, (free-tick), user.name,)
        elif free == when + 1:
            reply = "You have been beaten to %s:%s:%s by %s. You are now getting sloppy seconds at tick %s (eta %s)" % (planet.x,planet.y,planet.z, book1.user.name, free, (free-tick),)
        elif free == when + 2:
            reply = "You've been beaten to %s:%s:%s by %s and %s you slow retarded faggot. I feel sorry for you, so have tick %s (eta %s)" % (planet.x,planet.y,planet.z, book1.user.name, book2.user.name, free, (free-tick),)

        try:
            planet.bookings.append(Target(user=user, tick=free))
            session.commit()
            message.reply(reply)
            return
        except IntegrityError:
            # A concurrent booking for the same (planet, tick) slipped in.
            session.rollback()
            raise Exception("Integrity error? Unable to booking for pid %s and tick %s"%(planet.id, when,))
        return

    def get_free_book(self, planet, when, later):
        """Find the first free tick at/after `when`.

        Returns (free_tick, booking_at_when, booking_at_when_plus_1);
        free_tick is None if no slot is available. Without `later`, only
        `when` itself is considered; with `later`, up to two later ticks.
        """
        book1 = planet.bookings.filter(Target.tick == when).first()
        if book1 is None:
            return when, None, None
        if later is None:
            return None, book1, None
        when += 1
        book2 = planet.bookings.filter(Target.tick == when).first()
        if book2 is None:
            return when, book1, None
        when += 1
        book3 = planet.bookings.filter(Target.tick == when).first()
        if book3 is None:
            return when, book1, book2
        else:
            return None, book1, book2
| gpl-2.0 |
winklerand/pandas | pandas/tests/plotting/test_deprecated.py | 1 | 1535 | # coding: utf-8
import string
import pandas as pd
import pandas.util.testing as tm
import pandas.util._test_decorators as td
import pytest
from numpy.random import randn
import pandas.tools.plotting as plotting
from pandas.tests.plotting.common import TestPlotBase
"""
Test cases for plot functions imported from deprecated
pandas.tools.plotting
"""
@td.skip_if_no_mpl
class TestDeprecatedNameSpace(TestPlotBase):
    """Each legacy pandas.tools.plotting entry point must warn FutureWarning."""

    @pytest.mark.slow
    def test_scatter_plot_legacy(self):
        tm._skip_if_no_scipy()
        df = pd.DataFrame(randn(100, 2))

        # Both the module-level and top-level aliases must warn.
        with tm.assert_produces_warning(FutureWarning):
            plotting.scatter_matrix(df)

        with tm.assert_produces_warning(FutureWarning):
            pd.scatter_matrix(df)

    @pytest.mark.slow
    def test_boxplot_deprecated(self):
        df = pd.DataFrame(randn(6, 4),
                          index=list(string.ascii_letters[:6]),
                          columns=['one', 'two', 'three', 'four'])
        df['indic'] = ['foo', 'bar'] * 3

        with tm.assert_produces_warning(FutureWarning):
            plotting.boxplot(df, column=['one', 'two'],
                             by='indic')

    @pytest.mark.slow
    def test_radviz_deprecated(self):
        df = self.iris
        with tm.assert_produces_warning(FutureWarning):
            plotting.radviz(frame=df, class_column='Name')

    @pytest.mark.slow
    def test_plot_params(self):
        # Mutating the deprecated plot_params proxy must also warn.
        with tm.assert_produces_warning(FutureWarning):
            pd.plot_params['xaxis.compat'] = True
| bsd-3-clause |
h3llrais3r/SickRage | lib/markupsafe/__init__.py | 144 | 10697 | # -*- coding: utf-8 -*-
"""
markupsafe
~~~~~~~~~~
Implements a Markup string.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import re
import string
from collections import Mapping
from markupsafe._compat import text_type, string_types, int_types, \
unichr, iteritems, PY2
__version__ = "1.0"
__all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent']

# Matches HTML comments and tags (used to strip markup).
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
# Matches an HTML entity body such as "amp" in "&amp;" (used to unescape).
_entity_re = re.compile(r'&([^& ;]+);')
class Markup(text_type):
    r"""Marks a string as being safe for inclusion in HTML/XML output without
    needing to be escaped. This implements the `__html__` interface a couple
    of frameworks and web applications use. :class:`Markup` is a direct
    subclass of `unicode` and provides all the methods of `unicode` just that
    it escapes arguments passed and always returns `Markup`.
    The `escape` function returns markup objects so that double escaping can't
    happen.
    The constructor of the :class:`Markup` class can be used for three
    different things: When passed an unicode object it's assumed to be safe,
    when passed an object with an HTML representation (has an `__html__`
    method) that representation is used, otherwise the object passed is
    converted into a unicode string and then assumed to be safe:
    >>> Markup("Hello <em>World</em>!")
    Markup(u'Hello <em>World</em>!')
    >>> class Foo(object):
    ...  def __html__(self):
    ...   return '<a href="#">foo</a>'
    ...
    >>> Markup(Foo())
    Markup(u'<a href="#">foo</a>')
    If you want object passed being always treated as unsafe you can use the
    :meth:`escape` classmethod to create a :class:`Markup` object:
    >>> Markup.escape("Hello <em>World</em>!")
    Markup(u'Hello <em>World</em>!')
    Operations on a markup string are markup aware which means that all
    arguments are passed through the :func:`escape` function:
    >>> em = Markup("<em>%s</em>")
    >>> em % "foo & bar"
    Markup(u'<em>foo & bar</em>')
    >>> strong = Markup("<strong>%(text)s</strong>")
    >>> strong % {'text': '<blink>hacker here</blink>'}
    Markup(u'<strong><blink>hacker here</blink></strong>')
    >>> Markup("<em>Hello</em> ") + "<foo>"
    Markup(u'<em>Hello</em> <foo>')
    """
    # No per-instance attributes; keeps instances as cheap as plain strings.
    __slots__ = ()
    def __new__(cls, base=u'', encoding=None, errors='strict'):
        # Objects exposing __html__ provide their own safe representation.
        if hasattr(base, '__html__'):
            base = base.__html__()
        if encoding is None:
            return text_type.__new__(cls, base)
        return text_type.__new__(cls, base, encoding, errors)
    def __html__(self):
        # Markup is already safe: return itself unchanged.
        return self
    def __add__(self, other):
        # Concatenation escapes the other operand before joining.
        if isinstance(other, string_types) or hasattr(other, '__html__'):
            return self.__class__(super(Markup, self).__add__(self.escape(other)))
        return NotImplemented
    def __radd__(self, other):
        # Mirror of __add__ for "plain + Markup"; the plain side is escaped.
        if hasattr(other, '__html__') or isinstance(other, string_types):
            return self.escape(other).__add__(self)
        return NotImplemented
    def __mul__(self, num):
        # Repetition needs no escaping; the content is already safe.
        if isinstance(num, int_types):
            return self.__class__(text_type.__mul__(self, num))
        return NotImplemented
    __rmul__ = __mul__
    def __mod__(self, arg):
        # Wrap %-format arguments so they are escaped when rendered as text.
        if isinstance(arg, tuple):
            arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
        else:
            arg = _MarkupEscapeHelper(arg, self.escape)
        return self.__class__(text_type.__mod__(self, arg))
    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__,
            text_type.__repr__(self)
        )
    def join(self, seq):
        # Each joined element is escaped; the result stays Markup.
        return self.__class__(text_type.join(self, map(self.escape, seq)))
    join.__doc__ = text_type.join.__doc__
    def split(self, *args, **kwargs):
        # Fragments of a safe string are themselves safe.
        return list(map(self.__class__, text_type.split(self, *args, **kwargs)))
    split.__doc__ = text_type.split.__doc__
    def rsplit(self, *args, **kwargs):
        return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs)))
    rsplit.__doc__ = text_type.rsplit.__doc__
    def splitlines(self, *args, **kwargs):
        return list(map(self.__class__, text_type.splitlines(
            self, *args, **kwargs)))
    splitlines.__doc__ = text_type.splitlines.__doc__
    def unescape(self):
        r"""Unescape markup again into an text_type string. This also resolves
        known HTML4 and XHTML entities:
        >>> Markup("Main » <em>About</em>").unescape()
        u'Main \xbb <em>About</em>'
        """
        # Imported here (not at module level) to avoid a circular import.
        from markupsafe._constants import HTML_ENTITIES
        def handle_match(m):
            # Resolve one &name; / &#NN; / &#xNN; entity to its character.
            name = m.group(1)
            if name in HTML_ENTITIES:
                return unichr(HTML_ENTITIES[name])
            try:
                if name[:2] in ('#x', '#X'):
                    return unichr(int(name[2:], 16))
                elif name.startswith('#'):
                    return unichr(int(name[1:]))
            except ValueError:
                pass
            # Don't modify unexpected input.
            return m.group()
        return _entity_re.sub(handle_match, text_type(self))
    def striptags(self):
        r"""Unescape markup into an text_type string and strip all tags. This
        also resolves known HTML4 and XHTML entities. Whitespace is
        normalized to one:
        >>> Markup("Main » <em>About</em>").striptags()
        u'Main \xbb About'
        """
        stripped = u' '.join(_striptags_re.sub('', self).split())
        return Markup(stripped).unescape()
    @classmethod
    def escape(cls, s):
        """Escape the string. Works like :func:`escape` with the difference
        that for subclasses of :class:`Markup` this function would return the
        correct subclass.
        """
        rv = escape(s)
        if rv.__class__ is not cls:
            return cls(rv)
        return rv
    def make_simple_escaping_wrapper(name):
        # Factory: wrap a text_type method so every string argument (positional
        # and keyword) is escaped before delegating, and the result is
        # re-wrapped as the Markup subclass.
        orig = getattr(text_type, name)
        def func(self, *args, **kwargs):
            args = _escape_argspec(list(args), enumerate(args), self.escape)
            _escape_argspec(kwargs, iteritems(kwargs), self.escape)
            return self.__class__(orig(self, *args, **kwargs))
        func.__name__ = orig.__name__
        func.__doc__ = orig.__doc__
        return func
    # Install escaping wrappers for all plain string methods that accept
    # string arguments; assigning into locals() at class-body time makes
    # them class attributes.
    for method in '__getitem__', 'capitalize', \
                  'title', 'lower', 'upper', 'replace', 'ljust', \
                  'rjust', 'lstrip', 'rstrip', 'center', 'strip', \
                  'translate', 'expandtabs', 'swapcase', 'zfill':
        locals()[method] = make_simple_escaping_wrapper(method)
    # new in python 2.5
    if hasattr(text_type, 'partition'):
        def partition(self, sep):
            return tuple(map(self.__class__,
                             text_type.partition(self, self.escape(sep))))
        def rpartition(self, sep):
            return tuple(map(self.__class__,
                             text_type.rpartition(self, self.escape(sep))))
    # new in python 2.6
    if hasattr(text_type, 'format'):
        def format(*args, **kwargs):
            # Written with *args so a positional "self" keyword arg to
            # format() cannot collide with the real self.
            self, args = args[0], args[1:]
            formatter = EscapeFormatter(self.escape)
            kwargs = _MagicFormatMapping(args, kwargs)
            return self.__class__(formatter.vformat(self, args, kwargs))
        def __html_format__(self, format_spec):
            # Markup itself accepts no format specification.
            if format_spec:
                raise ValueError('Unsupported format specification '
                                 'for Markup.')
            return self
    # not in python 3
    if hasattr(text_type, '__getslice__'):
        __getslice__ = make_simple_escaping_wrapper('__getslice__')
    # Remove the class-body helpers so they don't leak as attributes.
    del method, make_simple_escaping_wrapper
class _MagicFormatMapping(Mapping):
"""This class implements a dummy wrapper to fix a bug in the Python
standard library for string formatting.
See http://bugs.python.org/issue13598 for information about why
this is necessary.
"""
def __init__(self, args, kwargs):
self._args = args
self._kwargs = kwargs
self._last_index = 0
def __getitem__(self, key):
if key == '':
idx = self._last_index
self._last_index += 1
try:
return self._args[idx]
except LookupError:
pass
key = str(idx)
return self._kwargs[key]
def __iter__(self):
return iter(self._kwargs)
def __len__(self):
return len(self._kwargs)
if hasattr(text_type, 'format'):
    # str.format() support (Python 2.6+): a Formatter whose field values are
    # all passed through the escape callable before substitution.
    class EscapeFormatter(string.Formatter):
        def __init__(self, escape):
            # escape: callable turning an arbitrary value into safe markup.
            self.escape = escape
        def format_field(self, value, format_spec):
            # Precedence: __html_format__ > __html__ > default formatting.
            if hasattr(value, '__html_format__'):
                rv = value.__html_format__(format_spec)
            elif hasattr(value, '__html__'):
                if format_spec:
                    raise ValueError('No format specification allowed '
                                     'when formatting an object with '
                                     'its __html__ method.')
                rv = value.__html__()
            else:
                # We need to make sure the format spec is unicode here as
                # otherwise the wrong callback methods are invoked. For
                # instance a byte string there would invoke __str__ and
                # not __unicode__.
                rv = string.Formatter.format_field(
                    self, value, text_type(format_spec))
            return text_type(self.escape(rv))
def _escape_argspec(obj, iterable, escape):
"""Helper for various string-wrapped functions."""
for key, value in iterable:
if hasattr(value, '__html__') or isinstance(value, string_types):
obj[key] = escape(value)
return obj
class _MarkupEscapeHelper(object):
"""Helper for Markup.__mod__"""
def __init__(self, obj, escape):
self.obj = obj
self.escape = escape
__getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x], s.escape)
__unicode__ = __str__ = lambda s: text_type(s.escape(s.obj))
__repr__ = lambda s: str(s.escape(repr(s.obj)))
__int__ = lambda s: int(s.obj)
__float__ = lambda s: float(s.obj)
# We have to import these down here because the speedups and native modules
# import the Markup type, which is defined above.
try:
    from markupsafe._speedups import escape, escape_silent, soft_unicode
except ImportError:
    from markupsafe._native import escape, escape_silent, soft_unicode
if not PY2:
    # Python-3-era alias: "soft_str" is the natural name when unicode is str.
    soft_str = soft_unicode
    __all__.append('soft_str')
| gpl-3.0 |
imranj131/Brain-Soother | OpenBCI_Python/plugin_interface.py | 4 | 1586 |
"""
Extends Yapsy IPlugin interface to pass information about the board to plugins.
Fields of interest for plugins:
args: list of arguments passed to the plugins
sample_rate: actual sample rate of the board
eeg_channels: number of EEG
aux_channels: number of AUX channels
If needed, plugins that need to report an error can set self.is_activated to False during activate() call.
NB: because of how yapsy discovery system works, plugins must use the following syntax to inherit to use polymorphism (see http://yapsy.sourceforge.net/Advices.html):
import plugin_interface as plugintypes
class PluginExample(plugintypes.IPluginExtended):
...
"""
from yapsy.IPlugin import IPlugin
class IPluginExtended(IPlugin):
    """Yapsy plugin base class that records board information before activation.

    Attributes set by :meth:`pre_activate`:
      args: list of arguments passed to the plugin
      sample_rate: actual sample rate of the board
      eeg_channels: number of EEG channels
      aux_channels: number of AUX channels

    Plugins that need to report an error can set ``self.is_activated`` to
    False during their ``activate()`` call.
    """

    # args: passed by command line
    def pre_activate(self, args, sample_rate=250, eeg_channels=8, aux_channels=3):
        self.args = args
        self.sample_rate = sample_rate
        self.eeg_channels = eeg_channels
        self.aux_channels = aux_channels
        # by default we say that activation was okay -- inherited from IPlugin
        self.is_activated = True
        self.activate()
        # tell outside world if init went good or bad
        return self.is_activated

    # inherited from IPlugin
    def activate(self):
        # print() with a single argument behaves identically on Python 2 and 3;
        # the old "print ..." statement form is a SyntaxError on Python 3.
        print("Plugin %s activated." % self.__class__.__name__)

    # inherited from IPlugin
    def deactivate(self):
        print("Plugin %s deactivated." % self.__class__.__name__)

    # plugins that require arguments should implement this method
    def show_help(self):
        print("I, %s, do not need any parameter." % self.__class__.__name__)
| mit |
fhcrc/deenurp | deenurp/util.py | 1 | 6545 | """
Utility functions
"""
import bz2
import contextlib
import functools
import gzip
import itertools
import os
import os.path
import shutil
import sys
import time
import tempfile
from Bio import SeqIO
def apply_df_status(func, df, msg=''):
    """Apply *func* to each row of *df*, writing progress to stderr.

    A temporary ``index_number`` column is added to *df* (in place, matching
    the original behavior) so each row knows its position; the column is
    dropped from the returned frame.

    :param func: callable applied to each row (a pandas Series); its return
        values form the rows of the result.
    :param df: pandas DataFrame to process.
    :param msg: prefix for the progress message written to stderr.
    :returns: DataFrame of ``func`` results with the helper column removed.
    """
    tmp_column = 'index_number'
    row_count = float(len(df))
    # range() instead of the Python-2-only xrange(); equivalent here and
    # works on both Python 2 and 3.
    df[tmp_column] = range(int(row_count))
    msg += ' {:.0%}\r'

    def apply_func(item, msg):
        # Report fraction of rows processed, overwriting the same line.
        sys.stderr.write(msg.format(item[tmp_column] / row_count))
        return func(item)

    df = df.apply(apply_func, args=[msg], axis=1)
    return df.drop(tmp_column, axis=1)
class Counter(object):
    """
    Count objects processed in iterable. By default, progress is written to
    stderr every 0.3 seconds of work.
    """
    def __init__(self, iterable, stream=sys.stderr, report_every=0.3,
                 prefix=''):
        """
        :param iterable: source of items; consumed lazily via iteration.
        :param stream: where progress lines are written (falsy disables output).
        :param report_every: minimum seconds between progress reports.
        :param prefix: text prepended to each progress line.
        """
        self._it = iter(iterable)
        self.count = 0
        self.stream = stream
        self.report_every = report_every
        self.prefix = prefix
        # time.clock() was removed in Python 3.8; prefer perf_counter() when
        # available and keep clock() as the Python 2 fallback.
        self._timer = getattr(time, 'perf_counter', None) or time.clock
        self.start = self._timer()
        self.last = 0
    def _report(self):
        # Write one carriage-return-terminated progress line (count + elapsed).
        if self.stream:
            msg = '{0}{1:15d} [{2:10.2f}s]\r'
            msg = msg.format(self.prefix, self.count, self._timer() - self.start)
            self.stream.write(msg)
    def __iter__(self):
        for i in self._it:
            yield i
            self.count += 1
            now = self._timer()
            # Throttle reporting to at most once per report_every seconds.
            if now - self.last > self.report_every:
                self._report()
                self.last = now
class SingletonDefaultDict(dict):
    """Dict-like object that maps *every* key to one fixed value.

    Lookups never raise :exc:`KeyError` and membership tests always
    succeed; the stored value is exposed as ``self.val``.
    """

    def __init__(self, val=None):
        # The single value handed back for any key.
        self.val = val

    def __getitem__(self, key):
        return self.val

    def __contains__(self, key):
        return True
def memoize(fn):
    """Cache *fn*'s results keyed on its positional arguments.

    The cache dict is exposed as ``wrapper.cache`` for inspection/clearing.
    Only positional arguments are supported (they must be hashable).
    """
    cache = {}

    @functools.wraps(fn)
    def inner(*args):
        if args not in cache:
            cache[args] = fn(*args)
        return cache[args]

    inner.cache = cache
    return inner
def unique(iterable, key=lambda x: x):
    """Yield elements of *iterable* whose ``key(element)`` value has not
    been seen before.

    Order is preserved; later duplicates (by key) are skipped.
    """
    seen = set()
    for item in iterable:
        marker = key(item)
        if marker in seen:
            continue
        seen.add(marker)
        yield item
@contextlib.contextmanager
def nothing(obj=None):
    """
    The least interesting context manager: no setup, no teardown; simply
    yields ``obj`` (default ``None``) unchanged.
    """
    yield obj
@contextlib.contextmanager
def ntf(**kwargs):
    """Yield a named temporary file that is removed on context exit.

    Unlike :func:`tempfile.NamedTemporaryFile`, deletion happens when this
    context manager exits rather than when the handle is closed, so the
    file may be closed (and e.g. reopened by another process) while still
    in use.
    """
    kwargs['delete'] = False
    handle = tempfile.NamedTemporaryFile(**kwargs)
    try:
        with handle:
            yield handle
    finally:
        os.unlink(handle.name)
@contextlib.contextmanager
def tempcopy(path, **kwargs):
    """
    Create a temporary copy of ``path``, available for the duration of the
    context manager.  The copy's prefix/suffix default to those of ``path``;
    extra keyword arguments are forwarded to the temporary-file constructor.
    Yields the path of the copy, which is deleted on exit.
    """
    prefix, suffix = os.path.splitext(os.path.basename(path))
    a = {'prefix': prefix, 'suffix': suffix}
    a.update(kwargs)
    # Open the source in binary mode: the temporary file is opened in binary
    # mode by default ('w+b'), and copying text into a binary handle raises
    # a TypeError on Python 3.
    with open(path, 'rb') as fp, ntf(**a) as tf:
        shutil.copyfileobj(fp, tf)
        tf.close()
        yield tf.name
@contextlib.contextmanager
def tempdir(**kwargs):
    """Yield a path-join helper rooted at a fresh temporary directory.

    The directory (and everything inside it) is removed when the context
    manager exits.

    :returns: ``functools.partial(os.path.join, <tmpdir>)`` — call it with
        no arguments for the directory itself, or with path components to
        build paths inside it.

    Example:
    >>> with tempdir(prefix='rubbish-') as td:  # doctest: +SKIP
    ...     print "Directory is:", td()
    ...     print "Put some data in:", td('file1.txt')
    Directory is: /tmp/rubbish-5AQFpo
    Put some data in: /tmp/rubbish-5AQFpo/file1.txt
    """
    dirname = tempfile.mkdtemp(**kwargs)
    try:
        yield functools.partial(os.path.join, dirname)
    finally:
        shutil.rmtree(dirname)
@contextlib.contextmanager
def as_fasta(sequences, **kwargs):
    """
    Write *sequences* to a temporary FASTA file and yield its path.
    The file is removed when the context manager exits.
    """
    kwargs.setdefault('suffix', '.fasta')
    with ntf(**kwargs) as tf:
        SeqIO.write(sequences, tf, 'fasta')
        tf.flush()
        tf.close()
        yield tf.name
@contextlib.contextmanager
def maybe_tempfile(obj=None, **kwargs):
    """
    Yield *obj* if one was provided; otherwise yield a fresh temporary file
    (see :func:`ntf`) for the duration of the context manager.
    """
    if obj is None:
        with ntf(**kwargs) as tf:
            yield tf
    else:
        yield obj
@contextlib.contextmanager
def cd(path):
    """
    Temporarily change the working directory to *path*; the previous
    directory is restored when the context manager exits.
    """
    previous = os.getcwd()
    try:
        os.chdir(path)
        yield
    finally:
        os.chdir(previous)
def file_opener(mode='r', buffering=-1):
    """
    Returns a function that behaves similarly to ``open(...)``,
    but opens compressed files for certain matching extensions, currently
    ``.bz2`` is treated as bzip2-compression, and ``.gz`` is treated as gzip.

    ``'-'`` (or the stdin/stdout objects themselves) maps to stdin for read
    modes and stdout otherwise.
    """
    def open_file(f):
        if f is sys.stdout or f is sys.stdin:
            return f
        if f == '-':
            return sys.stdin if 'r' in mode else sys.stdout
        if f.endswith('.bz2'):
            # BZ2File's ``buffering`` argument was deprecated and removed in
            # Python 3.9 (it was ignored anyway), so it is not passed here.
            return bz2.BZ2File(f, mode=mode)
        if f.endswith('.gz'):
            return gzip.open(f, mode=mode)
        return open(f, mode=mode, buffering=buffering)
    return open_file
def which(executable_name, dirs=None):
    """
    Locate ``executable_name`` in ``dirs`` and return its full path, or
    ``None`` when it cannot be found.

    If ``dirs`` is not specified, the directories on ``$PATH`` are searched.
    """
    if not dirs:
        dirs = os.environ['PATH'].split(os.pathsep)
    for directory in dirs:
        candidate = os.path.join(directory, executable_name)
        if os.path.exists(candidate) and os.access(candidate, os.EX_OK):
            return candidate
    return None
class MissingDependencyError(ValueError):
    """Raised when a required external executable cannot be found."""
    pass
def require_executable(executable_name):
    """Raise :class:`MissingDependencyError` unless *executable_name* can
    be located on ``$PATH`` (see :func:`which`)."""
    found = which(executable_name)
    if not found:
        raise MissingDependencyError(executable_name)
def chunker(iterable, n, fillvalue=None):
    """
    Yield successive lists of up to ``n`` items from ``iterable``.

    The final chunk may be shorter than ``n``.  ``fillvalue`` is accepted
    for backward compatibility but unused: chunks are never padded.
    """
    # islice() only advances *iterators*.  Calling iter() here makes the
    # function correct for plain sequences (lists, tuples), which previously
    # produced an infinite stream of identical first chunks.
    it = iter(iterable)
    while True:
        chunk = list(itertools.islice(it, n))
        if not chunk:
            return
        yield chunk
| gpl-3.0 |
zixan/bitcoin | qa/rpc-tests/httpbasics.py | 30 | 4470 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test rpc http basics
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import base64
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
class HTTPBasicsTest(BitcoinTestFramework):
    """Exercise the RPC HTTP server's connection handling.

    Verifies HTTP/1.1 persistent connections (implicit and explicit
    keep-alive), that "Connection: close" really closes the socket, and the
    per-node keep-alive settings (node1 runs with keep-alive disabled,
    node2 with the defaults).
    """

    def setup_nodes(self):
        return start_nodes(4, self.options.tmpdir)

    def run_test(self):
        #################################################
        # lowlevel check for http persistent connection #
        #################################################
        url = urlparse.urlparse(self.nodes[0].url)
        authpair = url.username + ':' + url.password
        headers = {"Authorization": "Basic " + base64.b64encode(authpair)}

        conn = httplib.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert_equal('"error":null' in out1, True)
        # according to http/1.1 the connection must still be open
        assert_equal(conn.sock != None, True)

        # send 2nd request without closing connection
        conn.request('POST', '/', '{"method": "getchaintips"}', headers)
        out2 = conn.getresponse().read()
        # BUGFIX: check the second response (was mistakenly re-checking
        # out1); it must also be a correct json-rpc message
        assert_equal('"error":null' in out2, True)
        assert_equal(conn.sock != None, True)
        conn.close()

        # same should happen if we add keep-alive explicitly, because this
        # should be the standard behaviour
        headers = {"Authorization": "Basic " + base64.b64encode(authpair),
                   "Connection": "keep-alive"}
        conn = httplib.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert_equal('"error":null' in out1, True)
        assert_equal(conn.sock != None, True)

        # send 2nd request without closing connection
        conn.request('POST', '/', '{"method": "getchaintips"}', headers)
        out2 = conn.getresponse().read()
        # BUGFIX: check out2 here as well (was out1)
        assert_equal('"error":null' in out2, True)
        assert_equal(conn.sock != None, True)
        conn.close()

        # now do the same with "Connection: close"
        headers = {"Authorization": "Basic " + base64.b64encode(authpair),
                   "Connection": "close"}
        conn = httplib.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert_equal('"error":null' in out1, True)
        # now the connection must be closed after the response
        assert_equal(conn.sock != None, False)

        # node1 (2nd node) is running with disabled keep-alive option
        urlNode1 = urlparse.urlparse(self.nodes[1].url)
        authpair = urlNode1.username + ':' + urlNode1.password
        headers = {"Authorization": "Basic " + base64.b64encode(authpair)}
        conn = httplib.HTTPConnection(urlNode1.hostname, urlNode1.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert_equal('"error":null' in out1, True)

        # node2 (third node) is running with standard keep-alive parameters,
        # which means keep-alive is on
        urlNode2 = urlparse.urlparse(self.nodes[2].url)
        authpair = urlNode2.username + ':' + urlNode2.password
        headers = {"Authorization": "Basic " + base64.b64encode(authpair)}
        conn = httplib.HTTPConnection(urlNode2.hostname, urlNode2.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert_equal('"error":null' in out1, True)
        # the connection stays OPEN because bitcoind uses keep-alive by
        # default (the original comment claimed it "must be closed",
        # contradicting the assertion)
        assert_equal(conn.sock != None, True)
if __name__ == '__main__':
    HTTPBasicsTest().main()
| mit |
llhe/tensorflow | tensorflow/contrib/saved_model/python/saved_model/__init__.py | 109 | 1191 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel contrib support.
SavedModel provides a language-neutral format to save machine-learned models
that is recoverable and hermetic. It enables higher-level systems and tools to
produce, consume and transform TensorFlow models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.saved_model.python.saved_model import signature_def_utils
# pylint: enable=wildcard-import
| apache-2.0 |
Lekanich/intellij-community | plugins/hg4idea/testData/bin/mercurial/hgweb/request.py | 96 | 5087 | # hgweb/request.py - An http request from either CGI or the standalone server.
#
# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import socket, cgi, errno
from mercurial import util
from common import ErrorResponse, statusmessage, HTTP_NOT_MODIFIED
# Maps a URL shortcut query parameter to the expanded list of
# (name, value) request parameters it stands for.  A value of None means
# "reuse the value that was supplied with the shortcut itself".
shortcuts = {
    'cl': [('cmd', ['changelog']), ('rev', None)],
    'sl': [('cmd', ['shortlog']), ('rev', None)],
    'cs': [('cmd', ['changeset']), ('node', None)],
    'f': [('cmd', ['file']), ('filenode', None)],
    'fl': [('cmd', ['filelog']), ('filenode', None)],
    'fd': [('cmd', ['filediff']), ('node', None)],
    'fa': [('cmd', ['annotate']), ('filenode', None)],
    'mf': [('cmd', ['manifest']), ('manifest', None)],
    'ca': [('cmd', ['archive']), ('node', None)],
    'tags': [('cmd', ['tags'])],
    'tip': [('cmd', ['changeset']), ('node', ['tip'])],
    'static': [('cmd', ['static']), ('file', None)]
}
def normalize(form):
    """Expand shortcut parameters in *form* and strip all values.

    Shortcut keys (see ``shortcuts``) are replaced in place by their
    expanded parameter lists; an expansion value of ``None`` reuses the
    shortcut's own value.  Every remaining value is whitespace-stripped.
    The (mutated) form is returned.
    """
    # replace each shortcut key present in the form with its expansion
    for shortcut, expansion in shortcuts.iteritems():
        if shortcut not in form:
            continue
        for name, value in expansion:
            form[name] = form[shortcut] if value is None else value
        del form[shortcut]
    # strip surrounding whitespace from every value
    for key, values in form.iteritems():
        form[key] = [v.strip() for v in values]
    return form
class wsgirequest(object):
    """Wraps a WSGI environ/start_response pair with helpers for reading
    the request body and emitting a response (status, headers, payload)."""
    def __init__(self, wsgienv, start_response):
        # Only WSGI 1.x is supported.
        version = wsgienv['wsgi.version']
        if (version < (1, 0)) or (version >= (2, 0)):
            raise RuntimeError("Unknown and unsupported WSGI version %d.%d"
                               % version)
        self.inp = wsgienv['wsgi.input']
        self.err = wsgienv['wsgi.errors']
        self.threaded = wsgienv['wsgi.multithread']
        self.multiprocess = wsgienv['wsgi.multiprocess']
        self.run_once = wsgienv['wsgi.run_once']
        self.env = wsgienv
        # Parsed (and shortcut-expanded) query/form parameters.
        self.form = normalize(cgi.parse(self.inp,
                                        self.env,
                                        keep_blank_values=1))
        self._start_response = start_response
        # Callable returned by start_response(); set lazily in respond().
        self.server_write = None
        self.headers = []
    def __iter__(self):
        return iter([])
    def read(self, count=-1):
        return self.inp.read(count)
    def drain(self):
        '''need to read all data from request, httplib is half-duplex'''
        length = int(self.env.get('CONTENT_LENGTH') or 0)
        for s in util.filechunkiter(self.inp, limit=length):
            pass
    def respond(self, status, type, filename=None, body=None):
        # Send status and headers on the first call only; subsequent calls
        # may still supply a body to be written.
        if self._start_response is not None:
            self.headers.append(('Content-Type', type))
            if filename:
                # Sanitize the filename for the Content-Disposition header.
                filename = (filename.split('/')[-1]
                            .replace('\\', '\\\\').replace('"', '\\"'))
                self.headers.append(('Content-Disposition',
                                     'inline; filename="%s"' % filename))
            if body is not None:
                self.headers.append(('Content-Length', str(len(body))))
            for k, v in self.headers:
                if not isinstance(v, str):
                    raise TypeError('header value must be string: %r' % (v,))
            if isinstance(status, ErrorResponse):
                self.headers.extend(status.headers)
                if status.code == HTTP_NOT_MODIFIED:
                    # RFC 2616 Section 10.3.5: 304 Not Modified has cases where
                    # it MUST NOT include any headers other than these and no
                    # body
                    self.headers = [(k, v) for (k, v) in self.headers if
                                    k in ('Date', 'ETag', 'Expires',
                                          'Cache-Control', 'Vary')]
                status = statusmessage(status.code, status.message)
            elif status == 200:
                status = '200 Script output follows'
            elif isinstance(status, int):
                status = statusmessage(status)
            self.server_write = self._start_response(status, self.headers)
            # Drop the callable so headers cannot be sent twice.
            self._start_response = None
            self.headers = []
        if body is not None:
            self.write(body)
            self.server_write = None
    def write(self, thing):
        if thing:
            try:
                self.server_write(thing)
            except socket.error, inst:
                # A reset peer is expected when the client disconnects early;
                # everything else is re-raised.
                if inst[0] != errno.ECONNRESET:
                    raise
    def writelines(self, lines):
        for line in lines:
            self.write(line)
    def flush(self):
        return None
    def close(self):
        return None
def wsgiapplication(app_maker):
    """Wrap the application produced by *app_maker* in a WSGI callable.

    Kept for compatibility with old CGI scripts; a plain hgweb() or
    hgwebdir() can and should now be used as a WSGI application directly.
    """
    app = app_maker()

    def run_wsgi(env, respond):
        return app(env, respond)

    return run_wsgi
| apache-2.0 |
mtlchun/edx | common/djangoapps/student/migrations/0039_auto__del_courseregistrationcode.py | 114 | 13511 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: drops the ``student_courseregistrationcode``
    table (the CourseRegistrationCode model was removed)."""
    def forwards(self, orm):
        """Apply the migration: delete the CourseRegistrationCode table."""
        # Deleting model 'CourseRegistrationCode'
        db.delete_table('student_courseregistrationcode')
    def backwards(self, orm):
        """Revert the migration: recreate the CourseRegistrationCode table
        with its original columns."""
        # Adding model 'CourseRegistrationCode'
        db.create_table('student_courseregistrationcode', (
            ('code', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
            ('transaction_group_name', self.gf('django.db.models.fields.CharField')(blank=True, max_length=255, null=True, db_index=True)),
            ('redeemed_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='redeemed_by_user', null=True, to=orm['auth.User'])),
            ('course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255, db_index=True)),
            ('redeemed_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2014, 6, 25, 0, 0), null=True)),
            ('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2014, 6, 25, 0, 0))),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='created_by_user', to=orm['auth.User'])),
        ))
        db.send_create_signal('student', ['CourseRegistrationCode'])
    # Frozen ORM definitions capturing the schema as of this migration;
    # used by South at migration time and intentionally left unedited.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'student.anonymoususerid': {
            'Meta': {'object_name': 'AnonymousUserId'},
            'anonymous_user_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'student.courseaccessrole': {
            'Meta': {'unique_together': "(('user', 'org', 'course_id', 'role'),)", 'object_name': 'CourseAccessRole'},
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'org': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'blank': 'True'}),
            'role': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'student.courseenrollment': {
            'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'student.courseenrollmentallowed': {
            'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'},
            'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'student.loginfailures': {
            'Meta': {'object_name': 'LoginFailures'},
            'failure_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lockout_until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'student.passwordhistory': {
            'Meta': {'object_name': 'PasswordHistory'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'time_set': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'student.pendingemailchange': {
            'Meta': {'object_name': 'PendingEmailChange'},
            'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'student.pendingnamechange': {
            'Meta': {'object_name': 'PendingNameChange'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'student.registration': {
            'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
            'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'student.userprofile': {
            'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
            'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'city': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
            'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
            'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
            'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
        },
        'student.usersignupsource': {
            'Meta': {'object_name': 'UserSignupSource'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'site': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'user_id': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'student.userstanding': {
            'Meta': {'object_name': 'UserStanding'},
            'account_status': ('django.db.models.fields.CharField', [], {'max_length': '31', 'blank': 'True'}),
            'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'standing_last_changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'standing'", 'unique': 'True', 'to': "orm['auth.User']"})
        },
        'student.usertestgroup': {
            'Meta': {'object_name': 'UserTestGroup'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
        }
    }
    complete_apps = ['student']
| agpl-3.0 |
mkolar/pyblish-kredenc | test/nukestudio/validate_task.py | 2 | 1991 | from pyblish import api
from pyblish_bumpybox import inventory
class ValidateOutputRange(api.InstancePlugin):
    """Check that the task exports the cut length, not the clip length.

    The output range reported by the task is compared against the full
    duration of the source clip.  When the two match, the user has almost
    certainly configured the export to render the whole clip, which is
    rarely what is wanted.
    """

    order = inventory.get_order(__file__, "ValidateOutputRange")
    families = ["trackItem.task"]
    label = "Output Range"
    hosts = ["nukestudio"]
    optional = True

    def process(self, instance):
        task = instance.data["task"]
        item = instance.data["parent"]

        # Query the task's export range first, then measure the clip.
        output_range = task.outputRange()
        source = item.data["item"].source()
        clip_start = int(source.sourceIn())
        clip_end = int(source.sourceOut())
        clip_length = clip_end - clip_start + 1

        # Zero difference == exporting the full clip -> fail the assert.
        assert clip_length - output_range[1], (
            'Looks like you are rendering the clip length for the task '
            'rather than the cut length. If this is intended, just uncheck '
            'this validator after resetting, else adjust the export range in '
            'the "Handles" section of the export dialog.'
        )
class ValidateImageSequence(api.InstancePlugin):
    """Check that the image-sequence output path contains frame padding."""

    order = inventory.get_order(__file__, "ValidateImageSequence")
    families = ["trackItem.task", "img"]
    match = api.Subset
    label = "Image Sequence"
    hosts = ["nukestudio"]
    optional = True

    def process(self, instance):
        path = instance.data["task"].resolvedExportPath()

        # Accept either hash-style ("####") or printf-style ("%04d") padding.
        has_padding = "#" in path or "%" in path
        assert has_padding, (
            "Image sequence output is missing a padding. Please add \"####\" "
            "or \"%04d\" to the output templates."
        )
| lgpl-3.0 |
Kovak/Kivy-DriveSync | kivy_drivesync/oauth2client/util.py | 174 | 5670 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Common utility library."""
__author__ = ['rafek@google.com (Rafe Kaplan)',
'guido@google.com (Guido van Rossum)',
]
__all__ = [
'positional',
'POSITIONAL_WARNING',
'POSITIONAL_EXCEPTION',
'POSITIONAL_IGNORE',
]
import inspect
import logging
import types
import urllib
import urlparse
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
logger = logging.getLogger(__name__)
POSITIONAL_WARNING = 'WARNING'
POSITIONAL_EXCEPTION = 'EXCEPTION'
POSITIONAL_IGNORE = 'IGNORE'
POSITIONAL_SET = frozenset([POSITIONAL_WARNING, POSITIONAL_EXCEPTION,
POSITIONAL_IGNORE])
positional_parameters_enforcement = POSITIONAL_WARNING
def positional(max_positional_args):
  """A decorator to declare that only the first N arguments may be positional.

  This decorator makes it easy to support Python 3 style key-word only
  parameters. For example, in Python 3 it is possible to write:

    def fn(pos1, *, kwonly1=None, kwonly2=None):
      ...

  All named parameters after * must be a keyword:

    fn(10, 'kw1', 'kw2')  # Raises exception.
    fn(10, kwonly1='kw1')  # Ok.

  Example: to define a function like above, do:

    @positional(1)
    def fn(pos1, kwonly1=None, kwonly2=None):
      ...

  If no default value is provided to a keyword argument, it becomes a required
  keyword argument:

    @positional(0)
    def fn(required_kw):
      ...

  This must be called with the keyword parameter:

    fn()  # Raises exception.
    fn(10)  # Raises exception.
    fn(required_kw=10)  # Ok.

  When defining instance or class methods always remember to account for
  'self' and 'cls':

    class MyClass(object):

      @positional(2)
      def my_method(self, pos1, kwonly1=None):
        ...

      @classmethod
      @positional(2)
      def my_method(cls, pos1, kwonly1=None):
        ...

  The positional decorator behavior is controlled by
  util.positional_parameters_enforcement, which may be set to
  POSITIONAL_EXCEPTION, POSITIONAL_WARNING or POSITIONAL_IGNORE to raise an
  exception, log a warning, or do nothing, respectively, if a declaration is
  violated.

  Args:
    max_positional_arguments: Maximum number of positional arguments. All
      parameters after the this index must be keyword only.

  Returns:
    A decorator that prevents using arguments after max_positional_args from
    being used as positional parameters.

  Raises:
    TypeError if a key-word only argument is provided as a positional
    parameter, but only if util.positional_parameters_enforcement is set to
    POSITIONAL_EXCEPTION.
  """
  def positional_decorator(wrapped):
    def positional_wrapper(*args, **kwargs):
      # Enforcement happens at call time, controlled by the module-level
      # positional_parameters_enforcement flag.
      if len(args) > max_positional_args:
        plural_s = ''
        if max_positional_args != 1:
          plural_s = 's'
        message = '%s() takes at most %d positional argument%s (%d given)' % (
            wrapped.__name__, max_positional_args, plural_s, len(args))
        if positional_parameters_enforcement == POSITIONAL_EXCEPTION:
          raise TypeError(message)
        elif positional_parameters_enforcement == POSITIONAL_WARNING:
          logger.warning(message)
        else: # IGNORE
          pass
      return wrapped(*args, **kwargs)
    return positional_wrapper
  # Support bare usage (@positional on a function instead of @positional(N)):
  # when called with a function, infer N as the count of parameters that have
  # no default value.  (Python 2 only: int/long, inspect.getargspec.)
  if isinstance(max_positional_args, (int, long)):
    return positional_decorator
  else:
    args, _, _, defaults = inspect.getargspec(max_positional_args)
    return positional(len(args) - len(defaults))(max_positional_args)
def scopes_to_string(scopes):
  """Render OAuth scopes as a single space-delimited string.

  A plain string is passed through untouched; any other iterable of scope
  strings is joined with single spaces.

  Args:
    scopes: string or iterable of strings, the scopes.

  Returns:
    The scopes formatted as a single string.
  """
  if not isinstance(scopes, types.StringTypes):
    scopes = ' '.join(scopes)
  return scopes
def dict_to_tuple_key(dictionary):
  """Build a hashable, order-independent key from a dictionary.

  Logically equal dictionaries sort their items into the same order, so
  they always map to an identical tuple and can be used interchangeably
  as immutable (e.g. cache) keys.

  Args:
    dictionary: the dictionary to use as the key.

  Returns:
    A tuple representing the dictionary in it's naturally sorted ordering.
  """
  sorted_items = sorted(dictionary.items())
  return tuple(sorted_items)
def _add_query_parameter(url, name, value):
  """Adds a query parameter to a url.

  Replaces the current value if it already exists in the URL.

  Args:
    url: string, url to add the query parameter to.
    name: string, query parameter name.
    value: string, query parameter value.

  Returns:
    Updated query parameter. Does not update the url if value is None.
  """
  if value is None:
    return url
  else:
    parsed = list(urlparse.urlparse(url))
    # NOTE: round-tripping the query through a dict keeps only the last
    # occurrence of a duplicated parameter, and parse_qsl's default also
    # drops parameters with blank values.
    q = dict(parse_qsl(parsed[4]))
    q[name] = value
    parsed[4] = urllib.urlencode(q)
    return urlparse.urlunparse(parsed)
| mit |
UOMx/edx-platform | openedx/core/lib/api/tests/test_exceptions.py | 14 | 2774 | """
Test Custom Exceptions
"""
import ddt
from django.test import TestCase
from nose.plugins.attrib import attr
from rest_framework import exceptions as drf_exceptions
from .. import exceptions
@attr('shard_2')
@ddt.ddt
class TestDictExceptionsAllowDictDetails(TestCase):
    """
    Standard DRF exceptions coerce detail inputs to strings. We want to use
    dicts to allow better customization of error messages. Demonstrate that
    we can provide dictionaries as exception details, and that custom
    classes subclass the relevant DRF exceptions, to provide consistent
    exception catching behavior.
    """
    def test_drf_errors_coerce_strings(self):
        # Demonstrate the base issue we are trying to solve: stock DRF turns
        # a dict detail into its string repr.
        exc = drf_exceptions.AuthenticationFailed({u'error_code': -1})
        self.assertEqual(exc.detail, u"{u'error_code': -1}")
    # Exceptions whose constructor takes only a detail argument.
    @ddt.data(
        exceptions.AuthenticationFailed,
        exceptions.NotAuthenticated,
        exceptions.NotFound,
        exceptions.ParseError,
        exceptions.PermissionDenied,
    )
    def test_exceptions_allows_dict_detail(self, exception_class):
        exc = exception_class({u'error_code': -1})
        self.assertEqual(exc.detail, {u'error_code': -1})
    # MethodNotAllowed and NotAcceptable take extra constructor arguments,
    # so they are exercised individually rather than via @ddt.data.
    def test_method_not_allowed_allows_dict_detail(self):
        exc = exceptions.MethodNotAllowed(u'POST', {u'error_code': -1})
        self.assertEqual(exc.detail, {u'error_code': -1})
    def test_not_acceptable_allows_dict_detail(self):
        exc = exceptions.NotAcceptable({u'error_code': -1}, available_renderers=['application/json'])
        self.assertEqual(exc.detail, {u'error_code': -1})
        self.assertEqual(exc.available_renderers, ['application/json'])
@attr('shard_2')
@ddt.ddt
class TestDictExceptionSubclassing(TestCase):
    """
    Custom exceptions should subclass standard DRF exceptions, so code that
    catches the DRF exceptions also catches ours.
    """
    # Pairs of (custom exception class, DRF base class it must subclass).
    @ddt.data(
        (exceptions.AuthenticationFailed, drf_exceptions.AuthenticationFailed),
        (exceptions.NotAcceptable, drf_exceptions.NotAcceptable),
        (exceptions.NotAuthenticated, drf_exceptions.NotAuthenticated),
        (exceptions.NotFound, drf_exceptions.NotFound),
        (exceptions.ParseError, drf_exceptions.ParseError),
        (exceptions.PermissionDenied, drf_exceptions.PermissionDenied),
    )
    @ddt.unpack
    def test_exceptions_subclass_drf_exceptions(self, exception_class, drf_exception_class):
        exc = exception_class({u'error_code': -1})
        self.assertIsInstance(exc, drf_exception_class)
    # MethodNotAllowed needs its extra positional argument, so it is
    # checked separately from the data-driven cases above.
    def test_method_not_allowed_subclasses_drf_exception(self):
        exc = exceptions.MethodNotAllowed(u'POST', {u'error_code': -1})
        self.assertIsInstance(exc, drf_exceptions.MethodNotAllowed)
| agpl-3.0 |
u0m3/gr-baz | python/op25.py | 1 | 8345 | #
# Copyright 2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
_verbose = True
import math
from gnuradio import gr, gru, op25 as _op25
try:
from gnuradio import fsk4 # LEGACY
if _verbose:
print "Imported legacy fsk4"
except:
pass
SYMBOL_DEVIATION = 600
SYMBOL_RATE = 4800
class op25_fsk4(gr.hier_block2):
    """Hierarchical block wrapping an OP25 4-level FSK (C4FM) demodulator.

    One float stream in, one float stream of demodulated symbols out.
    Prefers the demodulator bundled with gr-op25 and falls back to the
    legacy standalone fsk4 module if that fails.
    """
    def __init__(self, channel_rate, auto_tune_msgq=None):
        # channel_rate: input sample rate in Hz.
        # auto_tune_msgq: optional gr.msg_queue for frequency auto-tune
        # feedback; a 2-deep queue is created when none is supplied.
        gr.hier_block2.__init__(self, "op25_fsk4",
            gr.io_signature(1, 1, gr.sizeof_float),
            gr.io_signature(1, 1, gr.sizeof_float))
        self.symbol_rate = SYMBOL_RATE
        #print "Channel rate:", channel_rate
        self.channel_rate = channel_rate
        self.auto_tune_msgq = auto_tune_msgq
        if self.auto_tune_msgq is None:
            self.auto_tune_msgq = gr.msg_queue(2)
        # C4FM demodulator: try the op25-provided block first, then the
        # legacy fsk4 module (imported at file top when available).
        #print "Symbol rate:", self.symbol_rate
        try:
            self.demod_fsk4 = _op25.fsk4_demod_ff(self.auto_tune_msgq, self.channel_rate, self.symbol_rate)
            if _verbose:
                print "Using new fsk4_demod_ff"
        except:
            try:
                self.demod_fsk4 = fsk4.demod_ff(self.auto_tune_msgq, self.channel_rate, self.symbol_rate) # LEGACY
                if _verbose:
                    print "Using legacy fsk4.demod_ff"
            except:
                raise Exception("Could not find a FSK4 demodulator to use")
        self.connect(self, self.demod_fsk4, self)
class op25_decoder_simple(gr.hier_block2):
    """Hierarchical block wrapping an OP25 P25 frame decoder.

    Takes demodulated FSK4 symbols (float) in and produces decoded audio
    (float) out.  Decoded traffic metadata is posted to traffic_msgq.
    Prefers the new slicer + decoder_bf pipeline, falling back to the
    legacy decoder_ff.
    """
    def __init__(self, traffic_msgq=None, key=None):
        # traffic_msgq: optional gr.msg_queue for decoded traffic; a 2-deep
        # queue is created when none is supplied.
        # key: optional decryption key (hex string), e.g. from a GRC block.
        gr.hier_block2.__init__(self, "op25_decoder",
            gr.io_signature(1, 1, gr.sizeof_float),
            gr.io_signature(1, 1, gr.sizeof_float))
        self.traffic_msgq = traffic_msgq
        self.key = key
        if self.traffic_msgq is None:
            self.traffic_msgq = gr.msg_queue(2)
        self.slicer = None
        try:
            # The four FSK4 decision levels used to slice soft symbols into
            # dibits for the byte-oriented decoder.
            levels = [ -2.0, 0.0, 2.0, 4.0 ]
            self.slicer = _op25.fsk4_slicer_fb(levels)
            self.p25_decoder = _op25.decoder_bf()
            self.p25_decoder.set_msgq(self.traffic_msgq)
            if _verbose:
                print "Using new decoder_bf"
        except:
            try:
                self.p25_decoder = _op25.decoder_ff(self.traffic_msgq) # LEGACY
                if _verbose:
                    print "Using legacy decoder_ff"
            except:
                raise Exception("Could not find a decoder to use")
        # NOTE(review): len(self.key) assumes the key is a string here;
        # an int key would raise TypeError — confirm callers always pass hex
        # strings at construction time.
        if (self.key is not None) and (len(self.key) > 0): # Relates to key string passed in from GRC block
            self.set_key(self.key)
        # With the new pipeline the slicer feeds the decoder; the legacy
        # decoder takes floats directly.
        if self.slicer:
            self.connect(self, self.slicer, self.p25_decoder)
        else:
            self.connect(self, self.p25_decoder)
        self.connect(self.p25_decoder, self)
    def set_key(self, key):
        """Set the decryption key (hex string or int); returns True on success."""
        try:
            if type(key) == str:
                if len(key) == 0: # FIXME: Go back into the clear
                    #print "Cannot set key using empty string"
                    return False
                key = int(key, 16) # Convert from hex string
            if not hasattr(self.p25_decoder, 'set_key'):
                print "This version of the OP25 decoder does not support decryption"
                return False
            self.p25_decoder.set_key(key)
            return True
        except Exception, e:
            print "Exception while setting key:", e
            return False
class op25_decoder(gr.hier_block2):
    """Complete OP25 receive chain: FSK4 (C4FM) demodulation followed by
    P25 frame decoding.

    Input: one float stream of FM-discriminator output.
    Output 0: decoded audio (float).
    Output 1 (only when output_dibits=True): the raw demodulated symbol
    stream tapped off the FSK4 demodulator.
    """
    def __init__(self, channel_rate, auto_tune_msgq=None, defer_creation=False, output_dibits=False, key=None, traffic_msgq=None):
        # channel_rate: input sample rate in Hz.
        # auto_tune_msgq: optional queue for demodulator auto-tune feedback.
        # defer_creation: when True, sub-blocks are not built until create().
        # output_dibits: expose the demodulated symbol stream on output 1.
        # key: optional decryption key (hex string), applied in create().
        # traffic_msgq: optional queue for decoded traffic metadata.
        num_outputs = 1
        if output_dibits:
            num_outputs += 1
        gr.hier_block2.__init__(self, "op25",
            gr.io_signature(1, 1, gr.sizeof_float),
            gr.io_signature(num_outputs, num_outputs, gr.sizeof_float))
        self.symbol_rate = SYMBOL_RATE
        self.channel_rate = channel_rate
        self.auto_tune_msgq = auto_tune_msgq
        self.output_dibits = output_dibits
        self.key = key
        # BUGFIX: a caller-supplied traffic_msgq used to be discarded by an
        # unconditional re-assignment; only create a default queue when the
        # caller did not provide one (same pattern as op25_decoder_simple).
        self.traffic_msgq = traffic_msgq
        if self.traffic_msgq is None:
            self.traffic_msgq = gr.msg_queue(2)
        if defer_creation == False:
            self.create()
    def create(self):
        """Instantiate and connect the demodulator and decoder sub-blocks."""
        self.fsk4 = op25_fsk4(channel_rate=self.channel_rate, auto_tune_msgq=self.auto_tune_msgq)
        self.decoder = op25_decoder_simple(traffic_msgq=self.traffic_msgq, key=self.key)
        # (The original commented-out reference pipeline — channel filter,
        # power squelch, FM demod, symbol filter — was removed; see the
        # repository history if a fuller front-end example is needed.)
        self.connect(self, self.fsk4, self.decoder, (self, 0))
        if self.output_dibits:
            self.connect(self.fsk4, (self, 1))
    def set_key(self, key):
        """Set (or defer) the decryption key; returns True on success.

        BUGFIX: this used to reference a non-existent self.p25_decoder
        attribute and always failed with AttributeError.  The decoder block
        is owned by the wrapped op25_decoder_simple, so delegate to it.
        """
        self.key = key
        if hasattr(self, 'decoder'):
            return self.decoder.set_key(key)
        # create() has not run yet; the stored key is applied on creation.
        return True
| gpl-3.0 |
bilgili/Voreen | modules/python/ext/python27/modules/urllib.py | 29 | 57695 | """Open an arbitrary URL.
See the following document for more info on URLs:
"Names and Addresses, URIs, URLs, URNs, URCs", at
http://www.w3.org/pub/WWW/Addressing/Overview.html
See also the HTTP spec (from which the error codes are derived):
"HTTP - Hypertext Transfer Protocol", at
http://www.w3.org/pub/WWW/Protocols/
Related standards and specs:
- RFC1808: the "relative URL" spec. (authoritative status)
- RFC1738 - the "URL standard". (authoritative status)
- RFC1630 - the "URI spec". (informational status)
The object returned by URLopener().open(file) will differ per
protocol. All you know is that is has methods read(), readline(),
readlines(), fileno(), close() and info(). The read*(), fileno()
and close() methods work like those of open files.
The info() method returns a mimetools.Message object which can be
used to query various info about the object, if available.
(mimetools.Message objects are queried with the getheader() method.)
"""
import string
import socket
import os
import time
import sys
import base64
from urlparse import urljoin as basejoin
__all__ = ["urlopen", "URLopener", "FancyURLopener", "urlretrieve",
"urlcleanup", "quote", "quote_plus", "unquote", "unquote_plus",
"urlencode", "url2pathname", "pathname2url", "splittag",
"localhost", "thishost", "ftperrors", "basejoin", "unwrap",
"splittype", "splithost", "splituser", "splitpasswd", "splitport",
"splitnport", "splitquery", "splitattr", "splitvalue",
"getproxies"]
__version__ = '1.17' # XXX This version is not always updated :-(
MAXFTPCACHE = 10 # Trim the ftp cache beyond this size
# Helper for non-unix systems: pick the platform-appropriate URL<->path
# translation functions.  On Windows and RISC OS these come from dedicated
# modules; elsewhere the 'file' scheme maps directly via quote/unquote.
if os.name == 'nt':
    from nturl2path import url2pathname, pathname2url
elif os.name == 'riscos':
    from rourl2path import url2pathname, pathname2url
else:
    def url2pathname(pathname):
        """OS-specific conversion from a relative URL of the 'file' scheme
        to a file system path; not recommended for general use."""
        return unquote(pathname)

    def pathname2url(pathname):
        """OS-specific conversion from a file system path to a relative URL
        of the 'file' scheme; not recommended for general use."""
        return quote(pathname)
# This really consists of two pieces:
# (1) a class which handles opening of all sorts of URLs
# (plus assorted utilities etc.)
# (2) a set of functions for parsing URLs
# XXX Should these be separated out into different modules?
# Shortcut for basic usage
_urlopener = None
def urlopen(url, data=None, proxies=None):
    """Create a file-like object for the specified URL to read from.

    When explicit proxies are given, a dedicated opener is built for this
    call; otherwise a single module-wide FancyURLopener is lazily created
    and reused.  If data is supplied, the request is a POST.
    """
    from warnings import warnpy3k
    warnpy3k("urllib.urlopen() has been removed in Python 3.0 in "
             "favor of urllib2.urlopen()", stacklevel=2)
    global _urlopener
    if proxies is not None:
        opener = FancyURLopener(proxies=proxies)
    else:
        opener = _urlopener
        if not opener:
            opener = FancyURLopener()
            _urlopener = opener
    if data is None:
        return opener.open(url)
    return opener.open(url, data)
def urlretrieve(url, filename=None, reporthook=None, data=None):
    """Retrieve *url*, returning ``(filename, headers)``.

    Lazily creates and reuses the module-wide FancyURLopener.
    """
    global _urlopener
    opener = _urlopener
    if not opener:
        opener = FancyURLopener()
        _urlopener = opener
    return opener.retrieve(url, filename, reporthook, data)
def urlcleanup():
    """Clean up temporary files and module-level caches.

    Clears the shared opener's temporary files plus the quoting and FTP
    connection caches (both module-level; _safe_quoters is defined further
    down in this file).
    """
    if _urlopener:
        _urlopener.cleanup()
    _safe_quoters.clear()
    ftpcache.clear()
# check for SSL
try:
import ssl
except:
_have_ssl = False
else:
_have_ssl = True
# exception raised when downloaded size does not match content-length
class ContentTooShortError(IOError):
    """Raised when the downloaded size does not match Content-Length.

    The partially downloaded body is kept on the ``content`` attribute so
    callers can inspect or salvage it.
    """

    def __init__(self, message, content):
        super(ContentTooShortError, self).__init__(message)
        self.content = content
ftpcache = {}
class URLopener:
"""Class to open URLs.
This is a class rather than just a subroutine because we may need
more than one set of global protocol-specific options.
Note -- this is a base class for those who don't want the
automatic handling of errors type 302 (relocated) and 401
(authorization needed)."""
__tempfiles = None
version = "Python-urllib/%s" % __version__
# Constructor
def __init__(self, proxies=None, **x509):
if proxies is None:
proxies = getproxies()
assert hasattr(proxies, 'has_key'), "proxies must be a mapping"
self.proxies = proxies
self.key_file = x509.get('key_file')
self.cert_file = x509.get('cert_file')
self.addheaders = [('User-Agent', self.version)]
self.__tempfiles = []
self.__unlink = os.unlink # See cleanup()
self.tempcache = None
# Undocumented feature: if you assign {} to tempcache,
# it is used to cache files retrieved with
# self.retrieve(). This is not enabled by default
# since it does not work for changing documents (and I
# haven't got the logic to check expiration headers
# yet).
self.ftpcache = ftpcache
# Undocumented feature: you can use a different
# ftp cache by assigning to the .ftpcache member;
# in case you want logically independent URL openers
# XXX This is not threadsafe. Bah.
def __del__(self):
self.close()
def close(self):
self.cleanup()
def cleanup(self):
# This code sometimes runs when the rest of this module
# has already been deleted, so it can't use any globals
# or import anything.
if self.__tempfiles:
for file in self.__tempfiles:
try:
self.__unlink(file)
except OSError:
pass
del self.__tempfiles[:]
if self.tempcache:
self.tempcache.clear()
def addheader(self, *args):
"""Add a header to be used by the HTTP interface only
e.g. u.addheader('Accept', 'sound/basic')"""
self.addheaders.append(args)
# External interface
def open(self, fullurl, data=None):
"""Use URLopener().open(file) instead of open(file, 'r')."""
fullurl = unwrap(toBytes(fullurl))
# percent encode url, fixing lame server errors for e.g, like space
# within url paths.
fullurl = quote(fullurl, safe="%/:=&?~#+!$,;'@()*[]|")
if self.tempcache and fullurl in self.tempcache:
filename, headers = self.tempcache[fullurl]
fp = open(filename, 'rb')
return addinfourl(fp, headers, fullurl)
urltype, url = splittype(fullurl)
if not urltype:
urltype = 'file'
if urltype in self.proxies:
proxy = self.proxies[urltype]
urltype, proxyhost = splittype(proxy)
host, selector = splithost(proxyhost)
url = (host, fullurl) # Signal special case to open_*()
else:
proxy = None
name = 'open_' + urltype
self.type = urltype
name = name.replace('-', '_')
if not hasattr(self, name):
if proxy:
return self.open_unknown_proxy(proxy, fullurl, data)
else:
return self.open_unknown(fullurl, data)
try:
if data is None:
return getattr(self, name)(url)
else:
return getattr(self, name)(url, data)
except socket.error, msg:
raise IOError, ('socket error', msg), sys.exc_info()[2]
def open_unknown(self, fullurl, data=None):
"""Overridable interface to open unknown URL type."""
type, url = splittype(fullurl)
raise IOError, ('url error', 'unknown url type', type)
def open_unknown_proxy(self, proxy, fullurl, data=None):
"""Overridable interface to open unknown URL type."""
type, url = splittype(fullurl)
raise IOError, ('url error', 'invalid proxy for %s' % type, proxy)
# External interface
def retrieve(self, url, filename=None, reporthook=None, data=None):
"""retrieve(url) returns (filename, headers) for a local object
or (tempfilename, headers) for a remote object."""
url = unwrap(toBytes(url))
if self.tempcache and url in self.tempcache:
return self.tempcache[url]
type, url1 = splittype(url)
if filename is None and (not type or type == 'file'):
try:
fp = self.open_local_file(url1)
hdrs = fp.info()
fp.close()
return url2pathname(splithost(url1)[1]), hdrs
except IOError:
pass
fp = self.open(url, data)
try:
headers = fp.info()
if filename:
tfp = open(filename, 'wb')
else:
import tempfile
garbage, path = splittype(url)
garbage, path = splithost(path or "")
path, garbage = splitquery(path or "")
path, garbage = splitattr(path or "")
suffix = os.path.splitext(path)[1]
(fd, filename) = tempfile.mkstemp(suffix)
self.__tempfiles.append(filename)
tfp = os.fdopen(fd, 'wb')
try:
result = filename, headers
if self.tempcache is not None:
self.tempcache[url] = result
bs = 1024*8
size = -1
read = 0
blocknum = 0
if "content-length" in headers:
size = int(headers["Content-Length"])
if reporthook:
reporthook(blocknum, bs, size)
while 1:
block = fp.read(bs)
if block == "":
break
read += len(block)
tfp.write(block)
blocknum += 1
if reporthook:
reporthook(blocknum, bs, size)
finally:
tfp.close()
finally:
fp.close()
# raise exception if actual size does not match content-length header
if size >= 0 and read < size:
raise ContentTooShortError("retrieval incomplete: got only %i out "
"of %i bytes" % (read, size), result)
return result
# Each method named open_<type> knows how to open that type of URL
def open_http(self, url, data=None):
"""Use HTTP protocol."""
import httplib
user_passwd = None
proxy_passwd= None
if isinstance(url, str):
host, selector = splithost(url)
if host:
user_passwd, host = splituser(host)
host = unquote(host)
realhost = host
else:
host, selector = url
# check whether the proxy contains authorization information
proxy_passwd, host = splituser(host)
# now we proceed with the url we want to obtain
urltype, rest = splittype(selector)
url = rest
user_passwd = None
if urltype.lower() != 'http':
realhost = None
else:
realhost, rest = splithost(rest)
if realhost:
user_passwd, realhost = splituser(realhost)
if user_passwd:
selector = "%s://%s%s" % (urltype, realhost, rest)
if proxy_bypass(realhost):
host = realhost
#print "proxy via http:", host, selector
if not host: raise IOError, ('http error', 'no host given')
if proxy_passwd:
proxy_passwd = unquote(proxy_passwd)
proxy_auth = base64.b64encode(proxy_passwd).strip()
else:
proxy_auth = None
if user_passwd:
user_passwd = unquote(user_passwd)
auth = base64.b64encode(user_passwd).strip()
else:
auth = None
h = httplib.HTTP(host)
if data is not None:
h.putrequest('POST', selector)
h.putheader('Content-Type', 'application/x-www-form-urlencoded')
h.putheader('Content-Length', '%d' % len(data))
else:
h.putrequest('GET', selector)
if proxy_auth: h.putheader('Proxy-Authorization', 'Basic %s' % proxy_auth)
if auth: h.putheader('Authorization', 'Basic %s' % auth)
if realhost: h.putheader('Host', realhost)
for args in self.addheaders: h.putheader(*args)
h.endheaders(data)
errcode, errmsg, headers = h.getreply()
fp = h.getfile()
if errcode == -1:
if fp: fp.close()
# something went wrong with the HTTP status line
raise IOError, ('http protocol error', 0,
'got a bad status line', None)
# According to RFC 2616, "2xx" code indicates that the client's
# request was successfully received, understood, and accepted.
if (200 <= errcode < 300):
return addinfourl(fp, headers, "http:" + url, errcode)
else:
if data is None:
return self.http_error(url, fp, errcode, errmsg, headers)
else:
return self.http_error(url, fp, errcode, errmsg, headers, data)
def http_error(self, url, fp, errcode, errmsg, headers, data=None):
"""Handle http errors.
Derived class can override this, or provide specific handlers
named http_error_DDD where DDD is the 3-digit error code."""
# First check if there's a specific handler for this error
name = 'http_error_%d' % errcode
if hasattr(self, name):
method = getattr(self, name)
if data is None:
result = method(url, fp, errcode, errmsg, headers)
else:
result = method(url, fp, errcode, errmsg, headers, data)
if result: return result
return self.http_error_default(url, fp, errcode, errmsg, headers)
def http_error_default(self, url, fp, errcode, errmsg, headers):
"""Default error handler: close the connection and raise IOError."""
fp.close()
raise IOError, ('http error', errcode, errmsg, headers)
if _have_ssl:
def open_https(self, url, data=None):
"""Use HTTPS protocol."""
import httplib
user_passwd = None
proxy_passwd = None
if isinstance(url, str):
host, selector = splithost(url)
if host:
user_passwd, host = splituser(host)
host = unquote(host)
realhost = host
else:
host, selector = url
# here, we determine, whether the proxy contains authorization information
proxy_passwd, host = splituser(host)
urltype, rest = splittype(selector)
url = rest
user_passwd = None
if urltype.lower() != 'https':
realhost = None
else:
realhost, rest = splithost(rest)
if realhost:
user_passwd, realhost = splituser(realhost)
if user_passwd:
selector = "%s://%s%s" % (urltype, realhost, rest)
#print "proxy via https:", host, selector
if not host: raise IOError, ('https error', 'no host given')
if proxy_passwd:
proxy_passwd = unquote(proxy_passwd)
proxy_auth = base64.b64encode(proxy_passwd).strip()
else:
proxy_auth = None
if user_passwd:
user_passwd = unquote(user_passwd)
auth = base64.b64encode(user_passwd).strip()
else:
auth = None
h = httplib.HTTPS(host, 0,
key_file=self.key_file,
cert_file=self.cert_file)
if data is not None:
h.putrequest('POST', selector)
h.putheader('Content-Type',
'application/x-www-form-urlencoded')
h.putheader('Content-Length', '%d' % len(data))
else:
h.putrequest('GET', selector)
if proxy_auth: h.putheader('Proxy-Authorization', 'Basic %s' % proxy_auth)
if auth: h.putheader('Authorization', 'Basic %s' % auth)
if realhost: h.putheader('Host', realhost)
for args in self.addheaders: h.putheader(*args)
h.endheaders(data)
errcode, errmsg, headers = h.getreply()
fp = h.getfile()
if errcode == -1:
if fp: fp.close()
# something went wrong with the HTTP status line
raise IOError, ('http protocol error', 0,
'got a bad status line', None)
# According to RFC 2616, "2xx" code indicates that the client's
# request was successfully received, understood, and accepted.
if (200 <= errcode < 300):
return addinfourl(fp, headers, "https:" + url, errcode)
else:
if data is None:
return self.http_error(url, fp, errcode, errmsg, headers)
else:
return self.http_error(url, fp, errcode, errmsg, headers,
data)
def open_file(self, url):
"""Use local file or FTP depending on form of URL."""
if not isinstance(url, str):
raise IOError, ('file error', 'proxy support for file protocol currently not implemented')
if url[:2] == '//' and url[2:3] != '/' and url[2:12].lower() != 'localhost/':
return self.open_ftp(url)
else:
return self.open_local_file(url)
def open_local_file(self, url):
"""Use local file."""
import mimetypes, mimetools, email.utils
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
host, file = splithost(url)
localname = url2pathname(file)
try:
stats = os.stat(localname)
except OSError, e:
raise IOError(e.errno, e.strerror, e.filename)
size = stats.st_size
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
mtype = mimetypes.guess_type(url)[0]
headers = mimetools.Message(StringIO(
'Content-Type: %s\nContent-Length: %d\nLast-modified: %s\n' %
(mtype or 'text/plain', size, modified)))
if not host:
urlfile = file
if file[:1] == '/':
urlfile = 'file://' + file
elif file[:2] == './':
raise ValueError("local file url may start with / or file:. Unknown url of type: %s" % url)
return addinfourl(open(localname, 'rb'),
headers, urlfile)
host, port = splitport(host)
if not port \
and socket.gethostbyname(host) in (localhost(), thishost()):
urlfile = file
if file[:1] == '/':
urlfile = 'file://' + file
return addinfourl(open(localname, 'rb'),
headers, urlfile)
raise IOError, ('local file error', 'not on local host')
def open_ftp(self, url):
    """Use FTP protocol.

    Fetch *url* over FTP, reusing cached control connections keyed by
    (user, host, port, directory path).  Returns an addinfourl wrapping
    the data connection; raises IOError on any FTP failure.
    """
    if not isinstance(url, str):
        raise IOError, ('ftp error', 'proxy support for ftp protocol currently not implemented')
    import mimetypes, mimetools
    # cStringIO is the fast C implementation; fall back to pure Python.
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
    host, path = splithost(url)
    if not host: raise IOError, ('ftp error', 'no host given')
    host, port = splitport(host)
    user, host = splituser(host)
    if user: user, passwd = splitpasswd(user)
    else: passwd = None
    host = unquote(host)
    user = user or ''
    passwd = passwd or ''
    host = socket.gethostbyname(host)
    if not port:
        # No explicit port: use the protocol default (21).
        import ftplib
        port = ftplib.FTP_PORT
    else:
        port = int(port)
    # Split off ;attr=value suffixes (e.g. ;type=a) before decoding.
    path, attrs = splitattr(path)
    path = unquote(path)
    dirs = path.split('/')
    dirs, file = dirs[:-1], dirs[-1]
    if dirs and not dirs[0]: dirs = dirs[1:]
    if dirs and not dirs[0]: dirs[0] = '/'
    # Cache key identifies a reusable control connection.
    key = user, host, port, '/'.join(dirs)
    # XXX thread unsafe!
    if len(self.ftpcache) > MAXFTPCACHE:
        # Prune the cache, rather arbitrarily
        for k in self.ftpcache.keys():
            if k != key:
                v = self.ftpcache[k]
                del self.ftpcache[k]
                v.close()
    try:
        if not key in self.ftpcache:
            self.ftpcache[key] = \
                ftpwrapper(user, passwd, host, port, dirs)
        # Empty filename means directory listing; otherwise binary image.
        if not file: type = 'D'
        else: type = 'I'
        for attr in attrs:
            attr, value = splitvalue(attr)
            if attr.lower() == 'type' and \
               value in ('a', 'A', 'i', 'I', 'd', 'D'):
                type = value.upper()
        (fp, retrlen) = self.ftpcache[key].retrfile(file, type)
        mtype = mimetypes.guess_type("ftp:" + url)[0]
        # Synthesize a minimal RFC 822 header block for the caller.
        headers = ""
        if mtype:
            headers += "Content-Type: %s\n" % mtype
        if retrlen is not None and retrlen >= 0:
            headers += "Content-Length: %d\n" % retrlen
        headers = mimetools.Message(StringIO(headers))
        return addinfourl(fp, headers, "ftp:" + url)
    except ftperrors(), msg:
        raise IOError, ('ftp error', msg), sys.exc_info()[2]
def open_data(self, url, data=None):
    """Use "data" URL.

    Decode an RFC 2397 data: URL and return an addinfourl over the
    embedded payload.  *data* (POST body) is ignored.
    """
    if not isinstance(url, str):
        raise IOError, ('data error', 'proxy support for data protocol currently not implemented')
    # ignore POSTed data
    #
    # syntax of data URLs:
    # dataurl   := "data:" [ mediatype ] [ ";base64" ] "," data
    # mediatype := [ type "/" subtype ] *( ";" parameter )
    # data      := *urlchar
    # parameter := attribute "=" value
    import mimetools
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
    try:
        [type, data] = url.split(',', 1)
    except ValueError:
        # No comma: not a well-formed data URL.
        raise IOError, ('data error', 'bad data URL')
    if not type:
        # Missing mediatype: RFC 2397 default.
        type = 'text/plain;charset=US-ASCII'
    semi = type.rfind(';')
    if semi >= 0 and '=' not in type[semi:]:
        # Trailing parameter without '=' is the transfer encoding
        # (i.e. ";base64").
        encoding = type[semi+1:]
        type = type[:semi]
    else:
        encoding = ''
    # Build a synthetic header block for the addinfourl wrapper.
    msg = []
    msg.append('Date: %s'%time.strftime('%a, %d %b %Y %H:%M:%S GMT',
                                        time.gmtime(time.time())))
    msg.append('Content-type: %s' % type)
    if encoding == 'base64':
        data = base64.decodestring(data)
    else:
        data = unquote(data)
    msg.append('Content-Length: %d' % len(data))
    msg.append('')
    msg.append(data)
    msg = '\n'.join(msg)
    f = StringIO(msg)
    headers = mimetools.Message(f, 0)
    #f.fileno = None     # needed for addinfourl
    return addinfourl(f, headers, url)
class FancyURLopener(URLopener):
    """Derived class with handlers for errors we can handle (perhaps).

    Adds redirect following (301/302/303/307) with a recursion limit,
    and Basic authentication retries for 401/407 responses.
    """

    def __init__(self, *args, **kwargs):
        URLopener.__init__(self, *args, **kwargs)
        # (realm@host) -> (user, password) cache for Basic auth retries.
        self.auth_cache = {}
        self.tries = 0
        # Maximum consecutive redirects before giving up.
        self.maxtries = 10

    def http_error_default(self, url, fp, errcode, errmsg, headers):
        """Default error handling -- don't raise an exception."""
        return addinfourl(fp, headers, "http:" + url, errcode)

    def http_error_302(self, url, fp, errcode, errmsg, headers, data=None):
        """Error 302 -- relocated (temporarily)."""
        self.tries += 1
        if self.maxtries and self.tries >= self.maxtries:
            # Redirect loop: report a synthetic 500 instead of recursing.
            if hasattr(self, "http_error_500"):
                meth = self.http_error_500
            else:
                meth = self.http_error_default
            self.tries = 0
            return meth(url, fp, 500,
                        "Internal Server Error: Redirect Recursion", headers)
        result = self.redirect_internal(url, fp, errcode, errmsg, headers,
                                        data)
        self.tries = 0
        return result

    def redirect_internal(self, url, fp, errcode, errmsg, headers, data):
        # Follow the redirect named in the Location: (or legacy URI:) header.
        if 'location' in headers:
            newurl = headers['location']
        elif 'uri' in headers:
            newurl = headers['uri']
        else:
            # No target given; nothing to follow.
            return
        fp.close()
        # In case the server sent a relative URL, join with original:
        newurl = basejoin(self.type + ":" + url, newurl)

        # For security reasons we do not allow redirects to protocols
        # other than HTTP, HTTPS or FTP.
        newurl_lower = newurl.lower()
        if not (newurl_lower.startswith('http://') or
                newurl_lower.startswith('https://') or
                newurl_lower.startswith('ftp://')):
            raise IOError('redirect error', errcode,
                          errmsg + " - Redirection to url '%s' is not allowed" %
                          newurl,
                          headers)

        return self.open(newurl)

    def http_error_301(self, url, fp, errcode, errmsg, headers, data=None):
        """Error 301 -- also relocated (permanently)."""
        return self.http_error_302(url, fp, errcode, errmsg, headers, data)

    def http_error_303(self, url, fp, errcode, errmsg, headers, data=None):
        """Error 303 -- also relocated (essentially identical to 302)."""
        return self.http_error_302(url, fp, errcode, errmsg, headers, data)

    def http_error_307(self, url, fp, errcode, errmsg, headers, data=None):
        """Error 307 -- relocated, but turn POST into error."""
        if data is None:
            return self.http_error_302(url, fp, errcode, errmsg, headers, data)
        else:
            # Auto-redirecting a POST would silently change its semantics.
            return self.http_error_default(url, fp, errcode, errmsg, headers)

    def http_error_401(self, url, fp, errcode, errmsg, headers, data=None):
        """Error 401 -- authentication required.
        This function supports Basic authentication only."""
        if not 'www-authenticate' in headers:
            URLopener.http_error_default(self, url, fp,
                                         errcode, errmsg, headers)
        stuff = headers['www-authenticate']
        import re
        match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff)
        if not match:
            URLopener.http_error_default(self, url, fp,
                                         errcode, errmsg, headers)
        scheme, realm = match.groups()
        if scheme.lower() != 'basic':
            URLopener.http_error_default(self, url, fp,
                                         errcode, errmsg, headers)
        # Dispatch to retry_http_basic_auth / retry_https_basic_auth.
        name = 'retry_' + self.type + '_basic_auth'
        if data is None:
            return getattr(self,name)(url, realm)
        else:
            return getattr(self,name)(url, realm, data)

    def http_error_407(self, url, fp, errcode, errmsg, headers, data=None):
        """Error 407 -- proxy authentication required.
        This function supports Basic authentication only."""
        if not 'proxy-authenticate' in headers:
            URLopener.http_error_default(self, url, fp,
                                         errcode, errmsg, headers)
        stuff = headers['proxy-authenticate']
        import re
        match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff)
        if not match:
            URLopener.http_error_default(self, url, fp,
                                         errcode, errmsg, headers)
        scheme, realm = match.groups()
        if scheme.lower() != 'basic':
            URLopener.http_error_default(self, url, fp,
                                         errcode, errmsg, headers)
        # Dispatch to retry_proxy_http_basic_auth / ..._https_... below.
        name = 'retry_proxy_' + self.type + '_basic_auth'
        if data is None:
            return getattr(self,name)(url, realm)
        else:
            return getattr(self,name)(url, realm, data)

    def retry_proxy_http_basic_auth(self, url, realm, data=None):
        # Embed user:password in the configured HTTP proxy URL and retry.
        host, selector = splithost(url)
        newurl = 'http://' + host + selector
        proxy = self.proxies['http']
        urltype, proxyhost = splittype(proxy)
        proxyhost, proxyselector = splithost(proxyhost)
        # Strip credentials already present in the proxy host, if any.
        i = proxyhost.find('@') + 1
        proxyhost = proxyhost[i:]
        user, passwd = self.get_user_passwd(proxyhost, realm, i)
        if not (user or passwd): return None
        proxyhost = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + proxyhost
        self.proxies['http'] = 'http://' + proxyhost + proxyselector
        if data is None:
            return self.open(newurl)
        else:
            return self.open(newurl, data)

    def retry_proxy_https_basic_auth(self, url, realm, data=None):
        # Same as retry_proxy_http_basic_auth, for the HTTPS proxy.
        host, selector = splithost(url)
        newurl = 'https://' + host + selector
        proxy = self.proxies['https']
        urltype, proxyhost = splittype(proxy)
        proxyhost, proxyselector = splithost(proxyhost)
        i = proxyhost.find('@') + 1
        proxyhost = proxyhost[i:]
        user, passwd = self.get_user_passwd(proxyhost, realm, i)
        if not (user or passwd): return None
        proxyhost = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + proxyhost
        self.proxies['https'] = 'https://' + proxyhost + proxyselector
        if data is None:
            return self.open(newurl)
        else:
            return self.open(newurl, data)

    def retry_http_basic_auth(self, url, realm, data=None):
        # Embed user:password directly in the target URL and retry.
        host, selector = splithost(url)
        i = host.find('@') + 1
        host = host[i:]
        user, passwd = self.get_user_passwd(host, realm, i)
        if not (user or passwd): return None
        host = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + host
        newurl = 'http://' + host + selector
        if data is None:
            return self.open(newurl)
        else:
            return self.open(newurl, data)

    def retry_https_basic_auth(self, url, realm, data=None):
        # HTTPS variant of retry_http_basic_auth.
        host, selector = splithost(url)
        i = host.find('@') + 1
        host = host[i:]
        user, passwd = self.get_user_passwd(host, realm, i)
        if not (user or passwd): return None
        host = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + host
        newurl = 'https://' + host + selector
        if data is None:
            return self.open(newurl)
        else:
            return self.open(newurl, data)

    def get_user_passwd(self, host, realm, clear_cache=0):
        # Look up cached credentials; with clear_cache set (credentials
        # were already tried and rejected), drop them and re-prompt.
        key = realm + '@' + host.lower()
        if key in self.auth_cache:
            if clear_cache:
                del self.auth_cache[key]
            else:
                return self.auth_cache[key]
        user, passwd = self.prompt_user_passwd(host, realm)
        if user or passwd: self.auth_cache[key] = (user, passwd)
        return user, passwd

    def prompt_user_passwd(self, host, realm):
        """Override this in a GUI environment!"""
        import getpass
        try:
            user = raw_input("Enter username for %s at %s: " % (realm,
                                                                host))
            passwd = getpass.getpass("Enter password for %s in %s at %s: " %
                (user, realm, host))
            return user, passwd
        except KeyboardInterrupt:
            # Newline so the ^C doesn't mangle the terminal prompt.
            print
            return None, None
# Utility functions
_localhost = None

def localhost():
    """Return the IP address of the magic hostname 'localhost'.

    The resolver result is cached in a module-level variable, so DNS
    is consulted at most once per process.
    """
    global _localhost
    if _localhost is not None:
        return _localhost
    _localhost = socket.gethostbyname('localhost')
    return _localhost
_thishost = None

def thishost():
    """Return the IP address of the current host.

    Resolves this machine's own hostname once and caches the answer.
    """
    global _thishost
    if _thishost is not None:
        return _thishost
    _thishost = socket.gethostbyname(socket.gethostname())
    return _thishost
_ftperrors = None

def ftperrors():
    """Return the set of errors raised by the FTP class.

    Imports ftplib lazily and caches its ``all_errors`` tuple so the
    module is only loaded when FTP support is actually used.
    """
    global _ftperrors
    if _ftperrors is not None:
        return _ftperrors
    import ftplib
    _ftperrors = ftplib.all_errors
    return _ftperrors
_noheaders = None

def noheaders():
    """Return an empty mimetools.Message object (a shared singleton)."""
    global _noheaders
    if _noheaders is not None:
        return _noheaders
    import mimetools
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
    _noheaders = mimetools.Message(StringIO(), 0)
    _noheaders.fp.close()   # Recycle file descriptor
    return _noheaders
# Utility classes
class ftpwrapper:
    """Class used by open_ftp() for cache of open FTP connections.

    Wraps one ftplib.FTP control connection plus bookkeeping so the
    connection can be shared across transfers and closed only when no
    handed-out file objects remain.
    """

    def __init__(self, user, passwd, host, port, dirs,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                 persistent=True):
        self.user = user
        self.passwd = passwd
        self.host = host
        self.port = port
        self.dirs = dirs
        self.timeout = timeout
        # Number of file objects handed out and not yet closed.
        self.refcount = 0
        # While True, the control connection survives individual transfers.
        self.keepalive = persistent
        self.init()

    def init(self):
        # (Re)establish the control connection, log in, and cwd into place.
        import ftplib
        self.busy = 0
        self.ftp = ftplib.FTP()
        self.ftp.connect(self.host, self.port, self.timeout)
        self.ftp.login(self.user, self.passwd)
        for dir in self.dirs:
            self.ftp.cwd(dir)

    def retrfile(self, file, type):
        # Retrieve *file* with transfer type 'I'/'A', or a directory
        # listing for type 'D'.  Returns (file object, length or None).
        import ftplib
        self.endtransfer()
        if type in ('d', 'D'): cmd = 'TYPE A'; isdir = 1
        else: cmd = 'TYPE ' + type; isdir = 0
        try:
            self.ftp.voidcmd(cmd)
        except ftplib.all_errors:
            # Control connection died (e.g. server timeout): reconnect once.
            self.init()
            self.ftp.voidcmd(cmd)
        conn = None
        if file and not isdir:
            # Try to retrieve as a file
            try:
                cmd = 'RETR ' + file
                conn, retrlen = self.ftp.ntransfercmd(cmd)
            except ftplib.error_perm, reason:
                # 550 means "not a plain file"; fall through to a
                # directory listing.  Anything else is a real error.
                if str(reason)[:3] != '550':
                    raise IOError, ('ftp error', reason), sys.exc_info()[2]
        if not conn:
            # Set transfer mode to ASCII!
            self.ftp.voidcmd('TYPE A')
            # Try a directory listing. Verify that directory exists.
            if file:
                pwd = self.ftp.pwd()
                try:
                    try:
                        self.ftp.cwd(file)
                    except ftplib.error_perm, reason:
                        raise IOError, ('ftp error', reason), sys.exc_info()[2]
                finally:
                    # Always restore the original working directory.
                    self.ftp.cwd(pwd)
                cmd = 'LIST ' + file
            else:
                cmd = 'LIST'
            conn, retrlen = self.ftp.ntransfercmd(cmd)
        self.busy = 1
        # file_close() runs when the caller closes the returned object.
        ftpobj = addclosehook(conn.makefile('rb'), self.file_close)
        self.refcount += 1
        conn.close()
        # Pass back both a suitably decorated object and a retrieval length
        return (ftpobj, retrlen)

    def endtransfer(self):
        # Drain the final server response of a pending transfer, if any.
        if not self.busy:
            return
        self.busy = 0
        try:
            self.ftp.voidresp()
        except ftperrors():
            pass

    def close(self):
        # Cache eviction: disable keepalive and close once no readers remain.
        self.keepalive = False
        if self.refcount <= 0:
            self.real_close()

    def file_close(self):
        # Hook invoked when a handed-out file object is closed.
        self.endtransfer()
        self.refcount -= 1
        if self.refcount <= 0 and not self.keepalive:
            self.real_close()

    def real_close(self):
        # Unconditionally tear down the control connection.
        self.endtransfer()
        try:
            self.ftp.close()
        except ftperrors():
            pass
class addbase:
    """Base class for addinfo and addclosehook.

    Wraps a file-like object and forwards its read interface by binding
    the underlying bound methods directly onto the instance.
    """

    def __init__(self, fp):
        self.fp = fp
        # Delegate by storing bound methods of the wrapped object.
        self.read = fp.read
        self.readline = fp.readline
        if hasattr(fp, "readlines"):
            self.readlines = fp.readlines
        # Always expose fileno(); substitute a stub returning None.
        self.fileno = getattr(fp, "fileno", lambda: None)
        if hasattr(fp, "__iter__"):
            self.__iter__ = fp.__iter__
            if hasattr(fp, "next"):
                self.next = fp.next

    def __repr__(self):
        return '<%s at %r whose fp = %r>' % (type(self).__name__,
                                             id(self), self.fp)

    def close(self):
        """Drop the delegated methods and close the wrapped file."""
        self.read = None
        self.readline = None
        self.readlines = None
        self.fileno = None
        fp, self.fp = self.fp, None
        if fp:
            fp.close()


class addclosehook(addbase):
    """Class to add a close hook to an open file."""

    def __init__(self, fp, closehook, *hookargs):
        addbase.__init__(self, fp)
        self.closehook = closehook
        self.hookargs = hookargs

    def close(self):
        """Close the file, then fire the hook exactly once."""
        addbase.close(self)
        if self.closehook:
            self.closehook(*self.hookargs)
            self.closehook = None
            self.hookargs = None


class addinfo(addbase):
    """class to add an info() method to an open file."""

    def __init__(self, fp, headers):
        addbase.__init__(self, fp)
        self.headers = headers

    def info(self):
        """Return the header object supplied at construction."""
        return self.headers


class addinfourl(addbase):
    """class to add info() and geturl() methods to an open file."""

    def __init__(self, fp, headers, url, code=None):
        addbase.__init__(self, fp)
        self.headers = headers
        self.url = url
        self.code = code

    def info(self):
        return self.headers

    def getcode(self):
        return self.code

    def geturl(self):
        return self.url
# Utilities to parse URLs (most of these return None for missing parts):
# unwrap('<URL:type://host/path>') --> 'type://host/path'
# splittype('type:opaquestring') --> 'type', 'opaquestring'
# splithost('//host[:port]/path') --> 'host[:port]', '/path'
# splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'
# splitpasswd('user:passwd') -> 'user', 'passwd'
# splitport('host:port') --> 'host', 'port'
# splitquery('/path?query') --> '/path', 'query'
# splittag('/path#tag') --> '/path', 'tag'
# splitattr('/path;attr1=value1;attr2=value2;...') ->
# '/path', ['attr1=value1', 'attr2=value2', ...]
# splitvalue('attr=value') --> 'attr', 'value'
# unquote('abc%20def') -> 'abc def'
# quote('abc def') -> 'abc%20def')
# Py2/Py3 bridge: on interpreters without the ``unicode`` builtin
# (Python 3), _is_unicode() is a constant-false predicate.
try:
    unicode
except NameError:
    def _is_unicode(x):
        # No unicode type exists, so nothing can be a unicode instance.
        return 0
else:
    def _is_unicode(x):
        return isinstance(x, unicode)
def toBytes(url):
    """toBytes(u"URL") --> 'URL'.

    Encode a unicode URL down to an ASCII byte string; byte strings
    pass through unchanged.  Raises UnicodeError on non-ASCII input.
    """
    # Most URL schemes require ASCII. If that changes, the conversion
    # can be relaxed.
    if not _is_unicode(url):
        return url
    try:
        return url.encode("ASCII")
    except UnicodeError:
        raise UnicodeError("URL " + repr(url) +
                           " contains non-ASCII characters")
def unwrap(url):
    """unwrap('<URL:type://host/path>') --> 'type://host/path'."""
    url = url.strip()
    # Peel off an optional <...> wrapper, then an optional URL: prefix.
    if url[:1] == '<' and url[-1:] == '>':
        url = url[1:-1].strip()
    if url.startswith('URL:'):
        url = url[4:].strip()
    return url
_typeprog = None

def splittype(url):
    """splittype('type:opaquestring') --> 'type', 'opaquestring'."""
    global _typeprog
    if _typeprog is None:
        import re
        # Scheme = everything before the first ':' containing no '/'.
        _typeprog = re.compile('^([^/:]+):')
    match = _typeprog.match(url)
    if match is None:
        return None, url
    scheme = match.group(1)
    return scheme.lower(), url[len(scheme) + 1:]
_hostprog = None

def splithost(url):
    """splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
    global _hostprog
    if _hostprog is None:
        import re
        _hostprog = re.compile('^//([^/?]*)(.*)$')
    match = _hostprog.match(url)
    if match is None:
        return None, url
    host_port, path = match.groups()
    # Normalize a non-empty remainder so it always begins with '/'.
    if path and not path.startswith('/'):
        path = '/' + path
    return host_port, path
_userprog = None

def splituser(host):
    """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
    global _userprog
    if _userprog is None:
        import re
        # Greedy first group: the split happens at the *last* '@'.
        _userprog = re.compile('^(.*)@(.*)$')
    match = _userprog.match(host)
    if match is None:
        return None, host
    return match.group(1), match.group(2)
_passwdprog = None

def splitpasswd(user):
    """splitpasswd('user:passwd') -> 'user', 'passwd'."""
    global _passwdprog
    if _passwdprog is None:
        import re
        # re.S so a password may legitimately contain newlines.
        _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
    match = _passwdprog.match(user)
    if match is None:
        return user, None
    return match.group(1), match.group(2)
# splittag('/path#tag') --> '/path', 'tag'
_portprog = None

def splitport(host):
    """splitport('host:port') --> 'host', 'port'."""
    global _portprog
    if _portprog is None:
        import re
        # Only an all-digit suffix counts; 'host:abc' stays intact.
        _portprog = re.compile('^(.*):([0-9]+)$')
    match = _portprog.match(host)
    if match is None:
        return host, None
    return match.group(1), match.group(2)
_nportprog = None

def splitnport(host, defport=-1):
    """Split host and port, returning numeric port.

    Return given default port if no ':' found; defaults to -1.
    Return numerical port if a valid number are found after ':'.
    Return None if ':' but not a valid number.
    """
    global _nportprog
    if _nportprog is None:
        import re
        _nportprog = re.compile('^(.*):(.*)$')
    match = _nportprog.match(host)
    if match is None:
        return host, defport
    host, port = match.group(1, 2)
    try:
        if not port:
            # Empty port ('host:') is treated like a malformed number.
            raise ValueError("no digits")
        nport = int(port)
    except ValueError:
        nport = None
    return host, nport
_queryprog = None

def splitquery(url):
    """splitquery('/path?query') --> '/path', 'query'."""
    global _queryprog
    if _queryprog is None:
        import re
        # Split at the last '?'; the query part contains no '?'.
        _queryprog = re.compile(r'^(.*)\?([^?]*)$')
    match = _queryprog.match(url)
    if match is None:
        return url, None
    return match.group(1), match.group(2)
_tagprog = None

def splittag(url):
    """splittag('/path#tag') --> '/path', 'tag'."""
    global _tagprog
    if _tagprog is None:
        import re
        # Split at the last '#'; the tag part contains no '#'.
        _tagprog = re.compile('^(.*)#([^#]*)$')
    match = _tagprog.match(url)
    if match is None:
        return url, None
    return match.group(1), match.group(2)
def splitattr(url):
    """splitattr('/path;attr1=value1;attr2=value2;...') ->
    '/path', ['attr1=value1', 'attr2=value2', ...]."""
    parts = url.split(';')
    # First component is the path; everything after a ';' is an attribute.
    return parts[0], parts[1:]
_valueprog = None

def splitvalue(attr):
    """splitvalue('attr=value') --> 'attr', 'value'."""
    global _valueprog
    if _valueprog is None:
        import re
        # Split at the first '='; the value may itself contain '='.
        _valueprog = re.compile('^([^=]*)=(.*)$')
    match = _valueprog.match(attr)
    if match is None:
        return attr, None
    return match.group(1), match.group(2)
# urlparse contains a duplicate of this method to avoid a circular import. If
# you update this method, also update the copy in urlparse. This code
# duplication does not exist in Python3.

_hexdig = '0123456789ABCDEFabcdef'
# Map every two-hex-digit string (all case mixes) to its character.
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in _hexdig for b in _hexdig)

def unquote(s):
    """unquote('abc%20def') -> 'abc def'."""
    parts = s.split('%')
    # fastpath: no '%' at all means nothing to decode
    if len(parts) == 1:
        return s
    pieces = [parts[0]]
    append = pieces.append
    for item in parts[1:]:
        try:
            append(_hextochr[item[:2]] + item[2:])
        except KeyError:
            # Not a valid two-digit escape; keep the '%' literally.
            append('%' + item)
        except UnicodeDecodeError:
            # Py2 only: unicode input mixed with high-byte escapes.
            append(unichr(int(item[:2], 16)) + item[2:])
    return ''.join(pieces)
def unquote_plus(s):
    """unquote('%7e/abc+def') -> '~/abc def'"""
    # '+' is the form-encoding of a space; convert before %-decoding.
    return unquote(s.replace('+', ' '))
# Characters that are never quoted in any URL component.
always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
               'abcdefghijklmnopqrstuvwxyz'
               '0123456789' '_.-')
# _safe_map: every byte maps either to itself (always-safe ASCII) or to
# its '%XX' escape sequence.
_safe_map = {}
for i, c in zip(xrange(256), str(bytearray(xrange(256)))):
    _safe_map[c] = c if (i < 128 and c in always_safe) else '%{:02X}'.format(i)
# Cache of (safe, always_safe) -> (per-character quoter, full safe string).
_safe_quoters = {}

def quote(s, safe='/'):
    """quote('abc def') -> 'abc%20def'

    Each part of a URL, e.g. the path info, the query, etc., has a
    different set of reserved characters that must be quoted.

    RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists
    the following reserved characters.

    reserved    = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" |
                  "$" | ","

    Each of these characters is reserved in some component of a URL,
    but not necessarily in all of them.

    By default, the quote function is intended for quoting the path
    section of a URL.  Thus, it will not encode '/'.  This character
    is reserved, but in typical usage the quote function is being
    called on a path where the existing slash characters are used as
    reserved characters.
    """
    # fastpath
    if not s:
        if s is None:
            raise TypeError('None object cannot be quoted')
        return s
    cachekey = (safe, always_safe)
    try:
        (quoter, safe) = _safe_quoters[cachekey]
    except KeyError:
        # First use of this *safe* set: extend the base map and cache it.
        safe_map = _safe_map.copy()
        safe_map.update([(c, c) for c in safe])
        quoter = safe_map.__getitem__
        safe = always_safe + safe
        _safe_quoters[cachekey] = (quoter, safe)
    # If every character is safe, the string can be returned unchanged.
    if not s.rstrip(safe):
        return s
    return ''.join(map(quoter, s))
def quote_plus(s, safe=''):
    """Quote the query fragment of a URL; replacing ' ' with '+'"""
    if ' ' not in s:
        return quote(s, safe)
    # Keep spaces unquoted during quote(), then turn them into '+'.
    return quote(s, safe + ' ').replace(' ', '+')
def urlencode(query, doseq=0):
    """Encode a sequence of two-element tuples or dictionary into a URL query string.

    If any values in the query arg are sequences and doseq is true, each
    sequence element is converted to a separate parameter.

    If the query arg is a sequence of two-element tuples, the order of the
    parameters in the output will match the order of parameters in the
    input.
    """

    if hasattr(query,"items"):
        # mapping objects
        query = query.items()
    else:
        # it's a bother at times that strings and string-like objects are
        # sequences...
        try:
            # non-sequence items should not work with len()
            # non-empty strings will fail this
            if len(query) and not isinstance(query[0], tuple):
                raise TypeError
            # zero-length sequences of all types will get here and succeed,
            # but that's a minor nit - since the original implementation
            # allowed empty dicts that type of behavior probably should be
            # preserved for consistency
        except TypeError:
            # Re-raise with the original traceback so the caller sees
            # where the bad value came from.
            ty,va,tb = sys.exc_info()
            raise TypeError, "not a valid non-string sequence or mapping object", tb

    l = []
    if not doseq:
        # preserve old behavior: every value is stringified whole.
        for k, v in query:
            k = quote_plus(str(k))
            v = quote_plus(str(v))
            l.append(k + '=' + v)
    else:
        for k, v in query:
            k = quote_plus(str(k))
            if isinstance(v, str):
                v = quote_plus(v)
                l.append(k + '=' + v)
            elif _is_unicode(v):
                # is there a reasonable way to convert to ASCII?
                # encode generates a string, but "replace" or "ignore"
                # lose information and "strict" can raise UnicodeError
                v = quote_plus(v.encode("ASCII","replace"))
                l.append(k + '=' + v)
            else:
                try:
                    # is this a sufficient test for sequence-ness?
                    len(v)
                except TypeError:
                    # not a sequence
                    v = quote_plus(str(v))
                    l.append(k + '=' + v)
                else:
                    # loop over the sequence: one k=elt pair per element.
                    for elt in v:
                        l.append(k + '=' + quote_plus(str(elt)))
    return '&'.join(l)
# Proxy handling
def getproxies_environment():
    """Return a dictionary of scheme -> proxy server URL mappings.

    Scan the environment for variables named <scheme>_proxy;
    this seems to be the standard convention.  If you need a
    different way, you can pass a proxies dictionary to the
    [Fancy]URLopener constructor.
    """
    suffix = '_proxy'
    proxies = {}
    for name, value in os.environ.items():
        name = name.lower()
        # Empty values are ignored; 'http_proxy=' disables nothing here.
        if value and name.endswith(suffix):
            proxies[name[:-len(suffix)]] = value
    return proxies
def proxy_bypass_environment(host):
    """Test if proxies should not be used for a particular host.

    Checks the environment for a variable named no_proxy, which should
    be a list of DNS suffixes separated by commas, or '*' for all hosts.
    """
    no_proxy = os.environ.get('no_proxy', '') or os.environ.get('NO_PROXY', '')
    # '*' is special case for always bypass
    if no_proxy == '*':
        return 1
    # strip port off host
    hostonly, port = splitport(host)
    # Bypass when the host (with or without port) ends with any suffix.
    for name in (entry.strip() for entry in no_proxy.split(',')):
        if name and (hostonly.endswith(name) or host.endswith(name)):
            return 1
    # otherwise, don't bypass
    return 0
if sys.platform == 'darwin':
    # Mac OS X: consult the SystemConfiguration framework.
    from _scproxy import _get_proxy_settings, _get_proxies

    def proxy_bypass_macosx_sysconf(host):
        """
        Return True iff this host shouldn't be accessed using a proxy

        This function uses the MacOSX framework SystemConfiguration
        to fetch the proxy information.
        """
        import re
        import socket
        from fnmatch import fnmatch

        hostonly, port = splitport(host)

        def ip2num(ipAddr):
            # Pack a dotted-quad string into a 32-bit integer,
            # zero-padding missing trailing components.
            parts = ipAddr.split('.')
            parts = map(int, parts)
            if len(parts) != 4:
                parts = (parts + [0, 0, 0, 0])[:4]
            return (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8) | parts[3]

        proxy_settings = _get_proxy_settings()

        # Check for simple host names:
        if '.' not in host:
            if proxy_settings['exclude_simple']:
                return True

        hostIP = None

        for value in proxy_settings.get('exceptions', ()):
            # Items in the list are strings like these: *.local, 169.254/16
            if not value: continue

            m = re.match(r"(\d+(?:\.\d+)*)(/\d+)?", value)
            if m is not None:
                # Numeric exception: compare network prefixes.
                if hostIP is None:
                    try:
                        hostIP = socket.gethostbyname(hostonly)
                        hostIP = ip2num(hostIP)
                    except socket.error:
                        continue

                base = ip2num(m.group(1))
                mask = m.group(2)
                if mask is None:
                    # No explicit prefix length: infer from dotted parts.
                    mask = 8 * (m.group(1).count('.') + 1)
                else:
                    mask = int(mask[1:])
                mask = 32 - mask

                if (hostIP >> mask) == (base >> mask):
                    return True

            elif fnmatch(host, value):
                # Glob-style hostname exception (e.g. *.local).
                return True

        return False

    def getproxies_macosx_sysconf():
        """Return a dictionary of scheme -> proxy server URL mappings.

        This function uses the MacOSX framework SystemConfiguration
        to fetch the proxy information.
        """
        return _get_proxies()

    def proxy_bypass(host):
        # Environment variables win over the system configuration.
        if getproxies_environment():
            return proxy_bypass_environment(host)
        else:
            return proxy_bypass_macosx_sysconf(host)

    def getproxies():
        return getproxies_environment() or getproxies_macosx_sysconf()

elif os.name == 'nt':
    def getproxies_registry():
        """Return a dictionary of scheme -> proxy server URL mappings.

        Win32 uses the registry to store proxies.
        """
        proxies = {}
        try:
            import _winreg
        except ImportError:
            # Std module, so should be around - but you never know!
            return proxies
        try:
            internetSettings = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
                r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
            proxyEnable = _winreg.QueryValueEx(internetSettings,
                                               'ProxyEnable')[0]
            if proxyEnable:
                # Returned as Unicode but problems if not converted to ASCII
                proxyServer = str(_winreg.QueryValueEx(internetSettings,
                                                       'ProxyServer')[0])
                if '=' in proxyServer:
                    # Per-protocol settings
                    for p in proxyServer.split(';'):
                        protocol, address = p.split('=', 1)
                        # See if address has a type:// prefix
                        import re
                        if not re.match('^([^/:]+)://', address):
                            address = '%s://%s' % (protocol, address)
                        proxies[protocol] = address
                else:
                    # Use one setting for all protocols
                    if proxyServer[:5] == 'http:':
                        proxies['http'] = proxyServer
                    else:
                        proxies['http'] = 'http://%s' % proxyServer
                        proxies['https'] = 'https://%s' % proxyServer
                        proxies['ftp'] = 'ftp://%s' % proxyServer
            internetSettings.Close()
        except (WindowsError, ValueError, TypeError):
            # Either registry key not found etc, or the value in an
            # unexpected format.
            # proxies already set up to be empty so nothing to do
            pass
        return proxies

    def getproxies():
        """Return a dictionary of scheme -> proxy server URL mappings.

        Returns settings gathered from the environment, if specified,
        or the registry.
        """
        return getproxies_environment() or getproxies_registry()

    def proxy_bypass_registry(host):
        try:
            import _winreg
            import re
        except ImportError:
            # Std modules, so should be around - but you never know!
            return 0
        try:
            internetSettings = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
                r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
            proxyEnable = _winreg.QueryValueEx(internetSettings,
                                               'ProxyEnable')[0]
            proxyOverride = str(_winreg.QueryValueEx(internetSettings,
                                                     'ProxyOverride')[0])
            # ^^^^ Returned as Unicode but problems if not converted to ASCII
        except WindowsError:
            return 0
        if not proxyEnable or not proxyOverride:
            return 0
        # try to make a host list from name and IP address.
        rawHost, port = splitport(host)
        host = [rawHost]
        try:
            addr = socket.gethostbyname(rawHost)
            if addr != rawHost:
                host.append(addr)
        except socket.error:
            pass
        try:
            fqdn = socket.getfqdn(rawHost)
            if fqdn != rawHost:
                host.append(fqdn)
        except socket.error:
            pass
        # make a check value list from the registry entry: replace the
        # '<local>' string by the localhost entry and the corresponding
        # canonical entry.
        proxyOverride = proxyOverride.split(';')
        # now check if we match one of the registry values.
        for test in proxyOverride:
            if test == '<local>':
                # '<local>' matches any bare (dotless) host name.
                if '.' not in rawHost:
                    return 1
            # Translate the glob pattern into a regular expression.
            test = test.replace(".", r"\.")     # mask dots
            test = test.replace("*", r".*")     # change glob sequence
            test = test.replace("?", r".")      # change glob char
            for val in host:
                # print "%s <--> %s" %( test, val )
                if re.match(test, val, re.I):
                    return 1
        return 0

    def proxy_bypass(host):
        """Return a dictionary of scheme -> proxy server URL mappings.

        Returns settings gathered from the environment, if specified,
        or the registry.
        """
        if getproxies_environment():
            return proxy_bypass_environment(host)
        else:
            return proxy_bypass_registry(host)

else:
    # By default use environment variables
    getproxies = getproxies_environment
    proxy_bypass = proxy_bypass_environment
# Test and time quote() and unquote()
def test1():
    """Round-trip all 256 byte values through quote()/unquote() and time it."""
    s = ''
    for i in range(256): s = s + chr(i)
    s = s*4
    t0 = time.time()
    qs = quote(s)
    uqs = unquote(qs)
    t1 = time.time()
    if uqs != s:
        # Round trip failed: dump the three stages for inspection.
        print 'Wrong!'
        print repr(s)
        print repr(qs)
        print repr(uqs)
    print round(t1 - t0, 3), 'sec'
def reporthook(blocknum, blocksize, totalsize):
    """Print per-block progress; signature matches a urlretrieve reporthook."""
    # Report during remote transfers
    print "Block number: %d, Block size: %d, Total size: %d" % (
        blocknum, blocksize, totalsize)
| gpl-2.0 |
tkaitchuck/nupic | external/linux64/lib/python2.6/site-packages/numpy/f2py/tests/test_size.py | 34 | 1049 | import os
import math
from numpy.testing import *
from numpy import array
import util
def _path(*a):
return os.path.join(*((os.path.dirname(__file__),) + a))
class TestSizeSumExample(util.F2PyTest):
    """Exercise Fortran size()-based routines compiled into self.module."""

    # Fortran source built by the F2PyTest harness.
    sources = [_path('src', 'size', 'foo.f90'),
               ]

    @dec.slow
    def test_all(self):
        # foo returns the per-row sums of a 2-D input.
        r = self.module.foo([[1,2]])
        assert_equal(r, [3],`r`)
        r = self.module.foo([[1,2],[3,4]])
        assert_equal(r, [3,7],`r`)
        r = self.module.foo([[1,2],[3,4],[5,6]])
        assert_equal(r, [3,7,11],`r`)

    @dec.slow
    def test_transpose(self):
        # trans returns the matrix transpose.
        r = self.module.trans([[1,2]])
        assert_equal(r, [[1],[2]],`r`)
        r = self.module.trans([[1,2,3],[4,5,6]])
        assert_equal(r, [[1,4],[2,5],[3,6]],`r`)

    @dec.slow
    def test_flatten(self):
        # flatten returns the row-major flattening of the input.
        r = self.module.flatten([[1,2]])
        assert_equal(r, [1,2],`r`)
        r = self.module.flatten([[1,2,3],[4,5,6]])
        assert_equal(r, [1,2,3,4,5,6],`r`)
if __name__ == "__main__":
    # Allow running this test module directly via the nose runner.
    import nose
    nose.runmodule()
| gpl-3.0 |
happyboy310/keras | tests/manual/check_constraints.py | 86 | 2841 | from __future__ import absolute_import
from __future__ import print_function
import keras
from keras.datasets import mnist
import keras.models
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.regularizers import l2, l1
from keras.constraints import maxnorm, nonneg
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils, generic_utils
import theano
import theano.tensor as T
import numpy as np
import scipy
# Training hyper-parameters.
batch_size = 100
nb_classes = 10
nb_epoch = 10

# the data, shuffled and split between tran and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Flatten 28x28 images into 784-element vectors.
X_train=X_train.reshape(60000,784)
X_test=X_test.reshape(10000,784)
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
# Scale pixel values into [0, 1].
X_train /= 255
X_test /= 255

# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
# Model 1: exercise the maxnorm and nonneg weight constraints.
model = Sequential()
model.add(Dense(784, 20, W_constraint=maxnorm(1)))
model.add(Activation('relu'))
model.add(Dropout(0.1))
model.add(Dense(20, 20, W_constraint=nonneg()))
model.add(Activation('relu'))
model.add(Dropout(0.1))
model.add(Dense(20, 10, W_constraint=maxnorm(1)))
model.add(Activation('softmax'))

rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0)

# After training, the largest column L2 norm of the first kernel must
# sit at the maxnorm(1) bound.
a=model.params[0].eval()
if np.isclose(np.max(np.sqrt(np.sum(a**2, axis=0))),1):
    print('Maxnorm test passed')
else:
    raise ValueError('Maxnorm test failed!')

# The nonneg-constrained layer should have clipped negatives to 0,
# while the unconstrained-sign layer (a) should not have a 0 minimum.
b=model.params[2].eval()
if np.min(b)==0 and np.min(a)!=0:
    print('Nonneg test passed')
else:
    raise ValueError('Nonneg test failed!')
# Model 2: L1 regularization should drive hidden weights toward sparsity.
model = Sequential()
model.add(Dense(784, 20))
model.add(Activation('relu'))
model.add(Dense(20, 20, W_regularizer=l1(.01)))
model.add(Activation('relu'))
model.add(Dense(20, 10))
model.add(Activation('softmax'))

rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=20, show_accuracy=True, verbose=0)

# Kurtosis test on the flattened L1-regularized 20x20 kernel: a sparse
# (heavily peaked) distribution should reject normality (small p-value).
a=model.params[2].eval().reshape(400)
(D, p1) = scipy.stats.kurtosistest(a)

# Model 3: same architecture with L2 regularization for comparison.
model = Sequential()
model.add(Dense(784, 20))
model.add(Activation('relu'))
model.add(Dense(20, 20, W_regularizer=l2(.01)))
model.add(Activation('relu'))
model.add(Dense(20, 10))
model.add(Activation('softmax'))

rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=20, show_accuracy=True, verbose=0)

a=model.params[2].eval().reshape(400)
(D, p2) = scipy.stats.kurtosistest(a)

# L1 weights should look clearly non-normal (p1 < .01) while L2 weights
# should not reject normality (p2 > .01).
if p1<.01 and p2>.01:
    print('L1 and L2 regularization tests passed')
else:
    raise ValueError('L1 and L2 regularization tests failed!')
EvenStrangest/tensorflow | tensorflow/examples/skflow/iris_custom_decay_dnn.py | 3 | 1749 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets, metrics
from sklearn.cross_validation import train_test_split
import tensorflow as tf
# Load the Iris dataset and hold out 20% for evaluation.
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                    iris.target,
                                                    test_size=0.2,
                                                    random_state=42)
# setup exponential decay function
def exp_decay(global_step):
    """Return an exponentially decaying learning rate (base 0.1) tensor."""
    return tf.train.exponential_decay(
        learning_rate=0.1, global_step=global_step,
        decay_steps=100, decay_rate=0.001)
# use customized decay function in learning_rate
optimizer = tf.train.AdagradOptimizer(learning_rate=exp_decay)
# Three-layer DNN over the 4 Iris features, 3 output classes.
classifier = tf.contrib.learn.DNNClassifier(hidden_units=[10, 20, 10],
                                            n_classes=3,
                                            optimizer=optimizer)
classifier.fit(X_train, y_train, steps=800)
# Report held-out accuracy.
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.