repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
onlynone/Yumpy | yum/fssnapshots.py | 3 | 8990 |
import os
import fnmatch
import time
import subprocess
try:
import lvm
# Check that lvm2 is at least 2.2.99... In theory hacked versions of
# .98 work, but meh.
_ver = lvm.getVersion()
# Looks liks: 2.02.84(2) (2011-02-09)
_ver = _ver.split()[0]
_ver = _ver.split('(')[0]
_ver = tuple(map(int, _ver.split('.')))
if _ver < (2, 2, 99):
lvm = None
except:
lvm = None
_ver = None
def _is_origin(lv):
snap = lv.getProperty("lv_attr")
# snap=(<value>, <is settable>)
if not snap[0]: # Broken??
return None
return snap[0][0] in ('o', 'O')
def _is_snap(lv):
snap = lv.getProperty("lv_attr")
# snap=(<value>, <is settable>)
if not snap[0]: # Broken??
return None
return snap[0][0] in ('s', 'S')
def _is_virt(lv):
snap = lv.getProperty("lv_attr")
# snap=(<value>, <is settable>)
if not snap[0]: # Broken??
return None
return snap[0][0] == 'v'
def _vg_name2lv(vg, lvname):
try:
return vg.lvFromName(lvname)
except:
return None
def _list_vg_names():
    """Return a list of LVM volume group names.

    Tries the lvm python binding first; if that returns nothing (the binding
    can be broken) it falls back to parsing the output of "/sbin/lvm vgs".
    Returns [] when the fallback fails too.
    """
    names = lvm.listVgNames()

    if not names:  # Could be just broken...
        p = subprocess.Popen(["/sbin/lvm", "vgs", "-o", "vg_name"],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # Read the pipes *before* checking the exit status: calling wait()
        # first can deadlock if the child fills the stdout/stderr pipe
        # buffers (see the subprocess documentation).
        output = p.communicate()[0]
        if p.returncode:
            return []  # Meh.

        output = output.split('\n')
        if not output:
            return []
        header = output[0].strip()
        if header != 'VG':
            return []
        names = []
        for name in output[1:]:
            if not name:
                break
            names.append(name.strip())

    return names
def _z_off(z, ctime=0):
if len(z) == 5: # +0000 / -0130 / etc.
off = int(z[1:3]) * 60
off += int(z[3:5])
off *= 60
if z[0] == '+':
ctime -= off
if z[0] == '-':
ctime += off
return ctime
def _lv_ctime2utc(ctime):
try: # Welcome to insanity ...
d,t,z = ctime.split()
ctime = time.strptime(d + ' ' + t, "%Y-%m-%d %H:%M:%S")
ctime = time.mktime(ctime)
if False: # Ignore the offset atm. ... we using this to delete older.
cur_z = time.strftime("%z")
if cur_z != z: # lol ...
cur_z = _z_off(cur_z)
z = _z_off(z)
ctime += (cur_z - z)
except:
ctime = 0
return ctime
def _lv_data(vg, lv):
    """Build a dict describing one snapshot LV (device names, size, usage...)."""
    vgname = vg.getName()
    lvname = lv.getName()
    origin = lv.getProperty("origin")[0]

    # snap_percent is scaled by a million; convert to a plain percentage.
    used = float(lv.getProperty("snap_percent")[0])
    used = used / (1 * 1000 * 1000)

    return {'dev': "%s/%s" % (vgname, lvname),
            'ctime': _lv_ctime2utc(lv.getProperty("lv_time")[0]),
            'origin': origin,
            'origin_dev': "%s/%s" % (vgname, origin),
            'free': vg.getFreeSize(),
            'tags': lv.getTags(),
            'size': lv.getSize(),
            'used': used}
class _FSSnap(object):
    """Create/list/delete LVM snapshots of the filesystems yum touches.

    Device patterns are "vg/lv" fnmatch globs; a leading "!" negates.
    Old style was: vg/lv_root vg/lv_swap
    New style is:  fedora/root fedora/swap
    New style is:  redhat/root redhat/swap
    """

    def __init__(self, root="/", lookup_mounts=True,
                 devices=('!*/swap', '!*/lv_swap')):
        # Without the lvm binding, or without root, we can't do anything.
        if not lvm or os.geteuid():
            devices = []

        self.version = _ver
        # Parts of the API seem to work even when lvm is not actually
        # installed, hence the path test.
        self.available = bool(lvm and os.path.exists("/sbin/lvm"))
        self.postfix_static = "_yum_"
        self._postfix = None
        self._root = root
        self._devs = devices
        self._vgnames = []

        if not self._devs:
            return

        self._vgnames = _list_vg_names()

    def _use_dev(self, vgname, lv=None):
        """Match vg (and optionally lv) against the configured patterns.

        Returns True/False when a pattern decides it; None when only a VG
        was given and no "vg/*" pattern settled the question.
        """
        if lv is not None:
            if _is_snap(lv) or _is_virt(lv):  # Don't look at these.
                return False

        found_neg = False

        for dev in self._devs:
            if '/' not in dev:  # Bad...
                continue

            neg = False
            if dev[0] == '!':
                found_neg = True
                neg = True
                dev = dev[1:]

            vgn, lvn = dev.split('/', 1)
            if '/' in lvn:
                continue

            if not fnmatch.fnmatch(vgname, vgn):
                continue

            if lvn == '*':
                return not neg

            if lv is None:
                return None

            lvname = lv.getName()
            if not fnmatch.fnmatch(lvname, lvn):
                continue

            return not neg

        # A purely-negative configuration means "everything else" matches.
        return found_neg

    def has_space(self, percentage=100):
        """ See if we have enough space to try a snapshot. """
        ret = False
        for vgname in self._vgnames:
            use = self._use_dev(vgname)
            if use is not None and not use:
                continue

            vg = lvm.vgOpen(vgname, 'r')
            if not vg:
                return False

            vgfsize = vg.getFreeSize()
            lvssize = 0
            for lv in vg.listLVs():
                if not self._use_dev(vgname, lv):
                    continue
                lvssize += lv.getSize()
            vg.close()

            if not lvssize:
                continue

            ret = True
            if (lvssize * percentage) > (100 * vgfsize):
                return False

        return ret

    def _get_postfix(self):
        # Lazily build "<postfix_static><timestamp>" once, then reuse it so
        # all LVs snapshotted in one run share the same postfix.
        if self._postfix is None:
            self._postfix = self.postfix_static
            self._postfix += time.strftime("%Y%m%d%H%M%S")
        return self._postfix

    postfix = property(fget=lambda self: self._get_postfix(),
                       fset=lambda self, value: setattr(self, "_postfix", value),
                       fdel=lambda self: setattr(self, "_postfix", None),
                       doc="postfix for snapshots")

    def snapshot(self, percentage=100, prefix='', postfix=None, tags=None):
        """ Attempt to take a snapshot, note that errors can happen after
            this function succeeds.  Returns a list of (old dev, new dev)
            pairs, False when a VG can't be opened, None on snapshot failure. """
        if tags is None:  # fixed: was a shared mutable default argument
            tags = {}
        if postfix is None:
            postfix = self.postfix

        ret = []
        for vgname in self._vgnames:
            use = self._use_dev(vgname)
            if use is not None and not use:
                continue

            vg = lvm.vgOpen(vgname, 'w')
            if not vg:
                return False

            for lv in vg.listLVs():
                lvname = lv.getName()
                if not self._use_dev(vgname, lv):
                    continue

                nlvname = "%s%s%s" % (prefix, lvname, postfix)
                nlv = lv.snapshot(nlvname, (lv.getSize() * percentage) / 100)
                if not nlv:  # Failed here ... continuing seems bad.
                    vg.close()
                    return None

                odev = "%s/%s" % (vgname, lvname)
                ndev = "%s/%s" % (vgname, nlvname)

                # FIXME: yum_fssnapshot_pre_lv_name=<blah>
                # key=value tags are de-duplicated by key; plain tags are
                # always added.
                eq_tags = set()
                for val in (ndev, odev, '*'):
                    for tag in tags.get(val, []):
                        if '=' in tag:
                            eq_tag_key, eq_tag_val = tag.split('=', 1)
                            if eq_tag_key in eq_tags:
                                continue
                            eq_tags.add(eq_tag_key)
                        nlv.addTag(tag)

                ret.append((odev, ndev))
            vg.close()

        return ret

    def old_snapshots(self):
        """ List data for old snapshots. """
        ret = []

        for vgname in self._vgnames:
            # We could filter out the VGs using _use_dev() but this way we'll
            # see stuff after changing config. options.
            vg = lvm.vgOpen(vgname, 'w')
            for lv in vg.listLVs():
                if not _is_snap(lv):  # No snapshot means, we don't care.
                    continue
                ret.append(_lv_data(vg, lv))
            vg.close()

        return ret

    def del_snapshots(self, devices=None):
        """ Remove the given "vg/lv" snapshot devices; returns their data. """
        if not lvm:
            return []

        if devices is None:  # fixed: was a shared mutable default argument
            devices = []

        ret = []
        togo = {}
        for dev in devices:
            vgname, lvname = dev.split('/')
            togo.setdefault(vgname, set()).add(lvname)

        for vgname in togo:
            vg = lvm.vgOpen(vgname, 'w')
            for lvname in togo[vgname]:
                lv = _vg_name2lv(vg, lvname)
                if not lv:
                    continue

                if not _is_snap(lv):  # No snapshot means don't try to delete!
                    continue

                ret.append(_lv_data(vg, lv))
                lv.remove()
            vg.close()

        return ret
| gpl-2.0 |
neiljdo/readysaster-icannhas-web | readysaster-icannhas-web/users/views.py | 1 | 2241 | # -*- coding: utf-8 -*-
# Import the reverse lookup function
from django.core.urlresolvers import reverse
# view imports
from django.views.generic import DetailView
from django.views.generic import RedirectView
from django.views.generic import UpdateView
from django.views.generic import ListView
# Only authenticated users can access views using this.
from braces.views import LoginRequiredMixin
# Import the form from users/forms.py
from .forms import UserForm
# Import the customized User model
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
    """Display a single user's profile page (login required)."""

    model = User
    # Look the user up by username from the URL instead of the default pk.
    slug_field = "username"
    slug_url_kwarg = "username"
class UserRedirectView(LoginRequiredMixin, RedirectView):
    """Redirect the requesting user to their own detail page."""

    permanent = False  # temporary (302) redirect

    def get_redirect_url(self):
        username = self.request.user.username
        return reverse("users:detail", kwargs={"username": username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
    """Let the logged-in user edit their own profile."""

    form_class = UserForm
    model = User

    def get_success_url(self):
        # Send the user back to their own page after a successful update.
        username = self.request.user.username
        return reverse("users:detail", kwargs={"username": username})

    def get_object(self):
        # Only ever operate on the record of the user making the request.
        return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
    """List all users (login required)."""

    model = User
    # NOTE(review): slug settings look unused by ListView (it performs no
    # single-object lookup); kept as-is to preserve the original attributes.
    slug_field = "username"
    slug_url_kwarg = "username"
class FetchFloodMapView(LoginRequiredMixin, DetailView):
    """User detail view that also fetches flood maps for the user's LGU."""

    model = User
    slug_field = "username"
    slug_url_kwarg = "username"

    def get_context_data(self, **kwargs):
        context = super(FetchFloodMapView, self).get_context_data(**kwargs)

        # Fetch flood maps for the user's municipality using the NOAH API
        # and expose them to the template.
        municipality = self.object.lgu.municipality
        context['floodmaps'] = municipality.get_floodmaps()

        return context
| bsd-3-clause |
rpavlik/chromium | state_tracker/state_get.py | 4 | 5827 | # Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
import sys, re, string
sys.path.append( "../glapi_parser" )
import apiutil
# NOTE: this script is Python 2 (print statements, the `string` module).
line_re = re.compile(r'^(\S+)\s+(GL_\S+)\s+(.*)\s*$')
extensions_line_re = re.compile(r'^(\S+)\s+(GL_\S+)\s+(GL_\S+)\s+(.*)\s*$')

params = {}
extended_params = {}

# Parse "type GL_PNAME fields..." lines describing core state values.
input = open("state_get.txt", 'r')
for line in input.readlines():
    if line[0] == '#':
        continue
    match = line_re.match(line)
    if match:
        type = match.group(1)
        pname = match.group(2)
        fields = string.split(match.group(3))
        params[pname] = (type, fields)
input.close()  # fixed: handle was previously leaked

# Parse "type GL_PNAME GL_IFDEF fields..." lines for extension state values.
input = open("state_extensions_get.txt", 'r')
for line in input.readlines():
    if line[0] == '#':
        continue
    match = extensions_line_re.match(line)
    if match:
        type = match.group(1)
        pname = match.group(2)
        ifdef = match.group(3)
        fields = string.split(match.group(4))
        extended_params[pname] = (type, ifdef, fields)
input.close()  # fixed: handle was previously leaked
# For each source C type, the cast/conversion expression that turns a state
# value into each of the four glGet* return flavours.
convert = {
    'GLenum': {
        'Boolean': '(GLboolean) ( %s != 0 )',
        'Double': '(GLdouble) %s',
        'Float': '(GLfloat) %s',
        'Integer': '(GLint) %s'
    },
    'GLboolean': {
        'Boolean': '(GLboolean) ( %s != 0 )',
        'Double': '(GLdouble) %s',
        'Float': '(GLfloat) %s',
        'Integer': '(GLint) %s'
    },
    'GLint': {
        'Boolean': '(GLboolean) ( %s != 0 )',
        'Double': '(GLdouble) %s',
        'Float': '(GLfloat) %s',
        'Integer': '(GLint) %s'
    },
    'GLuint': {
        'Boolean': '(GLboolean) ( %s != 0 )',
        'Double': '(GLdouble) %s',
        'Float': '(GLfloat) %s',
        'Integer': '(GLint) %s'
    },
    'GLfloat': {
        'Boolean': '(GLboolean) ( %s != 0.0f )',
        'Double': '(GLdouble) %s',
        'Float': '%s',
        'Integer': '(GLint) %s'
    },
    'GLdouble': {
        'Boolean': '(GLboolean) ( %s != 0.0 )',
        'Double': '%s',
        'Float': '(GLfloat) %s',
        'Integer': '(GLint) %s'
    },
    'GLdefault': {
        'Boolean': '(GLboolean) ( %s != (GLdefault) 0.0 )',
        'Double': '(GLdouble) %s',
        'Float': '(GLfloat) %s',
        'Integer': '(GLint) %s'
    },
    # clamped types map [-1, 1] onto the full integer range
    'GLclampd': {
        'Boolean': '(GLboolean) ( %s != 0.0 )',
        'Double': '%s',
        'Float': '(GLfloat) %s',
        'Integer': '__clampd_to_int(%s)'
    },
    'GLclampf': {
        'Boolean': '(GLboolean) ( %s != 0.0f )',
        'Double': '(GLdouble) %s',
        'Float': '%s',
        'Integer': '__clampf_to_int(%s)'
    }
}

# The four generated crStateGet*v flavours and their C parameter types.
types = ["Boolean", "Double", "Float", "Integer"]

ctypes = {
    'Boolean': 'GLboolean',
    'Double': 'GLdouble',
    'Float': 'GLfloat',
    'Integer': 'GLint'
}
apiutil.CopyrightC()
print """
/* DO NOT EDIT - THIS FILE GENERATED BY state_get.txt AND THE state_get.py SCRIPT */
#include <stdio.h>
#include <math.h>
#include "state.h"
#include "state/cr_statetypes.h"
static GLint __clampd_to_int( GLdouble d )
{
/* -1.0 -> MIN_INT, 1.0 -> MAX_INT */
if ( d > 1.0 )
return 0x7fffffff;
if ( d < -1.0 )
return 0x80000000;
return (GLint) floor( d * 2147483647.5 );
}
static GLint __clampf_to_int( GLfloat f )
{
/* -1.0f -> MIN_INT, 1.0f -> MAX_INT */
if ( f > 1.0f )
return 0x7fffffff;
if ( f < -1.0f )
return 0x80000000;
return (GLint) floor( f * 2147483647.5f );
}
"""
header = """
{
CRContext *g = GetCurrentContext();
if (g->current.inBeginEnd)
{
crStateError(__LINE__, __FILE__, GL_INVALID_OPERATION,
"glGet called in Begin/End");
return;
}
if ( pname == GL_CURRENT_INDEX || pname == GL_CURRENT_COLOR ||
pname == GL_CURRENT_SECONDARY_COLOR_EXT ||
pname == GL_CURRENT_FOG_COORDINATE_EXT ||
pname == GL_CURRENT_NORMAL || pname == GL_EDGE_FLAG ||
pname == GL_CURRENT_TEXTURE_COORDS )
{
#if 0
crStateError(__LINE__,__FILE__, GL_INVALID_OPERATION,
"Unimplemented glGet of a 'current' value" );
#else
crStateCurrentRecover();/* &g->current, &sb->current, g->bitID );*/
#endif
}
switch ( pname ) {
"""
for rettype in types:
print ''
print 'void STATE_APIENTRY crStateGet%sv( GLenum pname, %s *params )' % ( rettype, ctypes[rettype] )
print header
keys = params.keys()
keys.sort()
for pname in keys:
print '\t\tcase %s:' % pname
(srctype,fields) = params[pname]
try:
cvt = convert[srctype][rettype]
i = 0
for field in fields:
expr = cvt % field
print '\t\t\tparams[%d] = %s;' % (i,expr)
i += 1
except:
print '\t\t\tcrStateError(__LINE__,__FILE__,GL_INVALID_OPERATION, "Unimplemented glGet!");'
print "\t\t\tbreak;"
keys = extended_params.keys();
keys.sort()
for pname in keys:
(srctype,ifdef,fields) = extended_params[pname]
ext = ifdef[3:] # the extension name with the "GL_" prefix removed
#print '#ifdef %s' % ifdef
print '#ifdef CR_%s' % ext
print '\t\tcase %s:' % pname
if ext != 'OPENGL_VERSION_1_2':
print '\t\t\tif (g->extensions.%s) {' % ext
try:
cvt = convert[srctype][rettype]
i = 0
for field in fields:
expr = cvt % field
if field[0] == '%':
command = string.split(field, '%')
print '\t\t\t\t%s;' % command[1]
continue
elif ext != 'OPENGL_VERSION_1_2':
print '\t\t\t\tparams[%d] = %s;' % (i,expr)
else:
print '\t\t\tparams[%d] = %s;' % (i,expr)
i += 1
except:
print '\t\t\tcrStateError(__LINE__,__FILE__,GL_INVALID_OPERATION, "Unimplemented glGet!");'
if ext != 'OPENGL_VERSION_1_2':
print "\t\t\t}"
print "\t\t\telse {"
print '\t\t\t\tcrStateError(__LINE__,__FILE__,GL_INVALID_ENUM, "glGet%sv");' % rettype
print "\t\t\t}"
print "\t\t\tbreak;"
#print '#endif /* %s */' % ifdef
print '#endif /* CR_%s */' % ext
print '\t\tdefault:'
print '\t\t\tcrStateError(__LINE__, __FILE__, GL_INVALID_ENUM, "glGet: Unknown enum: 0x%x", pname);'
print '\t\t\treturn;'
print '\t}'
print '}'
| bsd-3-clause |
XiaodunServerGroup/xiaodun-platform | common/lib/capa/capa/customrender.py | 8 | 3033 | """
This has custom renderers: classes that know how to render certain problem tags (e.g. <math> and
<solution>) to html.
These tags do not have state, so they just get passed the system (for access to render_template),
and the xml element.
"""
from .registry import TagRegistry
import logging
import re
from lxml import etree
import xml.sax.saxutils as saxutils
from .registry import TagRegistry
log = logging.getLogger(__name__)
registry = TagRegistry()
#-----------------------------------------------------------------------------
class MathRenderer(object):
    """Renders <math> tags containing latex-like markup into mathjax markup."""

    tags = ['math']

    def __init__(self, system, xml):
        r"""
        Render math using latex-like formatting.

        Examples:

            <math>$\displaystyle U(r)=4 U_0 $</math>
            <math>$r_0$</math>

        We convert these to [mathjax]...[/mathjax] and
        [mathjaxinline]...[/mathjaxinline].

        TODO: use shorter tags (but this will require converting problem XML files!)
        """
        self.system = system
        self.xml = xml

        # $...$ spans become inline math by default ...
        converted = re.sub(r'\$(.*)\$', r'[mathjaxinline]\1[/mathjaxinline]', xml.text)
        if r'\displaystyle' in converted:
            # ... unless \displaystyle was used, which requests display math.
            converted = converted.replace(r'\displaystyle', '')
            tag = 'mathjax'
        else:
            tag = 'mathjaxinline'
        self.mathstr = converted.replace('mathjaxinline]', '%s]' % tag)

    def get_html(self):
        """
        Return the contents of this tag, rendered to html, as an etree element.
        """
        # TODO: why are there nested html tags here?? Why are there html tags at all, in fact?
        html = '<html><html>%s</html><html>%s</html></html>' % (
            self.mathstr, saxutils.escape(self.xml.tail))
        try:
            xhtml = etree.XML(html)
        except Exception as err:
            if self.system.DEBUG:
                # NOTE(review): replace('<', '<') is a no-op; this looks like
                # an '&lt;' escape that got mangled somewhere — confirm
                # against upstream before changing.
                msg = '<html><div class="inline-error"><p>Error %s</p>' % (
                    str(err).replace('<', '<'))
                msg += ('<p>Failed to construct math expression from <pre>%s</pre></p>' %
                        html.replace('<', '<'))
                msg += "</div></html>"
                log.error(msg)
                return etree.XML(msg)
            else:
                raise
        return xhtml


registry.register(MathRenderer)
#-----------------------------------------------------------------------------
class SolutionRenderer(object):
    """
    A solution is just a <span>...</span> which is given an ID, that is used for displaying an
    extended answer (a problem "solution") after "show answers" is pressed.

    Note that the solution content is NOT rendered and returned in the HTML. It is obtained by an
    ajax call.
    """
    tags = ['solution']

    def __init__(self, system, xml):
        self.system = system
        self.id = xml.get('id')

    def get_html(self):
        # Render only the placeholder span; the actual solution text is
        # fetched via ajax using this id.
        html = self.system.render_template("solutionspan.html", {'id': self.id})
        return etree.XML(html)


registry.register(SolutionRenderer)
| agpl-3.0 |
LarsFronius/ansible | lib/ansible/playbook/role/definition.py | 11 | 9059 | # (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.six import iteritems, string_types
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.conditional import Conditional
from ansible.playbook.taggable import Taggable
from ansible.template import Templar
from ansible.utils.path import unfrackpath
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['RoleDefinition']
class RoleDefinition(Base, Become, Conditional, Taggable):
    """Parses one role entry from a play (a bare name or a dict with params)
    into a normalized datastructure, resolving the role's on-disk path."""

    _role = FieldAttribute(isa='string')

    def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None):
        super(RoleDefinition, self).__init__()
        self._play = play
        self._variable_manager = variable_manager
        self._loader = loader

        self._role_path = None
        self._role_basedir = role_basedir
        self._role_params = dict()

    # def __repr__(self):
    #     return 'ROLEDEF: ' + self._attributes.get('role', '<no name set>')

    @staticmethod
    def load(data, variable_manager=None, loader=None):
        raise AnsibleError("not implemented")

    def preprocess_data(self, ds):
        # role names that are simply numbers can be parsed by PyYAML
        # as integers even when quoted, so turn it into a string type
        if isinstance(ds, int):
            ds = "%s" % ds

        # Fixed: this was a bare `assert`, which is stripped when running
        # under `python -O`; raise explicitly so validation always happens.
        if not isinstance(ds, (dict, string_types, AnsibleBaseYAMLObject)):
            raise AnsibleError("role definitions must be strings or dictionaries, got: %s" % type(ds))

        if isinstance(ds, dict):
            ds = super(RoleDefinition, self).preprocess_data(ds)

        # save the original ds for use later
        self._ds = ds

        # we create a new data structure here, using the same
        # object used internally by the YAML parsing code so we
        # can preserve file:line:column information if it exists
        new_ds = AnsibleMapping()
        if isinstance(ds, AnsibleBaseYAMLObject):
            new_ds.ansible_pos = ds.ansible_pos

        # first we pull the role name out of the data structure,
        # and then use that to determine the role path (which may
        # result in a new role name, if it was a file path)
        role_name = self._load_role_name(ds)
        (role_name, role_path) = self._load_role_path(role_name)

        # next, we split the role params out from the valid role
        # attributes and update the new datastructure with that
        # result and the role name
        if isinstance(ds, dict):
            (new_role_def, role_params) = self._split_role_params(ds)
            new_ds.update(new_role_def)
            self._role_params = role_params

        # set the role name in the new ds
        new_ds['role'] = role_name

        # we store the role path internally
        self._role_path = role_path

        # and return the cleaned-up data structure
        return new_ds

    def _load_role_name(self, ds):
        '''
        Returns the role name (either the role: or name: field) from
        the role definition, or (when the role definition is a simple
        string), just that string
        '''

        if isinstance(ds, string_types):
            return ds

        role_name = ds.get('role', ds.get('name'))
        if not role_name or not isinstance(role_name, string_types):
            raise AnsibleError('role definitions must contain a role name', obj=ds)

        # if we have the required datastructures, and if the role_name
        # contains a variable, try and template it now
        if self._variable_manager:
            all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play)
            templar = Templar(loader=self._loader, variables=all_vars)
            if templar._contains_vars(role_name):
                role_name = templar.template(role_name)

        return role_name

    def _load_role_path(self, role_name):
        '''
        the 'role', as specified in the ds (or as a bare string), can either
        be a simple name or a full path. If it is a full path, we use the
        basename as the role name, otherwise we take the name as-given and
        append it to the default role path
        '''

        # we always start the search for roles in the base directory of the playbook
        role_search_paths = [
            os.path.join(self._loader.get_basedir(), u'roles'),
        ]

        # also search in the configured roles path
        if C.DEFAULT_ROLES_PATH:
            role_search_paths.extend(C.DEFAULT_ROLES_PATH)

        # next, append the roles basedir, if it was set, so we can
        # search relative to that directory for dependent roles
        if self._role_basedir:
            role_search_paths.append(self._role_basedir)

        # finally as a last resort we look in the current basedir as set
        # in the loader (which should be the playbook dir itself) but without
        # the roles/ dir appended
        role_search_paths.append(self._loader.get_basedir())

        # create a templar class to template the dependency names, in
        # case they contain variables
        if self._variable_manager is not None:
            all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play)
        else:
            all_vars = dict()

        templar = Templar(loader=self._loader, variables=all_vars)
        role_name = templar.template(role_name)

        # now iterate through the possible paths and return the first one we find
        for path in role_search_paths:
            path = templar.template(path)
            role_path = unfrackpath(os.path.join(path, role_name))
            if self._loader.path_exists(role_path):
                return (role_name, role_path)

        # if not found elsewhere try to extract path from name
        role_path = unfrackpath(role_name)
        if self._loader.path_exists(role_path):
            role_name = os.path.basename(role_name)
            return (role_name, role_path)

        raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(role_search_paths)), obj=self._ds)

    def _split_role_params(self, ds):
        '''
        Splits any random role params off from the role spec and store
        them in a dictionary of params for parsing later
        '''

        role_def = dict()
        role_params = dict()
        base_attribute_names = frozenset(self._valid_attrs.keys())
        for (key, value) in iteritems(ds):
            # use the list of FieldAttribute values to determine what is and is not
            # an extra parameter for this role (or sub-class of this role)
            # FIXME: hard-coded list of exception key names here corresponds to the
            #        connection fields in the Base class. There may need to be some
            #        other mechanism where we exclude certain kinds of field attributes,
            #        or make this list more automatic in some way so we don't have to
            #        remember to update it manually.
            if key not in base_attribute_names or key in ('connection', 'port', 'remote_user'):
                if key in ('connection', 'port', 'remote_user'):
                    display.deprecated("Using '%s' as a role param has been deprecated. " % key + \
                            "In the future, these values should be entered in the `vars:` " + \
                            "section for roles, but for now we'll store it as both a param and an attribute.", version="2.7")
                    role_def[key] = value
                # this key does not match a field attribute, so it must be a role param
                role_params[key] = value
            else:
                # this is a field attribute, so copy it over directly
                role_def[key] = value

        return (role_def, role_params)

    def get_role_params(self):
        return self._role_params.copy()

    def get_role_path(self):
        return self._role_path
| gpl-3.0 |
oasiswork/odoo | addons/hr_recruitment/report/hr_recruitment_report.py | 325 | 4836 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
from .. import hr_recruitment
from openerp.addons.decimal_precision import decimal_precision as dp
class hr_recruitment_report(osv.Model):
    """Read-only recruitment statistics, backed by a SQL view over hr_applicant."""
    _name = "hr.recruitment.report"
    _description = "Recruitments Statistics"
    _auto = False  # no table is created: init() builds the backing view
    _rec_name = 'date_create'
    _order = 'date_create desc'

    _columns = {
        'user_id': fields.many2one('res.users', 'User', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        'date_create': fields.datetime('Create Date', readonly=True),
        'date_last_stage_update': fields.datetime('Last Stage Update', readonly=True),
        'date_closed': fields.date('Closed', readonly=True),
        'job_id': fields.many2one('hr.job', 'Applied Job', readonly=True),
        'stage_id': fields.many2one('hr.recruitment.stage', 'Stage'),
        'type_id': fields.many2one('hr.recruitment.degree', 'Degree'),
        'department_id': fields.many2one('hr.department', 'Department', readonly=True),
        'priority': fields.selection(hr_recruitment.AVAILABLE_PRIORITIES, 'Appreciation'),
        'salary_prop': fields.float("Salary Proposed", digits_compute=dp.get_precision('Account')),
        'salary_prop_avg': fields.float("Avg. Proposed Salary", group_operator="avg", digits_compute=dp.get_precision('Account')),
        'salary_exp': fields.float("Salary Expected", digits_compute=dp.get_precision('Account')),
        'salary_exp_avg': fields.float("Avg. Expected Salary", group_operator="avg", digits_compute=dp.get_precision('Account')),
        'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
        'available': fields.float("Availability"),
        'delay_close': fields.float('Avg. Delay to Close', digits=(16, 2), readonly=True, group_operator="avg",
                                    help="Number of Days to close the project issue"),
        'last_stage_id': fields.many2one('hr.recruitment.stage', 'Last Stage'),
    }

    def init(self, cr):
        # (Re)create the SQL view aggregating applicants per stage/job/etc.
        tools.drop_view_if_exists(cr, 'hr_recruitment_report')
        cr.execute("""
            create or replace view hr_recruitment_report as (
                 select
                     min(s.id) as id,
                     s.create_date as date_create,
                     date(s.date_closed) as date_closed,
                     s.date_last_stage_update as date_last_stage_update,
                     s.partner_id,
                     s.company_id,
                     s.user_id,
                     s.job_id,
                     s.type_id,
                     sum(s.availability) as available,
                     s.department_id,
                     s.priority,
                     s.stage_id,
                     s.last_stage_id,
                     sum(salary_proposed) as salary_prop,
                     (sum(salary_proposed)/count(*)) as salary_prop_avg,
                     sum(salary_expected) as salary_exp,
                     (sum(salary_expected)/count(*)) as salary_exp_avg,
                     extract('epoch' from (s.write_date-s.create_date))/(3600*24) as delay_close,
                     count(*) as nbr
                 from hr_applicant s
                 group by
                     s.date_open,
                     s.create_date,
                     s.write_date,
                     s.date_closed,
                     s.date_last_stage_update,
                     s.partner_id,
                     s.company_id,
                     s.user_id,
                     s.stage_id,
                     s.last_stage_id,
                     s.type_id,
                     s.priority,
                     s.job_id,
                     s.department_id
            )
        """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
chris-chris/tensorflow | tensorflow/python/util/deprecation.py | 6 | 15050 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import re
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def _add_deprecated_function_notice_to_docstring(doc, date, instructions):
  """Adds a deprecation notice to a docstring for deprecated functions."""
  when = 'in a future version' if date is None else ('after %s' % date)
  notice = [
      'THIS FUNCTION IS DEPRECATED. It will be removed %s.' % when,
      'Instructions for updating:'
  ]
  return decorator_utils.add_notice_to_docstring(
      doc, instructions, 'DEPRECATED FUNCTION', '(deprecated)', notice)
def _add_deprecated_arg_notice_to_docstring(doc, date, instructions):
  """Adds a deprecation notice to a docstring for deprecated arguments."""
  when = 'in a future version' if date is None else ('after %s' % date)
  notice = [
      'SOME ARGUMENTS ARE DEPRECATED. They will be removed %s.' % when,
      'Instructions for updating:'
  ]
  return decorator_utils.add_notice_to_docstring(
      doc, instructions, 'DEPRECATED FUNCTION ARGUMENTS',
      '(deprecated arguments)', notice)
def _validate_deprecation_args(date, instructions):
if date is not None and not re.match(r'20\d\d-[01]\d-[0123]\d', date):
raise ValueError('Date must be YYYY-MM-DD.')
if not instructions:
raise ValueError('Don\'t deprecate things without conversion instructions!')
def _call_location():
  """Returns the file:line of the caller two frames up from this call."""
  frame = tf_inspect.currentframe()
  if not frame:
    # Slow fallback path when CPython frame internals are unavailable.
    # 0 avoids generating unused context.
    entry = tf_inspect.stack(0)[2]
    return '%s:%d' % (entry[1], entry[2])

  # CPython internals are available, use them for performance:
  # walk back two frames to get to the deprecated function's caller.
  caller = frame.f_back
  if caller.f_back:
    caller = caller.f_back
  return '%s:%d' % (caller.f_code.co_filename, caller.f_lineno)
def deprecated(date, instructions):
  """Decorator for marking functions or methods deprecated.

  This decorator logs a deprecation warning whenever the decorated function is
  called, in the format:

    <function> (from <module>) is deprecated and will be removed after <date>.
    Instructions for updating:
    <instructions>

  If `date` is None, 'after <date>' is replaced with 'in a future version'.
  <function> will include the class name if it is a method.  It also edits the
  docstring: ' (deprecated)' is appended to the first line and a deprecation
  notice is prepended to the rest.

  Args:
    date: String or None. The date the function is scheduled to be removed.
      Must be ISO 8601 (YYYY-MM-DD), or None.
    instructions: String. Instructions on how to update code using the
      deprecated function.

  Returns:
    Decorated function or method.

  Raises:
    ValueError: If date is not None or in ISO 8601 format, or instructions are
      empty.
  """
  _validate_deprecation_args(date, instructions)

  def _decorate(func):
    """Deprecation wrapper."""
    decorator_utils.validate_callable(func, 'deprecated')

    when = 'in a future version' if date is None else ('after %s' % date)

    @functools.wraps(func)
    def _wrapped_call(*args, **kwargs):
      logging.warning(
          'From %s: %s (from %s) is deprecated and will be removed %s.\n'
          'Instructions for updating:\n%s',
          _call_location(), decorator_utils.get_qualified_name(func),
          func.__module__, when, instructions)
      return func(*args, **kwargs)

    new_doc = _add_deprecated_function_notice_to_docstring(
        func.__doc__, date, instructions)
    return tf_decorator.make_decorator(func, _wrapped_call, 'deprecated',
                                       new_doc)
  return _decorate
# Spec for one deprecated argument: its zero-based positional index (-1 when
# not yet resolved), whether a non-warning "ok" value exists, and that value.
DeprecatedArgSpec = collections.namedtuple(
    'DeprecatedArgSpec', 'position has_ok_value ok_value')
def deprecated_args(date, instructions, *deprecated_arg_names_or_tuples):
  """Decorator for marking specific function arguments as deprecated.
  This decorator logs a deprecation warning whenever the decorated function is
  called with the deprecated argument. It has the following format:
    Calling <function> (from <module>) with <arg> is deprecated and will be
    removed after <date>. Instructions for updating:
    <instructions>
  If `date` is None, 'after <date>' is replaced with 'in a future version'.
  <function> includes the class name if it is a method.
  It also edits the docstring of the function: ' (deprecated arguments)' is
  appended to the first line of the docstring and a deprecation notice is
  prepended to the rest of the docstring.
  Args:
    date: String or None. The date the function is scheduled to be removed.
      Must be ISO 8601 (YYYY-MM-DD), or None.
    instructions: String. Instructions on how to update code using the
      deprecated function.
    *deprecated_arg_names_or_tuples: String. or 2-Tuple(String,
      [ok_vals]). The string is the deprecated argument name.
      Optionally, an ok-value may be provided. If the user provided
      argument equals this value, the warning is suppressed.
  Returns:
    Decorated function or method.
  Raises:
    ValueError: If date is not None and not in ISO 8601 format, instructions
      are empty, the deprecated arguments are not present in the function
      signature, or the second element of a deprecated_tuple is not a
      list.
  """
  _validate_deprecation_args(date, instructions)
  if not deprecated_arg_names_or_tuples:
    raise ValueError('Specify which argument is deprecated.')
  def _get_arg_names_to_ok_vals():
    """Returns a dict mapping arg_name to DeprecatedArgSpec w/o position."""
    d = {}
    for name_or_tuple in deprecated_arg_names_or_tuples:
      if isinstance(name_or_tuple, tuple):
        # (name, ok_value) tuple: the warning is suppressed for this value
        d[name_or_tuple[0]] = DeprecatedArgSpec(-1, True, name_or_tuple[1])
      else:
        # Plain name: every use of the argument warns
        d[name_or_tuple] = DeprecatedArgSpec(-1, False, None)
    return d
  def _get_deprecated_positional_arguments(names_to_ok_vals, arg_spec):
    """Builds a dictionary from deprecated arguments to their spec.
    Returned dict is keyed by argument name.
    Each value is a DeprecatedArgSpec with the following fields:
      position: The zero-based argument position of the argument
        within the signature. None if the argument isn't found in
        the signature.
      ok_values: Values of this argument for which warning will be
        suppressed.
    Args:
      names_to_ok_vals: dict from string arg_name to a list of values,
        possibly empty, which should not elicit a warning.
      arg_spec: Output from tf_inspect.getargspec on the called function.
    Returns:
      Dictionary from arg_name to DeprecatedArgSpec.
    """
    arg_name_to_pos = dict(
        (name, pos) for (pos, name) in enumerate(arg_spec.args))
    deprecated_positional_args = {}
    for arg_name, spec in iter(names_to_ok_vals.items()):
      if arg_name in arg_name_to_pos:
        pos = arg_name_to_pos[arg_name]
        deprecated_positional_args[arg_name] = DeprecatedArgSpec(
            pos, spec.has_ok_value, spec.ok_value)
    return deprecated_positional_args
  def deprecated_wrapper(func):
    """Deprecation decorator."""
    decorator_utils.validate_callable(func, 'deprecated_args')
    deprecated_arg_names = _get_arg_names_to_ok_vals()
    arg_spec = tf_inspect.getargspec(func)
    deprecated_positions = _get_deprecated_positional_arguments(
        deprecated_arg_names, arg_spec)
    is_varargs_deprecated = arg_spec.varargs in deprecated_arg_names
    is_kwargs_deprecated = arg_spec.keywords in deprecated_arg_names
    # Sanity check: every requested name must be a named argument, the
    # *varargs name or the **kwargs name of the decorated function.
    if (len(deprecated_positions) + is_varargs_deprecated + is_kwargs_deprecated
        != len(deprecated_arg_names_or_tuples)):
      known_args = arg_spec.args + [arg_spec.varargs, arg_spec.keywords]
      missing_args = [arg_name for arg_name in deprecated_arg_names
                      if arg_name not in known_args]
      raise ValueError('The following deprecated arguments are not present '
                       'in the function signature: %s. '
                       'Found next arguments: %s.' % (missing_args, known_args))
    def _same_value(a, b):
      """A comparison operation that works for multiple object types.
      Returns True for two empty lists, two numeric values with the
      same value, etc.
      Returns False for (pd.DataFrame, None), and other pairs which
      should not be considered equivalent.
      Args:
        a: value one of the comparison.
        b: value two of the comparison.
      Returns:
        A boolean indicating whether the two inputs are the same value
        for the purposes of deprecation.
      """
      if a is b:
        return True
      try:
        equality = a == b
        if isinstance(equality, bool):
          return equality
      except TypeError:
        return False
      return False
    @functools.wraps(func)
    def new_func(*args, **kwargs):
      """Deprecation wrapper."""
      invalid_args = []
      named_args = tf_inspect.getcallargs(func, *args, **kwargs)
      # Deprecated arguments passed positionally (unless the ok-value was used)
      for arg_name, spec in iter(deprecated_positions.items()):
        if (spec.position < len(args) and
            not (spec.has_ok_value and
                 _same_value(named_args[arg_name], spec.ok_value))):
          invalid_args.append(arg_name)
      # Deprecated *args / **kwargs that are actually used in this call
      if is_varargs_deprecated and len(args) > len(arg_spec.args):
        invalid_args.append(arg_spec.varargs)
      if is_kwargs_deprecated and kwargs:
        invalid_args.append(arg_spec.keywords)
      # Deprecated arguments passed by keyword (unless the ok-value was used)
      for arg_name in deprecated_arg_names:
        if (arg_name in kwargs and
            not (deprecated_positions[arg_name].has_ok_value and
                 _same_value(named_args[arg_name],
                             deprecated_positions[arg_name].ok_value))):
          invalid_args.append(arg_name)
      # Log one warning per deprecated argument that was used
      for arg_name in invalid_args:
        logging.warning(
            'From %s: calling %s (from %s) with %s is deprecated and will '
            'be removed %s.\nInstructions for updating:\n%s',
            _call_location(), decorator_utils.get_qualified_name(func),
            func.__module__, arg_name,
            'in a future version' if date is None else ('after %s' % date),
            instructions)
      return func(*args, **kwargs)
    return tf_decorator.make_decorator(func, new_func, 'deprecated',
                                       _add_deprecated_arg_notice_to_docstring(
                                           func.__doc__, date, instructions))
  return deprecated_wrapper
def deprecated_arg_values(date, instructions, **deprecated_kwargs):
  """Decorator for marking specific function argument values as deprecated.

  A deprecation warning is logged whenever the decorated function is called
  with one of the deprecated argument values:

    Calling <function> (from <module>) with <arg>=<value> is deprecated and
    will be removed after <date>. Instructions for updating:
    <instructions>

  If `date` is None, 'after <date>' is replaced with 'in a future version'.
  <function> will include the class name if it is a method.
  The function's docstring is also edited: ' (deprecated arguments)' is
  appended to its first line and a deprecation notice is prepended to the
  rest.

  Args:
    date: String or None. The date the function is scheduled to be removed.
      Must be ISO 8601 (YYYY-MM-DD), or None
    instructions: String. Instructions on how to update code using the
      deprecated function.
    **deprecated_kwargs: The deprecated argument values.

  Returns:
    Decorated function or method.

  Raises:
    ValueError: If date is not None or in ISO 8601 format, or instructions are
      empty.
  """
  _validate_deprecation_args(date, instructions)
  if not deprecated_kwargs:
    raise ValueError('Specify which argument values are deprecated.')

  def deprecated_wrapper(func):
    """Attaches the per-value deprecation warning to func."""
    decorator_utils.validate_callable(func, 'deprecated_arg_values')

    @functools.wraps(func)
    def new_func(*args, **kwargs):
      """Warns for each deprecated value actually passed in this call."""
      bound = tf_inspect.getcallargs(func, *args, **kwargs)
      for name, bad_value in deprecated_kwargs.items():
        if name not in bound or bound[name] != bad_value:
          continue
        logging.warning(
            'From %s: calling %s (from %s) with %s=%s is deprecated and will '
            'be removed %s.\nInstructions for updating:\n%s',
            _call_location(), decorator_utils.get_qualified_name(func),
            func.__module__, name, bad_value,
            'in a future version' if date is None else ('after %s' % date),
            instructions)
      return func(*args, **kwargs)

    notice_doc = _add_deprecated_arg_notice_to_docstring(
        func.__doc__, date, instructions)
    return tf_decorator.make_decorator(func, new_func, 'deprecated',
                                       notice_doc)

  return deprecated_wrapper
def deprecated_argument_lookup(new_name, new_value, old_name, old_value):
  """Looks up deprecated argument name and ensures both are not used.

  Args:
    new_name: new name of argument
    new_value: value of new argument (or None if not used)
    old_name: old name of argument
    old_value: value of old argument (or None if not used)

  Returns:
    The effective argument that should be used.

  Raises:
    ValueError: if new_value and old_value are both non-null
  """
  # Old name not used: the new value (possibly None) wins.
  if old_value is None:
    return new_value
  # Both supplied: ambiguous call, refuse.
  if new_value is not None:
    raise ValueError("Cannot specify both '%s' and '%s'" %
                     (old_name, new_name))
  return old_value
def rewrite_argument_docstring(old_doc, old_argument, new_argument):
  """Replaces `old_argument` references and 'old_argument:' entries in a
  docstring with the new argument name."""
  updated = old_doc.replace('`%s`' % old_argument, '`%s`' % new_argument)
  return updated.replace('%s:' % old_argument, '%s:' % new_argument)
| apache-2.0 |
grehx/spark-tk | integration-tests/tests/setup.py | 12 | 2347 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import sys
import os
import shutil
from threading import Lock
# Shorthand for os.path.dirname: the repo root is three levels up from here.
d = os.path.dirname
root_path = d(d(d(__file__)))
# print(...) form so this module works under both Python 2 and Python 3
# (the original bare print statement is Python-2-only).
print("[setup.py] root_path = %s" % root_path)
# Make the repo's python/ package importable by the tests
python_folder = os.path.join(root_path, "python")
sys.path.insert(0, python_folder)
# Guards one-time creation of the shared test context below
lock = Lock()
global_tc = None
# Establish a sparktk Context with SparkContext for the test session
@pytest.fixture(scope="session")
def tc(request):
    """Session-scoped fixture returning a single shared sparktk TkContext.

    The context is created lazily on first use (creation is guarded by a
    lock) and its SparkContext is stopped when the test session ends.
    """
    global global_tc
    with lock:
        if global_tc is None:
            from sparktk import TkContext
            from sparktk import create_sc
            from sparktk.tests import utils
            #from sparktk.loggers import loggers
            #loggers.set("d", "sparktk.sparkconf")
            # Local master with 2 cores; local filesystem instead of HDFS, no web UI
            sc = create_sc(master='local[2]',
                           app_name="pytest-pyspark-local-testing",
                           extra_conf_dict={"spark.hadoop.fs.default.name": "file:///",
                                            "spark.ui.enabled": 'false' })
            # Stop the SparkContext when the test session finishes
            request.addfinalizer(lambda: sc.stop())
            global_tc = TkContext(sc)
            # Expose the sparktk test utilities on the context for convenience
            global_tc.testing = utils
    return global_tc
# Relative directory where tests may create scratch files
sandbox_path = "sandbox"
def get_sandbox_path(path):
    """Return *path* placed inside the sandbox scratch directory."""
    full_path = os.path.join(sandbox_path, path)
    return full_path
def rm(path, error=False):
    """Delete a file or a directory tree; by default failures only warn.

    :param path: file or directory path to remove (non-existent paths are a no-op)
    :param error: if True, re-raise any deletion failure instead of warning
    """
    try:
        if os.path.isfile(path):
            os.unlink(path)
        elif os.path.isdir(path):
            shutil.rmtree(path)
    except Exception as e:
        if error:
            raise
        # Best-effort cleanup: report the problem and continue.
        # print(...) form works under both Python 2 and Python 3
        # (the original bare print statement is Python-2-only).
        print("[WARN] %s" % e)
def clear_folder(folder, warn=False):
    """Delete every entry directly inside *folder* (the folder itself is kept).

    :param warn: forwarded to rm's 'error' flag — if True, deletion failures raise
    """
    for entry in os.listdir(folder):
        rm(os.path.join(folder, entry), warn)
| apache-2.0 |
SKIRT/PTS | magic/plot/imagegrid.py | 1 | 106384 | # -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.plot.imagegrid Contains the ImageGridPlotter classes.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import aplpy
from abc import ABCMeta, abstractproperty
import matplotlib.pyplot as plt
from matplotlib import cm
from collections import OrderedDict, defaultdict
# Import the relevant PTS classes and modules
from ..tools.plotting import get_vmin_vmax
from ...core.tools import filesystem as fs
from ..core.frame import Frame
from ...core.basics.log import log
from ...core.basics.configurable import Configurable
from ...core.tools.utils import lazyproperty, memoize_method
from ...core.tools import sequences
from ..core.image import Image
from ...core.basics.distribution import Distribution
from ...core.basics.plot import MPLFigure
from ...core.basics.composite import SimplePropertyComposite
from ...core.basics.plot import normal_colormaps
from ..core.list import uniformize
from ...core.tools import numbers
from ...core.tools import types
# ------------------------------------------------------------------------------
# The two supported plotting themes
light_theme = "light"
dark_theme = "dark"
themes = [light_theme, dark_theme]

# ------------------------------------------------------------------------------

# Default colormaps for images and for (signed / absolute) residual maps
default_cmap = "inferno"
default_residual_cmap = 'RdBu'
default_absolute_residual_cmap = "OrRd"

# ------------------------------------------------------------------------------

# Matplotlib rc overrides for the light theme: black foreground on white
light_theme_settings = OrderedDict([
    ('axes.facecolor', 'white'),
    ('savefig.facecolor', 'white'),
    ('axes.edgecolor', 'black'),
    ('xtick.color', 'black'),
    ('ytick.color', 'black'),
    ('axes.labelcolor', 'black'),
    ('text.color', 'black'),
])
# ('axes.titlecolor', 'black') deliberately not set (needs a newer matplotlib)

# ------------------------------------------------------------------------------

# Matplotlib rc overrides for the dark theme: white foreground on black
dark_theme_settings = OrderedDict([
    ('axes.facecolor', 'black'),
    ('savefig.facecolor', 'black'),
    ('axes.edgecolor', 'white'),
    ('xtick.color', 'white'),
    ('ytick.color', 'white'),
    ('axes.labelcolor', 'white'),
    ('text.color', 'white'),
])
# ('axes.titlecolor', 'white') deliberately not set (needs a newer matplotlib)
# ------------------------------------------------------------------------------
class ImagePlotSettings(SimplePropertyComposite):
    """
    Base class for the per-image plot settings: a label, the scaling limits
    (vmin/vmax and whether they are 'soft') and the colormap.
    """
    __metaclass__ = ABCMeta
    # ------------------------------------------------------------------------------
    def __init__(self, **kwargs):
        """
        The constructor: defines the settings properties.
        """
        # Call the constructor of the base class
        super(ImagePlotSettings, self).__init__()
        # Define properties
        self.add_property("label", "string", "label for the image", None)
        self.add_property("vmin", "real", "plotting minimum")
        self.add_property("vmax", "real", "plotting maximum")
        self.add_boolean_property("soft_vmin", "soft vmin", False) #, None) # use None as default to use plotter config if not defined
        self.add_boolean_property("soft_vmax", "soft vmax", False) #, None) # use None as default to use plotter config if not defined
        self.add_property("cmap", "string", "colormap", choices=normal_colormaps)
# ------------------------------------------------------------------------------
class ImageGridPlotter(Configurable):
    """
    Abstract base class for plotters that lay out multiple images on a single
    figure grid, with a configurable light or dark theme and per-image settings.
    """
    __metaclass__ = ABCMeta
    # -----------------------------------------------------------------
    def __init__(self, *args, **kwargs):
        """
        The constructor.
        :param args: positional arguments for the Configurable base class
        :param kwargs: keyword arguments for the Configurable base class
        """
        # Call the constructor of the base class
        super(ImageGridPlotter, self).__init__(*args, **kwargs)
        # The figure
        self.figure = None
        # The grid
        self.grid = None
        # The plots
        self.plots = None
        # The plot settings per image name (an entry is created on first access)
        self.settings = defaultdict(self.image_settings_class)
    # -----------------------------------------------------------------
    @abstractproperty
    def image_settings_class(self):
        """
        The ImagePlotSettings subclass used for the per-image settings.
        :return:
        """
        pass
    # -----------------------------------------------------------------
    @abstractproperty
    def names(self):
        """
        The names of the images to be plotted.
        :return:
        """
        pass
    # ------------------------------------------------------------------------------
    @property
    def light(self):
        # Whether the light theme is configured
        return self.config.theme == light_theme
    # -----------------------------------------------------------------
    @property
    def dark(self):
        # Whether the dark theme is configured
        return self.config.theme == dark_theme
    # -----------------------------------------------------------------
    @lazyproperty
    def text_color(self):
        """
        The text color corresponding to the configured theme.
        :return:
        """
        # Set light theme
        if self.light: return "black"
        # Dark theme
        elif self.dark: return "white"
        # Invalid
        else: raise ValueError("Invalid theme")
    # -----------------------------------------------------------------
    @lazyproperty
    def frame_color(self):
        """
        The color of the plot frame, corresponding to the configured theme.
        :return:
        """
        # Set light theme
        if self.light: return "black"
        # Dark theme
        elif self.dark: return "white"
        # Invalid
        else: raise ValueError("Invalid theme")
    # -----------------------------------------------------------------
    @lazyproperty
    def background_color(self):
        """
        The background color corresponding to the configured theme.
        :return:
        """
        # Set light theme
        if self.light: return "white"
        # Dark theme
        elif self.dark: return "black"
        # Invalid
        else: raise ValueError("Invalid theme")
    # -----------------------------------------------------------------
    @abstractproperty
    def first_frame(self):
        """
        The first image frame (used e.g. to derive a default center).
        :return:
        """
        pass
    # -----------------------------------------------------------------
    @lazyproperty
    def center(self):
        """
        The central sky coordinate: the configured center, or the sky center
        of the first frame when no center is configured.
        :return:
        """
        # Center coordinate is defined
        if self.config.center is not None: return self.config.center
        # Not defined: use the center of the first frame
        return self.first_frame.center_sky
    # -----------------------------------------------------------------
    @property
    def ra_center(self):
        # Right ascension of the center coordinate
        return self.center.ra
    # ------------------------------------------------------------------------------
    @property
    def dec_center(self):
        # Declination of the center coordinate
        return self.center.dec
    # ------------------------------------------------------------------------------
    @lazyproperty
    def ra_center_deg(self):
        # Right ascension of the center, in degrees
        return self.ra_center.to("deg").value
    # ------------------------------------------------------------------------------
    @lazyproperty
    def dec_center_deg(self):
        # Declination of the center, in degrees
        return self.dec_center.to("deg").value
    # ------------------------------------------------------------------------------
    @lazyproperty
    def spacing_deg(self):
        # Configured grid spacing, converted to degrees
        return self.config.spacing.to("deg").value
    # ------------------------------------------------------------------------------
    @lazyproperty
    def radius_deg(self):
        # Configured radius, converted to degrees
        return self.config.radius.to("deg").value
    # ------------------------------------------------------------------------------
    @lazyproperty
    def colormap(self):
        # The matplotlib colormap object for the configured colormap name
        return cm.get_cmap(self.config.cmap)
    # -----------------------------------------------------------------
    @lazyproperty
    def nan_color(self):
        # Color for NaN pixels: the configured value, or the colormap's lowest color
        if self.config.nan_color is not None: return self.config.nan_color
        else: return self.colormap(0)
    # -----------------------------------------------------------------
    @lazyproperty
    def theme_settings(self):
        # The matplotlib rc settings dictionary for the configured theme
        if self.light: return light_theme_settings
        elif self.dark: return dark_theme_settings
        else: raise ValueError("Invalid theme")
    # -----------------------------------------------------------------
    def setup(self, **kwargs):
        """
        Sets up the plotter: applies the font, legend, line width and theme
        settings to the matplotlib rc parameters.
        :param kwargs: passed to the base class setup
        :return:
        """
        # Call the setup function of the base class
        super(ImageGridPlotter, self).setup(**kwargs)
        # plt.rcParams.update({'font.size':20})
        plt.rcParams["axes.labelsize"] = self.config.axes_label_size # 16 #default 20
        plt.rcParams["xtick.labelsize"] = self.config.ticks_label_size # 10 #default 16
        plt.rcParams["ytick.labelsize"] = self.config.ticks_label_size # 10 #default 16
        plt.rcParams["legend.fontsize"] = self.config.legend_fontsize # 10 #default 14
        # NOTE(review): 'legend_markers_cale' looks like a typo for 'legend_marker_scale'
        # in the configuration definition — confirm against the config module
        plt.rcParams["legend.markerscale"] = self.config.legend_markers_cale
        plt.rcParams["lines.markersize"] = self.config.lines_marker_size # 4 #default 4
        plt.rcParams["axes.linewidth"] = self.config.linewidth
        # Set theme-specific settings
        for label in self.theme_settings: plt.rcParams[label] = self.theme_settings[label]
        # plt.rcParams['xtick.major.size'] = 5
        # plt.rcParams['xtick.major.width'] = 2
        # plt.rcParams['ytick.major.size'] = 5
        # plt.rcParams['ytick.major.width'] = 2
# ------------------------------------------------------------------------------
def plot_images(images, **kwargs):
    """
    Convenience function: plots a grid of images with the standard plotter.
    :param images: the images to plot
    :param kwargs: configuration options passed to StandardImageGridPlotter
    :return:
    """
    # Create the plotter and run it on the images
    StandardImageGridPlotter(**kwargs).run(images=images)
# -----------------------------------------------------------------
class StandardImagePlotSettings(ImagePlotSettings):
    """
    Per-image plot settings for standard (non-residual) image grids.
    """
    def __init__(self, **kwargs):
        """
        The constructor.
        :param kwargs: initial values for the settings properties
        """
        # Call the constructor of the base class
        super(StandardImagePlotSettings, self).__init__(**kwargs)
        # Set properties
        self.set_properties(kwargs)
# -----------------------------------------------------------------
class StandardImageGridPlotter(ImageGridPlotter):
    """
    Plots a grid of images, with optional error maps, masks and regions
    attached to each image.
    """
    def __init__(self, *args, **kwargs):
        """
        The constructor.
        :param args: positional arguments for the base class
        :param kwargs: keyword arguments for the base class
        """
        # Call the constructor of the base class
        super(StandardImageGridPlotter, self).__init__(*args, **kwargs)
        # The image frames
        self.frames = OrderedDict()
        # The error frames
        self.errors = OrderedDict()
        # The masks
        self.masks = OrderedDict()
        # The regions
        self.regions = OrderedDict()
    # ------------------------------------------------------------------------------
    @property
    def image_settings_class(self):
        """
        The settings class used for each image plot.
        :return:
        """
        return StandardImagePlotSettings
    # ------------------------------------------------------------------------------
    def _run(self, **kwargs):
        """
        Runs the plotter: show (if configured), write, plot.
        :param kwargs:
        :return:
        """
        # Show stuff
        if self.config.show: self.show()
        # Write
        self.write()
        # Plot
        self.plot()
    # ------------------------------------------------------------------------------
    @property
    def names(self):
        """
        The names of the images that have been added.
        :return:
        """
        return self.frames.keys()
    # ------------------------------------------------------------------------------
    def add_image(self, name, image, errors=None, mask=None, regions=None, replace=False, settings=None):
        """
        Adds an image with optional error map, mask, regions and plot settings.
        :param name: unique name for the image (unless replace is True)
        :param image: a Frame or an Image instance (the primary frame is taken)
        :param errors: optional error frame
        :param mask: optional mask
        :param regions: optional regions
        :param replace: allow replacing a previously added image with the same name
        :param settings: optional dictionary of plot settings for this image
        :return:
        """
        # Check if name already exists
        if not replace and name in self.names: raise ValueError("Already an image with name '" + name + "' added")
        # Image is passed
        if isinstance(image, Image):
            # Get the frame
            frame = image.primary
            # Get errors?
            # Get mask?
            # Get regions?
        # Frame is passed
        elif isinstance(image, Frame): frame = image
        # Invalid
        else: raise ValueError("Invalid value for 'image': must be Frame or Image")
        # Add frame
        self.frames[name] = frame
        # Add errors
        if errors is not None: self.errors[name] = errors
        # Add regions
        if regions is not None: self.regions[name] = regions
        # Add mask
        if mask is not None: self.masks[name] = mask
        # Set settings
        if settings is not None: self.settings[name].set_properties(settings)
    # ------------------------------------------------------------------------------
    def show(self):
        """
        Shows the images (currently only logs the intent).
        :return:
        """
        # Inform the user
        log.info("Showing ...")
    # ------------------------------------------------------------------------------
    def write(self):
        """
        Writes out the data products, depending on the configuration flags.
        :return:
        """
        # Inform the user
        log.info("Writing ...")
        # Images
        if self.config.write_images: self.write_images()
        # Frames
        if self.config.write_frames: self.write_frames()
        # Masks
        if self.config.write_masks: self.write_masks()
        # Regions
        if self.config.write_regions: self.write_regions()
    # ------------------------------------------------------------------------------
    def write_images(self):
        """
        Writes the images (not implemented yet).
        :return:
        """
    # ------------------------------------------------------------------------------
    def write_frames(self):
        """
        Writes the frames (not implemented yet).
        :return:
        """
    # ------------------------------------------------------------------------------
    def write_masks(self):
        """
        Writes the masks (not implemented yet).
        :return:
        """
    # ------------------------------------------------------------------------------
    def write_regions(self):
        """
        Writes the regions (not implemented yet).
        :return:
        """
    # ------------------------------------------------------------------------------
    def plot(self):
        """
        Plots the images (currently only logs the intent).
        :return:
        """
        # Inform the user
        log.info("Plotting ...")
# ------------------------------------------------------------------------------
# Names of the recognized input kinds (used as keyword-argument keys in setup)
images_name = "images"
observations_name = "observations"
models_name = "models"
errors_name = "errors"
model_errors_name = "model_errors"
residuals_name = "residuals"
distributions_name = "distributions"
settings_name = "settings"
# ------------------------------------------------------------------------------
# Panel kinds for residual plotting
observation_name = "observation"
model_name = "model"
observation_or_model = [observation_name, model_name]
# ------------------------------------------------------------------------------
# Grid layout directions
horizontal_mode, vertical_mode = "horizontal", "vertical"
default_direction = vertical_mode
directions = [horizontal_mode, vertical_mode]
# ------------------------------------------------------------------------------
class ResidualImagePlotSettings(ImagePlotSettings):
    """
    Per-image plot settings for residual plotting: adds the residual
    amplitude and residual colormap to the base image settings.
    """
    def __init__(self, **kwargs):
        """
        The constructor.
        :param kwargs: initial values for the settings properties
        """
        # Call the constructor of the base class
        super(ResidualImagePlotSettings, self).__init__()
        # Define properties
        self.add_property("residual_amplitude", "percentage", "amplitude of the residual plots")
        self.add_boolean_property("soft_residual_amplitude", "soft residual amplitude", False) #, None) # use None as default to use plotter config if not defined
        self.add_property("residual_cmap", "string", "colormap for the residual plots") # no choices because can be absolute or not
        # Set properties
        self.set_properties(kwargs)
# ------------------------------------------------------------------------------
def plot_residuals(observations, models, **kwargs):
    """
    Convenience function: plots observation/model/residual grids.
    :param observations: the observed image frames
    :param models: the model image frames
    :param kwargs: configuration options passed to ResidualImageGridPlotter
    :return:
    """
    # Create the plotter and run it on the observations and models
    ResidualImageGridPlotter(**kwargs).run(observations=observations, models=models)
# -----------------------------------------------------------------
class ResidualImageGridPlotter(ImageGridPlotter):
"""
This class ...
"""
    def __init__(self, *args, **kwargs):
        """
        The constructor.
        :param args: positional arguments for the base class
        :param kwargs: keyword arguments for the base class
        """
        # Call the constructor of the base class
        super(ResidualImageGridPlotter, self).__init__(*args, **kwargs)
        # The image frames, keyed on image name
        self.observations = OrderedDict()
        self.errors = OrderedDict()
        self.models = OrderedDict()
        self.model_errors = OrderedDict()
        self.residuals = OrderedDict()
        # The residual distributions
        self.distributions = OrderedDict()
# ------------------------------------------------------------------------------
    @property
    def image_settings_class(self):
        # The settings class used for residual image panels
        return ResidualImagePlotSettings
    # ------------------------------------------------------------------------------
    def _run(self, **kwargs):
        """
        Runs the plotter: creates the residual maps and their distributions,
        then shows (if configured), writes and plots.
        :param kwargs:
        :return:
        """
        # Create the residual frames
        self.create_residuals()
        # Create the residual distributions
        self.create_distributions()
        # Show stuff
        if self.config.show: self.show()
        # Write
        self.write()
        # Plot
        self.plot()
# ------------------------------------------------------------------------------
    def setup(self, **kwargs):
        """
        Sets up the plotter: collects the input maps, optionally loads them
        from a directory, and initializes the figure.
        :param kwargs: may contain 'images', 'observations', 'models',
            'errors' and/or 'residuals' (consumed here)
        :return:
        """
        # Call the setup function of the base class
        super(ResidualImageGridPlotter, self).setup(**kwargs)
        # Load the images
        if kwargs.get(images_name, None) is not None: self.add_images(kwargs.pop(images_name))
        if kwargs.get(observations_name, None) is not None: self.add_observations(kwargs.pop(observations_name))
        if kwargs.get(models_name, None) is not None: self.add_models(kwargs.pop(models_name))
        if kwargs.get(errors_name, None) is not None: self.add_error_maps(kwargs.pop(errors_name))
        if kwargs.get(residuals_name, None) is not None: self.add_residual_maps(kwargs.pop(residuals_name))
        # Load from a directory: explicitly configured one, or the working path when nothing was added
        if self.config.from_directory is not None: self.load_from_directory(self.config.from_directory)
        elif not self.has_images: self.load_from_directory(self.config.path)
        # Initialize the figure
        self.initialize_figure()
# ------------------------------------------------------------------------------
    @property
    def figsize(self):
        # Fixed figure size in inches: (width, height)
        return (15,10)
    # ------------------------------------------------------------------------------
    @property
    def horizontal(self):
        # Whether images are laid out along the horizontal direction
        return self.config.direction == horizontal_mode
    # ------------------------------------------------------------------------------
    @property
    def vertical(self):
        # Whether images are laid out along the vertical direction
        return self.config.direction == vertical_mode
    # ------------------------------------------------------------------------------
    @lazyproperty
    def npanels(self):
        # Number of panels per image
        if self.config.distributions: return 4 # observation, model, residual, distribution
        else: return 3 # observation, model, residual
    # ------------------------------------------------------------------------------
    @lazyproperty
    def nrows(self):
        # Number of grid rows, depending on the layout direction
        if self.horizontal: return self.npanels
        elif self.vertical: return self.nimages
        else: raise ValueError("Invalid direction")
    # ------------------------------------------------------------------------------
    @lazyproperty
    def ncolumns(self):
        # Number of grid columns, depending on the layout direction
        if self.horizontal: return self.nimages
        elif self.vertical: return self.npanels
        else: raise ValueError("Invalid direction")
    # ------------------------------------------------------------------------------
    @property
    def share_x(self):
        # Panels share the x axis
        return True
    # ------------------------------------------------------------------------------
    @property
    def share_y(self):
        # Panels share the y axis
        return True
# ------------------------------------------------------------------------------
    def initialize_figure(self):
        """
        Creates the MPL figure, its grid spec, and the (empty) nrows x
        ncolumns structure that will hold the individual plots.
        :return:
        """
        # Debugging
        log.debug("Initializing the figure with size " + str(self.figsize) + " ...")
        # Create the plot
        self.figure = MPLFigure(size=self.figsize)
        # Create plots
        #self.plots = self.figure.create_grid(self.nrows, self.ncolumns, sharex=self.share_x, sharey=self.share_y)
        # Create grid
        self.grid = self.figure.create_gridspec(self.nrows, self.ncolumns, hspace=0.0, wspace=0.0)
        # Initialize structure to contain the plots
        #print("NCOLUMNS", self.ncolumns)
        #print("NROWS", self.nrows)
        self.plots = [[None for i in range(self.ncolumns)] for j in range(self.nrows)]
# ------------------------------------------------------------------------------
    @property
    def all_names(self):
        # Unique names across observations, models, error maps and residual maps
        return sequences.combine_unique(self.observation_names, self.model_names, self.errors_names, self.residuals_names)
    # ------------------------------------------------------------------------------
    @property
    def observation_names(self):
        return self.observations.keys()
    # ------------------------------------------------------------------------------
    def has_observation(self, name):
        """
        Whether an observation with this name has been added.
        :param name:
        :return:
        """
        return name in self.observation_names
    # ------------------------------------------------------------------------------
    @property
    def model_names(self):
        return self.models.keys()
    # ------------------------------------------------------------------------------
    def has_model(self, name):
        """
        Whether a model with this name has been added.
        :param name:
        :return:
        """
        return name in self.model_names
    # ------------------------------------------------------------------------------
    @property
    def errors_names(self):
        return self.errors.keys()
    # ------------------------------------------------------------------------------
    def has_errors(self, name):
        """
        Whether an error map with this name has been added.
        :param name:
        :return:
        """
        return name in self.errors
    # ------------------------------------------------------------------------------
    @property
    def model_errors_names(self):
        return self.model_errors.keys()
    # ------------------------------------------------------------------------------
    def has_model_errors(self, name):
        """
        Whether a model error map with this name has been added.
        :param name:
        :return:
        """
        return name in self.model_errors
    # ------------------------------------------------------------------------------
    @property
    def residuals_names(self):
        return self.residuals.keys()
    # ------------------------------------------------------------------------------
    def has_residuals(self, name):
        """
        Whether a residual map with this name has been added.
        :param name:
        :return:
        """
        return name in self.residuals
    # ------------------------------------------------------------------------------
    @property
    def distribution_names(self):
        return self.distributions.keys()
    # ------------------------------------------------------------------------------
    def has_distribution(self, name):
        """
        Whether a residual distribution with this name has been created.
        :param name:
        :return:
        """
        return name in self.distributions
    # ------------------------------------------------------------------------------
    @property
    def settings_names(self):
        return self.settings.keys()
    # ------------------------------------------------------------------------------
    def has_settings(self, name):
        """
        Whether plot settings for this name have been defined.
        :param name:
        :return:
        """
        return name in self.settings_names
    # ------------------------------------------------------------------------------
    @property
    def names(self):
        # The image names are the observation names
        return self.observation_names
# ------------------------------------------------------------------------------
@property
def first_name(self):
return self.names[0]
# ------------------------------------------------------------------------------
@property
def first_observation(self):
return self.get_observation(self.first_name)
# ------------------------------------------------------------------------------
@property
def first_frame(self):
return self.first_observation
# ------------------------------------------------------------------------------
@property
def nimages(self):
return len(self.names)
# ------------------------------------------------------------------------------
@property
def has_images(self):
return self.nimages > 0
# ------------------------------------------------------------------------------
def add_image(self, name, observation, model=None, errors=None, model_errors=None, residuals=None, replace=False,
settings=None):
"""
This function ...
:param name:
:param observation:
:param model:
:param errors:
:param model_errors:
:param residuals:
:param replace:
:param settings:
:return:
"""
# Check if name already exists
if not replace and name in self.names: raise ValueError("Already an image with name '" + name + "' added")
# Check type of the image
if isinstance(observation, Image):
# Get observation frame
if observation_name in observation.frame_names: observation = observation.frames[observation_name]
else: observation = observation.primary
# Get model frame
if model_name in observation.frame_names:
if model is not None: raise ValueError("Cannot pass model frame if image contains model frame")
model = observation.frames[model_name]
# Get errors frame
if errors_name in observation.frame_names:
if errors is not None: raise ValueError("Cannot pass error map if image contains error map")
errors = observation.frames[errors_name]
# Get model errors frame
if model_errors_name in observation.frame_names:
if model_errors is not None: raise ValueError("Cannot pass model error map if image contains model error map")
model_errors = observation.frames[model_errors_name]
# Get residuals frame
if residuals_name in observation.frame_names:
if residuals is not None: raise ValueError("Cannot pass residual map if image contains residual map")
residuals = observation.frames[residuals_name]
# Check the type of the model image
if model is not None and isinstance(model, Image):
# Get the model frame
if model_name in model.frame_names: model = model.frames[model_name]
else: model = model.primary
# Get the model errors frame
if model_errors_name in model.frame_names:
if errors_name in model.frame_names: raise ValueError("Model image contains both 'errors' and 'model_errors' frame")
if model_errors is not None: raise ValueError("Cannot pass model error map if model image contains model error map")
model_errors = model.frames[model_errors_name]
elif errors_name in model.frame_names:
if model_errors is not None: raise ValueError("Cannot pass model error map if model image contains error map")
model_errors = model.frames[errors_name]
# Add observation
self.observations[name] = observation
# Add model
if model is not None: self.models[name] = model
# Add errors
if errors is not None: self.errors[name] = errors
# Add model errors
if model_errors is not None: self.model_errors[name] = model_errors
# Add residuals
if residuals is not None: self.residuals[name] = residuals
# Set settings
if settings is not None: self.settings[name].set_properties(settings)
# ------------------------------------------------------------------------------
def add_observation(self, name, frame, errors=None):
"""
This function ...
:param name:
:param frame:
:param errors:
:return:
"""
# Check the type of the image
if isinstance(frame, Image):
# Get observation frame
if observation_name in frame.frame_names: frame = frame.frames[observation_name]
else: frame = frame.primary
# Get error map
if errors_name in frame.frame_names:
if errors is not None: raise ValueError("Cannot pass error map if image contains error map")
errors = frame.frames[errors_name]
# Check whether there are no other frames
if sequences.contains_more(frame.frame_names, ["primary", observation_name, errors_name]): raise ValueError("Observation image contains too many frames")
# Add observation frame
self.observations[name] = frame
# Add error map
if errors is not None: self.errors[name] = errors
# ------------------------------------------------------------------------------
def add_model(self, name, frame, errors=None):
"""
This function ...
:param name:
:param frame:
:param errors:
:return:
"""
# Check the type of the image
if isinstance(frame, Image):
# Get model frame
if model_name in frame.frame_names: frame = frame.frames[model_name]
else: frame = frame.primary
# Get error map
if errors_name in frame.frame_names:
if model_errors_name in frame.frame_names: raise ValueError("Model image contains both 'errors' and 'model_errors' frame")
if errors is not None: raise ValueError("Cannot pass error map if image contains error map")
errors = frame.frames[errors_name]
elif model_errors_name in frame.frame_names:
if errors is not None: raise ValueError("Cannot pass error map if image contains error map")
errors = frame.frames[model_errors_name]
# Check whether there are no other frames
if sequences.contains_more(frame.frame_names, ["primary", model_name, errors_name, model_errors_name]): raise ValueError("Model image contains too many frames")
# Add model frame
self.models[name] = frame
# Add error map
if errors is not None: self.model_errors[name] = errors
# ------------------------------------------------------------------------------
def add_errors(self, name, frame):
"""
This function ...
:param name:
:param frame:
:return:
"""
# Add
self.errors[name] = frame
# ------------------------------------------------------------------------------
def add_model_errors(self, name, frame):
"""
Thisn function ...
:param name:
:param frame:
:return:
"""
# Add
self.model_errors[name] = frame
# ------------------------------------------------------------------------------
def add_residuals(self, name, frame):
"""
This function ...
:param name:
:param frame:
:return:
"""
# Add
self.residuals[name] = frame
# ------------------------------------------------------------------------------
def add_distribution(self, name, distribution):
"""
This function ...
:param name:
:param distribution:
:return:
"""
# Add
self.distributions[name] = distribution
# -----------------------------------------------------------------
def add_settings(self, name, **settings):
"""
This function ...
:param name:
:param settings:
:return:
"""
# Set settings
self.settings[name].set_properties(settings)
# ------------------------------------------------------------------------------
def set_settings(self, name, settings):
"""
This function ...
:param name:
:param settings:
:return:
"""
# Set settings
self.settings[name] = settings
# ------------------------------------------------------------------------------
def set_setting(self, name, setting_name, value):
"""
This function ...
:param name:
:param setting_name:
:param value:
:return:
"""
# Set
self.settings[name][setting_name] = value
# ------------------------------------------------------------------------------
def add_images(self, images):
"""
This function ...
:param images:
:return:
"""
# Debugging
log.debug("Adding images ...")
# Loop over the images
for name in images:
# Get the image
image = images[name]
# Add
self.add_image(name, image)
# ------------------------------------------------------------------------------
def add_observations(self, frames):
"""
This function ...
:param frames:
:return:
"""
# Debugging
log.debug("Adding observations ...")
# Loop over the frames
for name in frames:
# Get the frames
frame = frames[name]
# Add
self.add_observation(name, frame)
# ------------------------------------------------------------------------------
def add_models(self, frames):
"""
This function ...
:param frames:
:return:
"""
# Debugging
log.debug("Adding models ...")
# Loop over the frames
for name in frames:
# Get the frames
frame = frames[name]
# Add
self.add_model(name, frame)
# ------------------------------------------------------------------------------
def add_error_maps(self, frames):
"""
This function ...
:param frames:
:return:
"""
# Debugging
log.debug("Adding error maps ...")
# Loop over the frames
for name in frames:
# Get the frame
frame = frames[name]
# Add
self.add_errors(name, frame)
# ------------------------------------------------------------------------------
def add_model_error_maps(self, frames):
"""
This function ...
:param frames:
:return:
"""
# Debugging
log.debug("Adding model error maps ...")
# Loop over the frames
for name in frames:
# Get the frame
frame = frames[name]
# Add
self.add_model_errors(name, frame)
# ------------------------------------------------------------------------------
def add_residual_maps(self, frames):
"""
This function ...
:param frames:
:return:
"""
# Debugging
log.debug("Adding residual maps ...")
# Loop over the frames
for name in frames:
# Get the frame
frame = frames[name]
# Add
self.add_residuals(name, frame)
# ------------------------------------------------------------------------------
def load_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Are there FITS files in the directory?
if fs.has_files_in_path(path, extension="fits"): self.load_images_from_directory(path)
# Are there subdirectories?
elif fs.has_directories_in_path(path):
# Determine paths
images_path = fs.join(path, images_name)
observations_path = fs.join(path, observations_name)
models_path = fs.join(path, models_name)
residuals_path = fs.join(path, residuals_name)
settings_path = fs.join(path, settings_name)
# Load observations
if fs.is_directory(images_path): self.load_images_from_directory(path)
if fs.is_directory(observations_path): self.load_observations_from_directory(path)
if fs.is_directory(models_path): self.load_models_from_directory(path)
if fs.is_directory(residuals_path): self.load_residuals_from_directory(path)
if fs.is_directory(settings_path): self.load_settings_from_directory(path)
# No FITS files nor subdirectories
else: raise IOError("No image files nor subdirectories found in '" + path + "'")
# ------------------------------------------------------------------------------
def load_images_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Debugging
log.debug("Loading image files from '" + path + "' ...")
# Loop over the FITS files
for name, filepath in fs.files_in_path(path, extension="fits", returns=["name", "path"]):
# Debugging
log.debug("Loading '" + name + "' image ...")
# Load the image
image = Image.from_file(filepath, always_call_first_primary=False)
# Add the image
self.add_image(name, image)
# ------------------------------------------------------------------------------
def load_observations_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Debugging
log.debug("Loading observed image frames from '" + path + "' ...")
# Loop over the FITS files
for name, filepath in fs.files_in_path(path, extension="fits", returns=["name", "path"]):
# Debugging
log.debug("Loading the '" + name + "' observed image ...")
# Get header
#header = get_header(filepath)
# Get the filter
#fltr = get_filter(name, header=header)
# Check whether the filter is in the list of filters to be plotted
#if fltr not in config.filters: continue
# Get the index for this filter
#index = config.filters.index(fltr)
# Load the image
#frame = Frame.from_file(filepath)
image = Image.from_file(filepath, always_call_first_primary=False)
# Replace zeroes and negatives
image.primary.replace_zeroes_by_nans()
image.primary.replace_negatives_by_nans()
# Add the image
self.add_observation(name, image)
# ------------------------------------------------------------------------------
def load_models_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Debugging
log.debug("Loading model image frames from '" + path + "' ...")
# Loop over the FITS files
for name, filepath in fs.files_in_path(path, extension="fits", returns=["name", "name"]):
# Debugging
log.debug("Loading the '" + name + "' model image ...")
# Load the image
image = Image.from_file(filepath, always_call_first_primary=False)
# Replace zeroes and negatives
image.primary.replace_zeroes_by_nans()
image.primary.replace_negatives_by_nans()
# Add the image
self.add_model(name, image)
# ------------------------------------------------------------------------------
def load_residuals_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Debugging
log.debug("Loading residual image frames from '" + path + "' ...")
# Loop over the FITS files
for name, filepath in fs.files_in_path(path, extension="fits", returns=["name", "path"]):
# Debugging
log.debug("Loading the '" + name + "' residual map ...")
# Load the frame
frame = Frame.from_file(filepath)
# Add the map
self.add_residuals(name, frame)
# ------------------------------------------------------------------------------
def load_settings_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Debugging
log.debug("Loading plotting settings from '" + path + "' ...")
# Loop over the dat files
for name, filepath in fs.files_in_path(path, extension="dat", returns=["name", "path"]):
# Debugging
log.debug("Loading the '" + name + "' settings ...")
# Load the settings
settings = ImagePlotSettings.from_file(filepath)
# Set the settings
self.set_settings(name, settings)
# ------------------------------------------------------------------------------
def get_observation_or_model(self, name):
"""
This function ...
:param name:
:return:
"""
if self.has_observation(name): return self.get_observation(name)
elif self.has_model(name): return self.get_model(name)
else: raise ValueError("Doesn't have observation or model for name '" + name + "'")
# ------------------------------------------------------------------------------
def get_filter(self, name):
"""
This function ...
:param name:
:return:
"""
return self.get_observation_or_model(name).filter
# ------------------------------------------------------------------------------
def get_wcs(self, name):
"""
Thisf unction ...
:param name:
:return:
"""
return self.get_observation_or_model(name).wcs
# ------------------------------------------------------------------------------
def calculate_residuals(self, name):
"""
This function ...
:param name:
:return:
"""
# Get the frames
#observation = self.observations[name]
#model = self.models[name]
# Uniformize
observation, model = uniformize(self.observations[name], self.models[name])
# Error-weighed residuals
if self.config.weighed:
if self.config.weighing_reference == observation_name:
if not self.has_errors(name): raise ValueError("No errors for the '" + name + "' image")
errors = self.get_errors(name)
elif self.config.weighing_reference == model_name:
if not self.has_model_errors(name): raise ValueError("No model errors for the '" + name + "' image")
errors = self.get_model_errors(name)
else: raise ValueError("Invalid value for 'weighing_reference'")
# Calculate
res = Frame((model - observation) / errors, wcs=observation.wcs)
# Relative residuals
elif self.config.relative: res = Frame((model - observation) / observation, wcs=observation.wcs)
# Absolute residuals
else: res = Frame(model - observation, wcs=observation.wcs)
# Take absolute values?
if self.config.absolute: res = res.absolute
# Return the residual
return res
# ------------------------------------------------------------------------------
def create_residuals(self):
"""
This function ...
:param self:
:return:
"""
# Inform the user
log.info("Creating the residual frames ...")
# Loop over the observed images
for name in self.names:
# Checks
if not self.has_model(name): continue
if self.has_residuals(name): continue
# Debugging
log.debug("Creating residual frame for the '" + name + "' image ...")
# Create
res = self.calculate_residuals(name)
# Add the residuals frame
self.residuals[name] = res
# ------------------------------------------------------------------------------
def create_distributions(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating the residual distributions ...")
# Loop over the residual maps
for name in self.residuals_names:
# Checks
if self.has_distribution(name): continue
# Debugging
log.debug("Creating distribution for the '" + name + "' residuals ...")
# Get the residual map
residuals = self.get_residuals(name)
# Create the distribution
distribution = Distribution.from_data("Residual", residuals, sigma_clip=self.config.sigma_clip_distributions, sigma_level=self.config.sigma_clip_level)
# Add the distribution
self.distributions[name] = distribution
# ------------------------------------------------------------------------------
def get_observation(self, name):
"""
This function ...
:param name:
:return:
"""
return self.observations[name]
# ------------------------------------------------------------------------------
@memoize_method
def get_observation_image(self, name):
"""
This function ...
:param name:
:return:
"""
# Create image
image = Image(name=name)
# Add observation frame
image.add_frame(self.get_observation(name), observation_name)
# Add error map
if self.has_errors(name): image.add_frame(self.get_errors(name), errors_name)
# Return the image
return image
# ------------------------------------------------------------------------------
def get_model(self, name):
"""
This function ...
:param name:
:return:
"""
return self.models[name]
# ------------------------------------------------------------------------------
@memoize_method
def get_model_image(self, name):
"""
This function ...
:param name:
:return:
"""
# Create image
image = Image(name=name)
# Add model frame
image.add_frame(self.get_model(name), model_name)
# Add error map
if self.has_model_errors(name): image.add_frame(self.get_model_errors(name), errors_name)
# Return the image
return image
# ------------------------------------------------------------------------------
def get_errors(self, name):
"""
This function ...
:param name:
:return:
"""
return self.errors[name]
# ------------------------------------------------------------------------------
def get_model_errors(self, name):
"""
This function ...
:param name:
:return:
"""
return self.model_errors[name]
# ------------------------------------------------------------------------------
def get_residuals(self, name):
"""
This function ...
:param name:
:return:
"""
return self.residuals[name]
# ------------------------------------------------------------------------------
def get_distribution(self, name):
"""
This function ...
:param name:
:return:
"""
return self.distributions[name]
# ------------------------------------------------------------------------------
@memoize_method
def get_image(self, name):
"""
This function ...
:param name:
:return:
"""
# Create the image
image = Image(name=name)
# Add the observation
if self.has_observation(name): image.add_frame(self.get_observation(name), observation_name)
# Add the model
if self.has_model(name): image.add_frame(self.get_model(name), model_name)
# Add the errors
if self.has_errors(name): image.add_frame(self.get_errors(name), errors_name)
# Add the model errors
if self.has_model_errors(name): image.add_frame(self.get_model_errors(name), model_errors_name)
# Add the residuals
if self.has_residuals(name): image.add_frame(self.get_residuals(name), residuals_name)
# Return the image
return image
# ------------------------------------------------------------------------------
def get_settings(self, name):
"""
This function ...
:param name:
:return:
"""
return self.settings[name]
# ------------------------------------------------------------------------------
def show(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Showing ...")
# ------------------------------------------------------------------------------
def write(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing ...")
# Write observations
if self.config.write_observations: self.write_observations()
# Write models
if self.config.write_models: self.write_models()
# Write residual frames
if self.config.write_residuals: self.write_residuals()
# Write the images
if self.config.write_images: self.write_images()
# Write the distributions
if self.config.write_distributions: self.write_distributions()
# Write the settings
if self.config.write_settings: self.write_settings()
# ------------------------------------------------------------------------------
@lazyproperty
def images_path(self):
return self.output_path_directory(images_name)
# ------------------------------------------------------------------------------
@lazyproperty
def observations_path(self):
return self.output_path_directory(observations_name)
# ------------------------------------------------------------------------------
@lazyproperty
def models_path(self):
return self.output_path_directory(models_name)
# ------------------------------------------------------------------------------
@lazyproperty
def residuals_path(self):
return self.output_path_directory(residuals_name)
# ------------------------------------------------------------------------------
@lazyproperty
def distributions_path(self):
return self.output_path_directory(distributions_name)
# ------------------------------------------------------------------------------
@lazyproperty
def settings_path(self):
return self.output_path_directory(settings_name)
# ------------------------------------------------------------------------------
def write_images(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the images ...")
# Loop over all images
for name in self.all_names:
# Determine path
path = fs.join(self.images_path, name + ".fits")
# Debugging
log.debug("Writing the '" + name + "' image ...")
# Get image
image = self.get_image(name)
# Save the image
image.saveto(path)
# ------------------------------------------------------------------------------
def write_observations(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the observed frames ...")
# Loop over the observed images
for name in self.observation_names:
# Determine the path
path = fs.join(self.observations_path, name + ".fits")
# Debugging
log.debug("Writing the '" + name + "' observed image ...")
# Get the frame
frame = self.get_observation_image(name)
# Save the frame
frame.saveto(path)
# ------------------------------------------------------------------------------
def write_models(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the model frames ...")
# Loop over the model images
for name in self.model_names:
# Determine the path
path = fs.join(self.models_path, name + ".fits")
# Debugging
log.debug("Writing the '" + name + "' model image ...")
# Get the frame
frame = self.get_model_image(name)
# Save the frame
frame.saveto(path)
# ------------------------------------------------------------------------------
def write_residuals(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the residual frames ...")
# Loop over the residual maps
for name in self.residuals_names:
# Determine the path
path = fs.join(self.residuals_path, name + ".fits")
# Debugging
log.debug("Writing the '" + name + "' residual frame ...")
# Get the residual map
frame = self.get_residuals(name)
# Save the frame
frame.saveto(path)
# ------------------------------------------------------------------------------
def write_distributions(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the residual distributions ...")
# Loop over the distributions
for name in self.distribution_names:
# Determine the path
path = fs.join(self.distributions_path, name + ".fits")
# Debugging
log.debug("Writing the '" + name + "' residual distribution ...")
# Get the distribution
distribution = self.get_distribution(name)
# Save
distribution.saveto(path)
# ------------------------------------------------------------------------------
def write_settings(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the plotting settings ...")
# Loop over the settings
for name in self.settings_names:
# Determine the path
path = fs.join(self.settings_path, name + ".dat")
# Debugging
log.debug("Writing the '" + name + "' plotting settings ...")
# Get the settings
settings = self.get_settings(name)
# Save
settings.saveto(path)
# ------------------------------------------------------------------------------
def plot(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting ...")
# Plot observations
self.plot_observations()
# Plot models
self.plot_models()
# Plot residuals
self.plot_residuals()
# Plot distributions
if self.config.distributions: self.plot_distributions()
# Finish the plot
self.finish()
# ------------------------------------------------------------------------------
def get_label(self, name):
"""
This function ...
:param name:
:return:
"""
# No settings?
if not self.has_settings(name): return name
# Get the settings
settings = self.get_settings(name)
# Return
if settings.label is not None: return settings.label
else: return name
# ------------------------------------------------------------------------------
def get_colormap(self, name):
"""
This function ...
:param name:
:return:
"""
# No settings?
if not self.has_settings(name): return self.config.cmap
# Get the settings
settings = self.get_settings(name)
# Return
if settings.cmap is not None: return settings.cmap
else: return self.config.cmap
# ------------------------------------------------------------------------------
@property
def config_residual_cmap(self):
"""
This function ...
:return:
"""
if self.config.absolute: return self.config.absolute_residual_cmap
else: return self.config.residual_cmap
# ------------------------------------------------------------------------------
def get_residual_colormap(self, name):
"""
This function ...
:param name:
:return:
"""
# No settings
if not self.has_settings(name): return self.config_residual_cmap
# Get the settings
settings = self.get_settings(name)
# Return
if settings.residual_cmap is not None: return settings.residual_cmap
else: return self.config_residual_cmap
# ------------------------------------------------------------------------------
def get_limits(self, name):
"""
This function ...
:param name:
:return:
"""
# No settings
if not self.has_settings(name): return self.config.vmin, self.config.vmax, False, False
# Get the settings
settings = self.get_settings(name)
# Get limits
vmin = settings.vmin if settings.vmin is not None else self.config.vmin
vmax = settings.vmax if settings.vmax is not None else self.config.vmax
# Get flags
soft_vmin = settings.soft_vmin if settings.vmin is not None else False # don't use True flag if vmin is not set in settings
soft_vmax = settings.soft_vmax if settings.vmax is not None else False # don't use True flag if vmax is not set in settings
# Return
return vmin, vmax, soft_vmin, soft_vmax
# ------------------------------------------------------------------------------
def get_residual_amplitude(self, name):
"""
This function ...
:param name:
:return:
"""
# No settings
if not self.has_settings(name): return self.config.residual_amplitude, False
# Get the settings
settings = self.get_settings(name)
# Get amplitude
amplitude = settings.residual_amplitude if settings.residual_amplitude is not None else self.config.residual_amplitude
# Get flag
soft_amplitude = settings.soft_residual_amplitude if settings.residual_amplitude is not None else False # don't use True flag if amplitude is not set in settings
# Return
return amplitude, soft_amplitude
# ------------------------------------------------------------------------------
def set_limits(self, name, vmin, vmax, soft_vmin=None, soft_vmax=None):
"""
This function ...
:param name:
:param vmin:
:param vmax:
:param soft_vmin:
:param soft_vmax:
:return:
"""
# Set vmin and vmax
self.add_settings(name, vmin=vmin, vmax=vmax)
# Set flags
if soft_vmin is not None: self.set_setting(name, "soft_vmin", soft_vmin)
if soft_vmax is not None: self.set_setting(name, "soft_vmax", soft_vmax)
# ------------------------------------------------------------------------------
def get_vmin_vmax(self, frame, vmin=None, vmax=None, soft_vmin=False, soft_vmax=False):
"""
This function ...
:param frame:
:param vmin:
:param vmax:
:param soft_vmin:
:param soft_vmax:
:return:
"""
# Defined?
has_vmin = vmin is not None
has_vmax = vmax is not None
# Vmin and vmax don't have to be calculated
if has_vmin and has_vmax and (not soft_vmin) and (not soft_vmax): return vmin, vmax
# Calculate vmin and or vmax
return get_vmin_vmax(frame.data, interval=self.config.interval, zmin=vmin, zmax=vmax, soft_zmin=soft_vmin, soft_zmax=soft_vmax)
# ------------------------------------------------------------------------------
def get_residual_vmin_vmax(self, frame, amplitude=None, soft_amplitude=False):
"""
This function ...
:param frame:
:param amplitude:
:param soft_amplitude:
:return:
"""
# Defined?
if amplitude is not None and not soft_amplitude:
if self.config.absolute: return 0., amplitude
else: return -amplitude, amplitude
# Calculate vmin and or vmax
if self.config.absolute: return get_vmin_vmax(frame.data, interval=self.config.residual_interval, zmin=0, zmax=amplitude, soft_zmin=False, soft_zmax=soft_amplitude)
else:
zmin = -amplitude if amplitude is not None else None
zmax = amplitude
return get_vmin_vmax(frame.data, interval=self.config.residual_interval, zmin=zmin, zmax=zmax, soft_zmin=soft_amplitude, soft_zmax=soft_amplitude, around_zero=True, symmetric=True)
# ------------------------------------------------------------------------------
def get_observation_row_col(self, index):
"""
This function ...
:param index:
:return:
"""
# Horizontal
#if self.horizontal: return index, 0
if self.horizontal: return 0, index
# Vertical
#elif self.vertical: return 0, index
elif self.vertical: return index, 0
# Invalid
else: raise ValueError("Invalid direction")
# ------------------------------------------------------------------------------
def get_model_row_col(self, index):
"""
This function ...
:param index:
:return:
"""
# Horizontal
#if self.horizontal: return index, 1
if self.horizontal: return 1, index
# Vertical
#elif self.vertical: return 1, index
elif self.vertical: return index, 1
# Invalid
else: raise ValueError("Invalid direction")
# ------------------------------------------------------------------------------
def get_residuals_row_col(self, index):
    """
    Return the (row, col) grid position of the residuals panel at 'index'.
    :param index: panel index
    :return: row, col
    """
    # Residuals occupy the third row (horizontal layout) or the third column (vertical layout)
    if self.horizontal: return 2, index
    if self.vertical: return index, 2
    raise ValueError("Invalid direction")
# ------------------------------------------------------------------------------
def get_distribution_row_col(self, index):
    """
    Return the (row, col) grid position of the distribution panel at 'index'.
    :param index: panel index
    :return: row, col
    """
    # Distributions occupy the fourth row (horizontal layout) or the fourth column (vertical layout)
    if self.horizontal: return 3, index
    if self.vertical: return index, 3
    raise ValueError("Invalid direction")
# ------------------------------------------------------------------------------
def get_observation_spec(self, index, return_row_col=False):
    """
    Return the grid spec of the observation panel at 'index'.
    :param index: panel index
    :param return_row_col: also return the row and column indices
    :return: the spec (and optionally row, col)
    """
    # Locate the panel in the grid
    row, col = self.get_observation_row_col(index)
    spec = self.grid[row, col]
    return (spec, row, col) if return_row_col else spec
# ------------------------------------------------------------------------------
def get_model_spec(self, index, return_row_col=False):
    """
    Return the grid spec of the model panel at 'index'.
    :param index: panel index
    :param return_row_col: also return the row and column indices
    :return: the spec (and optionally row, col)
    """
    # Locate the panel in the grid
    row, col = self.get_model_row_col(index)
    spec = self.grid[row, col]
    return (spec, row, col) if return_row_col else spec
# ------------------------------------------------------------------------------
def get_residuals_spec(self, index, return_row_col=False):
    """
    Return the grid spec of the residuals panel at 'index'.
    :param index: panel index
    :param return_row_col: also return the row and column indices
    :return: the spec (and optionally row, col)
    """
    # Locate the panel in the grid
    row, col = self.get_residuals_row_col(index)
    spec = self.grid[row, col]
    return (spec, row, col) if return_row_col else spec
# ------------------------------------------------------------------------------
def get_distribution_spec(self, index, return_row_col=False):
    """
    Return the grid spec of the distribution panel at 'index'.
    :param index: panel index
    :param return_row_col: also return the row and column indices
    :return: the spec (and optionally row, col)
    """
    # Locate the panel in the grid
    row, col = self.get_distribution_row_col(index)
    spec = self.grid[row, col]
    return (spec, row, col) if return_row_col else spec
# ------------------------------------------------------------------------------
def create_observation_plot(self, index, frame):
    """
    Create the aplpy figure for the observation panel at 'index' and register it.
    :param index: panel index
    :param frame: the observation frame
    :return: the plot
    """
    # Locate the subplot within the grid
    spec, row, col = self.get_observation_spec(index, return_row_col=True)
    # aplpy expects the subplot as [xmin, ymin, dx, dy] in figure coordinates
    bbox = spec.get_position(self.figure.figure)
    rectangle = [bbox.x0, bbox.y0, bbox.width, bbox.height]
    # Create the plot, register it in the plot grid and return it
    plot = aplpy.FITSFigure(frame.to_hdu(), figure=self.figure.figure, subplot=rectangle)
    self.plots[row][col] = plot
    return plot
# ------------------------------------------------------------------------------
def create_model_plot(self, index, frame):
    """
    Create the aplpy figure for the model panel at 'index' and register it.
    :param index: panel index
    :param frame: the model frame
    :return: the plot
    """
    # Locate the subplot within the grid
    spec, row, col = self.get_model_spec(index, return_row_col=True)
    # aplpy expects the subplot as [xmin, ymin, dx, dy] in figure coordinates
    bbox = spec.get_position(self.figure.figure)
    rectangle = [bbox.x0, bbox.y0, bbox.width, bbox.height]
    # Create the plot, register it in the plot grid and return it
    plot = aplpy.FITSFigure(frame.to_hdu(), figure=self.figure.figure, subplot=rectangle)
    self.plots[row][col] = plot
    return plot
# ------------------------------------------------------------------------------
def create_residuals_plot(self, index, frame):
    """
    Create the aplpy figure for the residuals panel at 'index' and register it.
    :param index: panel index
    :param frame: the residuals frame
    :return: the plot
    """
    # Locate the subplot within the grid
    spec, row, col = self.get_residuals_spec(index, return_row_col=True)
    # aplpy expects the subplot as [xmin, ymin, dx, dy] in figure coordinates
    bbox = spec.get_position(self.figure.figure)
    rectangle = [bbox.x0, bbox.y0, bbox.width, bbox.height]
    # Create the plot, register it in the plot grid and return it
    plot = aplpy.FITSFigure(frame.to_hdu(), figure=self.figure.figure, subplot=rectangle)
    self.plots[row][col] = plot
    return plot
# ------------------------------------------------------------------------------
def _plot_observation(self, index, frame, cmap, label=None, vmin=None, vmax=None, soft_vmin=False, soft_vmax=False):
    """
    Plot one observed frame in its panel.
    :param index: panel index
    :param frame: the observed frame
    :param cmap: colormap
    :param label: title for the panel (optional)
    :param vmin: lower limit for the color scale (optional)
    :param vmax: upper limit for the color scale (optional)
    :param soft_vmin: treat vmin as a soft limit
    :param soft_vmax: treat vmax as a soft limit
    :return: the vmin and vmax actually used for the color scale
    """
    # Create the panel and determine the color scale limits
    figure = self.create_observation_plot(index, frame)
    vmin, vmax = self.get_vmin_vmax(frame, vmin=vmin, vmax=vmax, soft_vmin=soft_vmin, soft_vmax=soft_vmax)
    # Show the data
    figure.show_colorscale(vmin=vmin, vmax=vmax, cmap=cmap, stretch=self.config.scale)
    # Tick label font
    figure.tick_labels.set_font(size='small')
    # Center, radius and tick spacing
    figure.recenter(self.ra_center_deg, self.dec_center_deg, radius=self.radius_deg)
    figure.ticks.set_xspacing(self.spacing_deg)
    # Frame color
    figure.frame.set_color(self.frame_color)
    # Inward ticks on all four sides
    # NOTE(review): the ticks are configured on _ax2 here, while the model and
    # residual panels use _ax1 — confirm this difference is intentional
    figure._ax2.tick_params(direction='in', which='major', length=self.config.major_tick_length, top=True, right=True, bottom=True, left=True)
    figure._ax2.tick_params(direction='in', which='minor', length=self.config.minor_tick_length, top=True, right=True, bottom=True, left=True)
    # Color for NaN pixels
    figure.set_nan_color(self.nan_color)
    # Panel title
    if label is not None: figure._ax1.set_title(label, fontsize=self.config.label_fontsize)
    # Return the limits that were used
    return vmin, vmax
# ------------------------------------------------------------------------------
def _plot_model(self, index, frame, cmap, vmin=None, vmax=None, soft_vmin=None, soft_vmax=None):
    """
    Plot one model frame in its panel.
    :param index: panel index
    :param frame: the model frame
    :param cmap: colormap
    :param vmin: lower limit for the color scale (optional)
    :param vmax: upper limit for the color scale (optional)
    :param soft_vmin: treat vmin as a soft limit
    :param soft_vmax: treat vmax as a soft limit
    :return:
    """
    # Create the panel and determine the color scale limits
    figure = self.create_model_plot(index, frame)
    vmin, vmax = self.get_vmin_vmax(frame, vmin=vmin, vmax=vmax, soft_vmin=soft_vmin, soft_vmax=soft_vmax)
    # Show the data
    figure.show_colorscale(vmin=vmin, vmax=vmax, cmap=cmap, stretch=self.config.scale)
    # Tick label font
    figure.tick_labels.set_font(size='small')
    # Center, radius and tick spacing
    figure.recenter(self.ra_center_deg, self.dec_center_deg, radius=self.radius_deg)
    figure.ticks.set_xspacing(self.spacing_deg)
    # Frame color
    figure.frame.set_color(self.frame_color)
    # Inward ticks on all four sides
    figure._ax1.tick_params(direction='in', which='major', length=self.config.major_tick_length, top=True, right=True, bottom=True, left=True)
    figure._ax1.tick_params(direction='in', which='minor', length=self.config.minor_tick_length, top=True, right=True, bottom=True, left=True)
    # Color for NaN pixels
    figure.set_nan_color(self.nan_color)
# ------------------------------------------------------------------------------
def _plot_residuals(self, index, frame, cmap, amplitude=None, soft_amplitude=False):
    """
    Plot one residuals frame in its panel.
    :param index: panel index
    :param frame: the residuals frame
    :param cmap: colormap
    :param amplitude: fixed amplitude for the color scale (optional)
    :param soft_amplitude: treat the amplitude as a soft limit
    :return:
    """
    # Create the panel and determine the color scale limits
    figure = self.create_residuals_plot(index, frame)
    vmin, vmax = self.get_residual_vmin_vmax(frame, amplitude=amplitude, soft_amplitude=soft_amplitude)
    # Show the data (no stretch argument: linear scale)
    figure.show_colorscale(vmin=vmin, vmax=vmax, cmap=cmap)
    # Tick label font
    figure.tick_labels.set_font(size='small')
    # Center, radius and tick spacing
    figure.recenter(self.ra_center_deg, self.dec_center_deg, radius=self.radius_deg)
    figure.ticks.set_xspacing(self.spacing_deg)
    # Frame color
    figure.frame.set_color(self.frame_color)
    # Inward ticks on all four sides
    figure._ax1.tick_params(direction='in', which='major', length=self.config.major_tick_length, top=True, right=True, bottom=True, left=True)
    figure._ax1.tick_params(direction='in', which='minor', length=self.config.minor_tick_length, top=True, right=True, bottom=True, left=True)
    # Color for NaN pixels (background_color here, unlike nan_color in the other panels)
    figure.set_nan_color(self.background_color)
# ------------------------------------------------------------------------------
def _plot_distribution(self, index, distribution):
    """
    Plot the residual distribution in its panel.
    NOTE(review): this is an unimplemented placeholder — plot_distributions
    retrieves the distributions but nothing is drawn yet.
    :param index: panel index
    :param distribution: the residual distribution
    :return:
    """
    pass
# ------------------------------------------------------------------------------
def plot_observations(self):
    """
    Plot the observed image frames in their panels.
    :return:
    """
    # Inform the user
    log.info("Plotting the observed image frames ...")
    # Loop over the image names
    for panel_index, image_name in enumerate(self.names):
        # Debugging
        log.debug("Plotting the observed frame of the '" + image_name + "' image (panel " + str(panel_index+1) + " of " + str(self.nimages) + ") ...")
        # Gather the frame, label, colormap and scale limits for this image
        observation_frame = self.get_observation(image_name)
        panel_label = self.get_label(image_name)
        colormap = self.get_colormap(image_name)
        vmin, vmax, soft_vmin, soft_vmax = self.get_limits(image_name)
        # Plot, obtaining the limits that were actually used
        vmin, vmax = self._plot_observation(panel_index, observation_frame, colormap, label=panel_label, vmin=vmin, vmax=vmax, soft_vmin=soft_vmin, soft_vmax=soft_vmax)
        # Freeze these limits so the corresponding model panel uses the same scale
        self.set_limits(image_name, vmin, vmax, soft_vmin=False, soft_vmax=False)
# ------------------------------------------------------------------------------
def plot_models(self):
    """
    Plot the model image frames in their panels.
    :return:
    """
    # Inform the user
    log.info("Plotting the model image frames ...")
    # Loop over the images that have a model frame
    for panel_index, image_name in enumerate(self.names):
        if not self.has_model(image_name): continue
        # Debugging
        log.debug("Plotting the model frame of the '" + image_name + "' image (panel " + str(panel_index+1) + " of " + str(self.nimages) + ") ...")
        # Gather the frame, colormap and scale limits for this image
        model_frame = self.get_model(image_name)
        colormap = self.get_colormap(image_name)
        vmin, vmax, soft_vmin, soft_vmax = self.get_limits(image_name)
        # Plot
        self._plot_model(panel_index, model_frame, colormap, vmin=vmin, vmax=vmax, soft_vmin=soft_vmin, soft_vmax=soft_vmax)
# ------------------------------------------------------------------------------
def plot_residuals(self):
    """
    Plot the residual image frames in their panels.
    :return:
    """
    # Inform the user
    log.info("Plotting the residual image frames ...")
    # Loop over the images that have a residuals frame
    for panel_index, image_name in enumerate(self.names):
        if not self.has_residuals(image_name): continue
        # Debugging
        log.debug("Plotting the residuals frame of the '" + image_name + "' image (panel " + str(panel_index+1) + " of " + str(self.nimages) + ") ...")
        # Gather the frame, colormap and amplitude for this residual map
        residuals_frame = self.get_residuals(image_name)
        colormap = self.get_residual_colormap(image_name)
        amplitude, soft_amplitude = self.get_residual_amplitude(image_name)
        # Plot
        self._plot_residuals(panel_index, residuals_frame, colormap, amplitude=amplitude, soft_amplitude=soft_amplitude)
# ------------------------------------------------------------------------------
def plot_distributions(self):
    """
    Plot the residual distributions in their panels.
    :return:
    """
    # Inform the user
    log.info("Plotting the residual distributions ...")
    # Loop over the images that have a distribution
    for panel_index, image_name in enumerate(self.names):
        if not self.has_distribution(image_name): continue
        # Debugging
        log.debug("Plotting the residual distribution of the '" + image_name + "' image (panel " + str(panel_index+1) + " of " + str(self.nimages) + " ) ...")
        # Get the distribution
        # NOTE(review): the distribution is retrieved but never drawn; _plot_distribution is still a stub
        distribution = self.get_distribution(image_name)
# ------------------------------------------------------------------------------
def finish(self):
    """
    Draw the finished figure, then save it to the configured path or show it on screen.
    :return:
    """
    # Draw everything
    self.figure.draw()
    # Write to file when an output path is configured, otherwise display interactively
    if self.config.path is not None:
        self.figure.figure.savefig(self.config.path, dpi=self.config.dpi)
    else:
        plt.show()
    # Release the figure
    plt.close()
# ------------------------------------------------------------------------------
def plot_images_aplpy(frames, filepath=None, center=None, radius=None, xy_ratio=None, dark=False, scale="log",
                      colormap="inferno", nrows=None, ncols=None, orientation="horizontal", plotsize=3., distance=None,
                      share_scale=None, descriptions=None, minmax_scaling=0.5):
    """
    Plot a set of image frames in a grid of aplpy panels.
    :param frames: the frames (sequence, or dictionary of label -> frame)
    :param filepath: path to save the figure to (shown interactively when None)
    :param center: position to recenter the panels on
    :param radius: radius around the center
    :param xy_ratio: width/height ratio of each panel (default 0.85)
    :param dark: use the dark theme
    :param scale: stretch for the color scale: one name, a sequence, or a dictionary per label
    :param colormap: colormap name
    :param nrows: number of rows (derived from ncols or orientation when None)
    :param ncols: number of columns (derived from nrows or orientation when None)
    :param orientation: "horizontal" or "vertical" (used only when nrows and ncols are both None)
    :param plotsize: size of an individual panel
    :param distance: distance (currently unused in this function)
    :param share_scale: dictionary label -> other label whose scale interval to reuse
    :param descriptions: panel titles: None, a sequence, or a dictionary per label
    :param minmax_scaling: interval scaling factor: one value, a sequence, or a dictionary per label
    :return:
    """
    import matplotlib.gridspec as gridspec
    from pts.magic.tools import plotting
    # Set the theme
    set_theme(dark=dark)
    nimages = len(frames)
    # Panel size: width from plotsize, height from the aspect ratio
    xsize = plotsize
    if xy_ratio is None: xy_ratio = 0.85
    ysize = xsize / xy_ratio
    # Determine the number of columns and rows
    if nrows is None and ncols is None:
        if orientation == "horizontal": ncols, nrows = nimages, 1
        elif orientation == "vertical": ncols, nrows = 1, nimages
        else: raise ValueError("Invalid orientation: '" + orientation + "'")
    # Nrows is None but ncols is not: derive the number of rows
    # (BUGFIX: previously this computed 'ncols' by dividing by the undefined 'nrows')
    elif nrows is None: nrows = numbers.round_up_to_int(nimages / ncols)
    # Ncols is None but nrows is not: derive the number of columns
    # (BUGFIX: previously this divided by the undefined 'ncols')
    elif ncols is None: ncols = numbers.round_up_to_int(nimages / nrows)
    # Set figure size
    figxsize = xsize * ncols
    figysize = ysize * nrows
    # Create figure with appropriate size
    fig = plt.figure(figsize=(figxsize, figysize))
    # Create the grid, without spacing between the panels
    gs1 = gridspec.GridSpec(nrows, ncols)
    gs1.update(wspace=0., hspace=0.)
    plot_idx = 0
    # Get the frame labels
    if types.is_dictionary(frames):
        labels = frames.keys()
        frames = frames.values()
    else: labels = [frame.filter_name for frame in frames]
    # Set the scale for each image
    scales = dict()
    if types.is_string_type(scale):
        for label in labels: scales[label] = scale
    elif types.is_sequence(scale):
        for label, scalei in zip(labels, scale): scales[label] = scalei
    elif types.is_dictionary(scale): scales = scale
    else: raise ValueError("Invalid type for 'scale'")
    # Initialize the dictionary of intervals (so panels can share a scale)
    intervals = dict()
    # Set the descriptions
    if descriptions is None:
        descriptions = dict()
        for label in labels: descriptions[label] = None
    elif types.is_sequence(descriptions):
        descrpts = descriptions
        descriptions = dict()
        for label, descr in zip(labels, descrpts): descriptions[label] = descr
    elif types.is_dictionary(descriptions): pass # OK
    else: raise ValueError("Invalid type for 'descriptions'")
    # Set the minmax scaling per label (default 0.5)
    if types.is_real_type(minmax_scaling):
        factor = minmax_scaling
        minmax_scaling = dict()
        for label in labels: minmax_scaling[label] = factor
    elif types.is_dictionary(minmax_scaling):
        minmax_scaling_orig = minmax_scaling
        minmax_scaling = dict()
        for label in labels:
            if label in minmax_scaling_orig: minmax_scaling[label] = minmax_scaling_orig[label]
            else: minmax_scaling[label] = 0.5
    elif types.is_sequence(minmax_scaling):
        minmax_scaling_orig = minmax_scaling
        minmax_scaling = dict()
        for label, factor in zip(labels, minmax_scaling_orig): minmax_scaling[label] = factor
    else: raise ValueError("Invalid type for 'minmax_scaling'")
    # Loop over the frames
    for label, frame, index in zip(labels, frames, range(nimages)):
        # Grid position of this panel
        rowi = index // ncols
        coli = index % ncols
        is_first_row = rowi == 0
        is_last_row = rowi == nrows - 1
        is_first_col = coli == 0
        # Debugging
        log.debug("Plotting the '" + label + "' image ...")
        # Get HDU
        hdu = frame.to_hdu()
        # Get the interval: either shared with another panel or derived from the data
        if share_scale is not None and label in share_scale:
            share_with = share_scale[label]
            vmin, vmax = intervals[share_with]
            scalei = scales[share_with]
        else:
            scalei = scales[label]
            is_logscale = scalei == "log"
            vmin, vmax = plotting.get_vmin_vmax(frame.data, logscale=is_logscale, minmax_scaling=minmax_scaling[label])
        # Remember the interval so other panels can share it
        intervals[label] = (vmin, vmax,)
        # Set the panel title
        if descriptions[label] is not None: title = descriptions[label]
        else: title = label.replace("_", "\_").replace("um", "$\mu$m")
        # Has a sky coordinate system?
        has_wcs = frame.has_wcs and frame.wcs.is_sky
        # Plot the frame in its panel
        figi = aplpy.FITSFigure(hdu, figure=fig, subplot=list(gs1[plot_idx].get_position(fig).bounds))
        setup_map_plot(figi, colormap, vmin=vmin, vmax=vmax, label=r'' + str(title), center=center, radius=radius, scale=scalei, has_wcs=has_wcs)
        set_ticks(figi, is_first_row, is_last_row)
        # Show the y tick/axis labels only for the first column
        if is_first_col:
            figi.tick_labels.show_y()
            figi.axis_labels.show_y()
        # Show the x tick/axis labels only for the last row
        if is_last_row:
            figi.tick_labels.show_x()
            figi.axis_labels.show_x()
        # Increment
        plot_idx += 1
    # Save or show the figure
    if filepath is not None: plt.savefig(filepath, bbox_inches='tight', dpi=300)
    else: plt.show()
    # Close
    plt.close()
    # Reset the theme
    reset_theme()
# ------------------------------------------------------------------------------
def plot_one_residual_aplpy(observation, model, residual=None, path=None, scale="log", plotsize=3., dark=False,
                            center=None, radius=None, xy_ratio=None, first_label="Observation", second_label="Model",
                            residual_label="Residual", filter_label=True):
    """
    Plot one observation, its model and the (relative) residual map side by side.
    :param observation: the observed frame
    :param model: the model frame
    :param residual: the residual frame; computed as (model - observation) / observation when None
    :param path: path to save the figure to (shown interactively when None)
    :param scale: stretch for the observation and model color scales
    :param plotsize: size of an individual panel
    :param dark: use the dark theme
    :param center: position to recenter the panels on
    :param radius: radius around the center
    :param xy_ratio: width/height ratio of each panel (default 0.85)
    :param first_label: title of the observation panel (used when filter_label is disabled)
    :param second_label: title of the model panel
    :param residual_label: title of the residual panel
    :param filter_label: use the observation's filter name as the first panel title
    :return:
    """
    # Make the residual map ourselves when it was not given
    if residual is None: residual = (model - observation) / observation
    # Colormaps
    colormap = "inferno"
    residual_colormap = "RdBu"
    import matplotlib.gridspec as gridspec
    from pts.magic.tools import plotting
    # Set theme
    set_theme(dark=dark)
    nrows = 1
    ncols = 3
    xsize = plotsize
    if xy_ratio is None: xy_ratio = 0.85
    ysize = xsize / xy_ratio
    # Set figure size
    figxsize = xsize * ncols
    figysize = ysize * nrows
    # Create figure with appropriate size
    figure = MPLFigure(size=(figxsize,figysize))
    # Create grid
    # NOTE(review): the grid has 4 columns while only 3 panels are plotted (ncols = 3),
    # leaving the rightmost quarter of the figure empty — confirm whether this is intentional
    gs1 = gridspec.GridSpec(1, 4) # nimages ROWS, 4 COLUMNS
    gs1.update(wspace=0., hspace=0.)
    plot_idx = 0
    # Percentual residuals
    residual = residual * 100.
    # Set title for the first panel
    if filter_label and observation.has_filter: title = str(observation.filter).replace("um", " $\mu$m")
    else: title = first_label
    # Create HDU's for Aplpy
    observation_hdu = observation.to_hdu()
    model_hdu = model.to_hdu()
    residual_hdu = residual.to_hdu()
    # Get the interval (shared by the observation and model panels)
    vmin, vmax = plotting.get_vmin_vmax(observation.data, logscale=scale=="log")
    # OBSERVATION (first panel)
    fig1 = aplpy.FITSFigure(observation_hdu, figure=figure.figure, subplot=list(gs1[plot_idx].get_position(figure.figure).bounds))
    setup_map_plot(fig1, colormap, vmin=vmin, vmax=vmax, label=r'' + str(title), center=center, radius=radius, scale=scale, has_wcs=observation.has_celestial_wcs)
    set_ticks(fig1, True, True)
    # Enable y ticks and axis labels BECAUSE OBSERVATION IS THE FIRST COLUMN
    fig1.tick_labels.show_y()
    fig1.axis_labels.show_y()
    # Show x tick and axis labels (single-row layout)
    fig1.tick_labels.show_x()
    fig1.axis_labels.show_x()
    # Increment
    plot_idx += 1
    # MODEL (second panel), sharing the observation's interval
    fig2 = aplpy.FITSFigure(model_hdu, figure=figure.figure, subplot=list(gs1[plot_idx].get_position(figure.figure).bounds))
    setup_map_plot(fig2, colormap, vmin=vmin, vmax=vmax, label=second_label, center=center, radius=radius, scale=scale, has_wcs=model.has_celestial_wcs)
    set_ticks(fig2, True, True)
    # Show x tick and axis labels (single-row layout)
    fig2.tick_labels.show_x()
    fig2.axis_labels.show_x()
    # Increment
    plot_idx += 1
    # RESIDUAL (third panel), fixed linear scale from -100% to +100%
    fig3 = aplpy.FITSFigure(residual_hdu, figure=figure.figure, subplot=list(gs1[plot_idx].get_position(figure.figure).bounds))
    setup_map_plot(fig3, residual_colormap, vmin=-100, vmax=100, label=residual_label + ' (\%)', center=center, radius=radius, has_wcs=residual.has_celestial_wcs)
    set_ticks(fig3, True, True)
    # Show x tick and axis labels (single-row layout)
    fig3.tick_labels.show_x()
    fig3.axis_labels.show_x()
    # Show or save
    if path is None: figure.show()
    else: figure.saveto(path)
    # Reset theme
    reset_theme()
# ------------------------------------------------------------------------------
def plot_residuals_aplpy(observations, models, residuals, filepath=None, center=None, radius=None, xy_ratio=None,
                         dark=False, scale="log", plotsize=3., distance=None, mask_simulated=False, masks=None):
    """
    Plot rows of (observation, model, residual map, residual KDE) panels, one row per image.
    :param observations: the observed frames
    :param models: the model frames
    :param residuals: the residual frames (fractional; converted to percentages here)
    :param filepath: path to save the figure to (shown interactively when None)
    :param center: position to recenter the map panels on
    :param radius: radius around the center
    :param xy_ratio: width/height ratio of each panel (default 0.85)
    :param dark: use the dark theme
    :param scale: stretch for the observation and model color scales
    :param plotsize: size of an individual panel
    :param distance: distance, used for the unit conversion to "mJy/sr"
    :param mask_simulated: mask the model with the NaN pixels of the observation
    :param masks: if passed, both observations, models and residuals are masked
    :return:
    """
    import numpy as np
    import matplotlib.gridspec as gridspec
    from matplotlib.colorbar import ColorbarBase
    from matplotlib.colors import LinearSegmentedColormap
    from matplotlib.colors import Normalize
    import seaborn as sns
    # Set theme
    set_theme(dark=dark)
    nimages = len(observations)
    ncols = 4
    nrows = nimages
    # Colormaps
    colormap = "inferno"
    residual_colormap = "RdBu"
    # Set individual map plot size
    xsize = plotsize
    if xy_ratio is None: xy_ratio = 0.85
    ysize = xsize / xy_ratio
    # Set figure size
    figxsize = xsize * ncols
    figysize = ysize * nrows
    # Create figure with appropriate size
    fig = plt.figure(figsize=(figxsize, figysize))
    # Create grid without spacing between the panels
    gs1 = gridspec.GridSpec(nimages, 4) # nimages ROWS, 4 COLUMNS
    gs1.update(wspace=0., hspace=0.)
    plot_idx = 0
    # Loop over the filters
    if masks is None: masks = [None] * nimages
    for observation, model, residual, mask, index in zip(observations, models, residuals, masks, range(nimages)):
        # Convert both frames to the same surface-brightness unit before comparing
        # NOTE(review): this mutates the frames passed in by the caller — confirm that is acceptable
        observation.convert_to("mJy/sr", distance=distance)
        model.convert_to("mJy/sr", distance=distance)
        # MASK MODEL: rebin to the observation grid and hide pixels that are NaN in the observation
        if mask_simulated:
            model.rebin(observation.wcs)
            model.apply_mask_nans(observation.nans)
        # MASK ALL?
        if mask is not None:
            observation.apply_mask_nans(mask)
            model.apply_mask_nans(mask)
            residual.apply_mask_nans(mask)
        # IS FIRST OR LAST IMAGE (row)?
        is_first = index == 0
        is_last = index == nimages - 1
        # Debugging
        log.debug("Plotting the observation, model and residuals for the " + str(observation.filter) + " filter ...")
        # Percentual residuals
        residual = residual * 100.
        # Set the row title from the filter name
        title = str(observation.filter).replace("um", " $\mu$m")
        # Create HDU's for Aplpy
        observation_hdu = observation.to_hdu()
        model_hdu = model.to_hdu()
        residual_hdu = residual.to_hdu()
        from pts.magic.tools import plotting
        # Shared interval for the observation and model panels
        vmin, vmax = plotting.get_vmin_vmax(observation.data, logscale=scale=="log")
        # ------------------------------------------------------------------------------
        # Plot obs, model and residual
        # ------------------------------------------------------------------------------
        # OBSERVATION (first column)
        fig1 = aplpy.FITSFigure(observation_hdu, figure=fig, subplot=list(gs1[plot_idx].get_position(fig).bounds))
        setup_map_plot(fig1, colormap, vmin=vmin, vmax=vmax, label=r'' + str(title), center=center, radius=radius, scale=scale)
        set_ticks(fig1, is_first, is_last)
        # Enable y ticks and axis labels BECAUSE OBSERVATION IS THE FIRST COLUMN
        fig1.tick_labels.show_y()
        fig1.axis_labels.show_y()
        # SHOW THE X TICK LABELS AND AXIS LABELS ONLY IF LAST ROW
        if is_last: fig1.tick_labels.show_x()
        if is_last: fig1.axis_labels.show_x()
        # Increment
        plot_idx += 1
        # ------------------------------------------------------------------------------
        # MODEL (second column), sharing the observation's interval
        fig2 = aplpy.FITSFigure(model_hdu, figure=fig, subplot=list(gs1[plot_idx].get_position(fig).bounds))
        setup_map_plot(fig2, colormap, vmin=vmin, vmax=vmax, label='Model', center=center, radius=radius, scale=scale)
        set_ticks(fig2, is_first, is_last)
        # SHOW THE X TICK LABELS AND AXIS LABELS ONLY IF LAST ROW
        if is_last: fig2.tick_labels.show_x()
        if is_last: fig2.axis_labels.show_x()
        # Increment
        plot_idx += 1
        # ------------------------------------------------------------------------------
        # RESIDUAL (third column), fixed linear scale from -100% to +100%
        fig3 = aplpy.FITSFigure(residual_hdu, figure=fig, subplot=list(gs1[plot_idx].get_position(fig).bounds))
        setup_map_plot(fig3, residual_colormap, vmin=-100, vmax=100, label='Residual (\%)', center=center, radius=radius)
        set_ticks(fig3, is_first, is_last)
        # SHOW THE X TICK LABELS AND AXIS LABELS ONLY IF LAST ROW
        if is_last: fig3.tick_labels.show_x()
        if is_last: fig3.axis_labels.show_x()
        # ------------------------------------------------------------------------------
        # COLORBAR: a thin horizontal axes drawn inside the residual panel
        colorbar_start_x = gs1[plot_idx].get_position(fig).bounds[0] + 0.025
        colorbar_start_y = gs1[plot_idx].get_position(fig).bounds[1] + 0.085 / (nimages)
        colorbar_x_width = gs1[plot_idx].get_position(fig).bounds[2] - 0.05
        colorbar_y_height = gs1[plot_idx].get_position(fig).bounds[3]
        cb_ax = fig.add_axes([colorbar_start_x, colorbar_start_y, colorbar_x_width, (0.02 + 0.002) / (nimages + 1)])
        # Colourbar
        cb = ColorbarBase(cb_ax, cmap=residual_colormap, norm=Normalize(vmin=-100, vmax=100), orientation='horizontal')
        cb.ax.xaxis.set_ticks_position('bottom')
        cb.ax.xaxis.set_label_position('bottom')
        cb.ax.zorder = 99
        cb.ax.xaxis.set_tick_params(color='white')
        cb.outline.set_edgecolor('white')
        plt.setp(plt.getp(cb.ax.axes, 'yticklabels'), color='white')
        plt.setp(plt.getp(cb.ax.axes, 'xticklabels'), color='white')
        cb.set_ticks([-100, -50, 0, 50, 100])
        # Increment
        plot_idx += 1
        # ------------------------------------------------------------------------------
        # KDE plot of the residual value distribution (fourth column)
        residual = residual_hdu.data
        fig4 = plt.subplot(gs1[plot_idx])
        # Clip extreme residuals (outside +-200%) from the KDE
        residuals_to_kde = np.where((residual <= 200) & (residual >= -200))
        if dark:
            sns.kdeplot(residual[residuals_to_kde], bw='silverman', c='white', shade=True)
            fig4.axes.set_facecolor("black")
        else:
            sns.kdeplot(residual[residuals_to_kde], bw='silverman', c='k', shade=True)
            fig4.axes.set_facecolor("white")
        fig4.tick_params(labelleft='off')
        plt.xlim([-150, 150])
        fig4.tick_params(direction='in', which='major', length=7, top=True, right=False, bottom=True, left=False)
        fig4.tick_params(direction='in', which='major', length=7, top=True, right=False, bottom=True, left=False)
        # Hide tick labels except for the last (bottom) plot
        if not is_last: fig4.tick_params(labelbottom=False)
        # Vertical reference line at zero residual
        if dark: plt.axvline(0, c='white', ls='--', lw=2)
        else: plt.axvline(0, c='k', ls='--', lw=2)
        # Label for kde
        plt.xlabel('Residual (\%)')
        # Increment
        plot_idx += 1
    # Save the figure
    if filepath is not None: plt.savefig(filepath, bbox_inches='tight', dpi=300)
    else: plt.show()
    # Close
    plt.close()
    # Reset theme
    reset_theme()
# ------------------------------------------------------------------------------
def setup_map_plot(figure, colormap, vmin, vmax, label, smooth=None, text_x=0.05, text_y=0.95, center=None,
                   radius=None, scale="linear", has_wcs=True):
    """
    Apply the common styling to one aplpy map panel.
    :param figure: the aplpy figure
    :param colormap: colormap name
    :param vmin: lower color scale limit
    :param vmax: upper color scale limit
    :param label: text label drawn inside the panel
    :param smooth: smoothing for the color scale
    :param text_x: relative x position of the label
    :param text_y: relative y position of the label
    :param center: position to recenter on (requires radius)
    :param radius: radius around the center
    :param scale: stretch of the color scale
    :param has_wcs: the frame has a celestial coordinate system
    :return:
    """
    # Show the data
    figure.show_colorscale(cmap=colormap, vmin=vmin, vmax=vmax, smooth=smooth, stretch=scale)
    # Sexagesimal tick label formats only make sense with a sky coordinate system
    if has_wcs:
        figure.tick_labels.set_xformat('hh:mm:ss')
        figure.tick_labels.set_yformat('dd:mm:ss')
    # Black background, also for NaN pixels
    figure._ax1.set_facecolor('black')
    figure.set_nan_color('black')
    # Recenter (in sky or pixel coordinates)
    if center is not None:
        if radius is None: raise ValueError("Cannot specify center without radius")
        if has_wcs: figure.recenter(center.ra.to("deg").value, center.dec.to("deg").value, radius=radius.to("deg").value)
        else: figure.recenter(center.x, center.y, radius=radius)
    # Hide axes labels and tick labels by default (callers re-enable them per row/column)
    figure.axis_labels.hide()
    figure.tick_labels.hide()
    # White spines on every side
    for side in ('bottom', 'top', 'left', 'right'): figure._ax1.spines[side].set_color('white')
    # Draw the label in the upper-left corner on a translucent black box
    figure.add_label(text_x, text_y, r'' + str(label), relative=True, size=13, weight='bold', color='white',
                     horizontalalignment='left', verticalalignment='top',
                     bbox=dict(facecolor='black', edgecolor='none', alpha=0.5))
# ------------------------------------------------------------------------------
def set_ticks(figure, is_first_row, is_last_row):
    """
    Configure the tick placement of one panel according to its row position,
    so that vertically stacked panels share clean edges.
    :param figure: the aplpy figure
    :param is_first_row: panel is in the first row
    :param is_last_row: panel is in the last row
    :return:
    """
    ax = figure._ax1
    # The panel is the only row: inward ticks on all four sides
    if is_first_row and is_last_row:
        ax.tick_params(direction='in', which='major', length=7, top=True, right=True, bottom=True, left=True)
        ax.tick_params(direction='in', which='minor', length=4, top=True, right=True, bottom=True, left=True)
    # First of several rows: no ticks on the shared bottom edge
    elif is_first_row:
        ax.tick_params(direction='in', which='major', length=7, top=True, right=True, bottom=False, left=True)
        ax.tick_params(direction='in', which='minor', length=4, top=True, right=True, bottom=False, left=True)
    # Last row: long in/out ticks on the shared top edge, inward ticks elsewhere
    elif is_last_row:
        ax.tick_params(direction='inout', which='major', length=14, top=True, right=False, bottom=False, left=False)
        ax.tick_params(direction='inout', which='minor', length=8, top=True, right=False, bottom=False, left=False)
        ax.tick_params(direction='in', which='major', length=7, right=True, bottom=True, left=True)
        ax.tick_params(direction='in', which='minor', length=4, right=True, bottom=True, left=True)
    # Middle row: long in/out ticks on the shared top edge, no bottom ticks
    else:
        ax.tick_params(direction='inout', which='major', length=14, top=True, right=False, bottom=False, left=False)
        ax.tick_params(direction='inout', which='minor', length=8, top=True, right=False, bottom=False, left=False)
        ax.tick_params(direction='in', which='major', length=7, right=True, bottom=False, left=True)
        ax.tick_params(direction='in', which='minor', length=4, right=True, bottom=False, left=True)
# ------------------------------------------------------------------------------
def set_theme(dark=False):
    """Apply the plotting theme to matplotlib's global rcParams.

    :param dark: when True use white foreground (text, ticks, edges) on a
                 black background; otherwise black-on-white.
    :return: None (mutates plt.rcParams in place).
    """
    # General settings: font sizes, marker sizes and line widths.
    general = {
        "axes.labelsize": 14,    # 16 #default 20
        "xtick.labelsize": 8,    # 10 #default 16
        "ytick.labelsize": 8,    # 10 #default 16
        "legend.fontsize": 14,   # 10 #default 14
        "legend.markerscale": 0,
        "lines.markersize": 2.5, # 4 #default 4
        "axes.linewidth": 1,
    }
    plt.rcParams.update(general)

    # Colors: dark mode just swaps the foreground/background pair.
    foreground = 'white' if dark else 'black'
    background = 'black' if dark else 'white'
    plt.rcParams['axes.facecolor'] = background
    plt.rcParams['savefig.facecolor'] = background
    plt.rcParams['axes.edgecolor'] = foreground
    plt.rcParams['xtick.color'] = foreground
    plt.rcParams['ytick.color'] = foreground
    plt.rcParams["axes.labelcolor"] = foreground
    plt.rcParams["text.color"] = foreground
# ------------------------------------------------------------------------------
def reset_theme():
    """Restore matplotlib's global rcParams to the library defaults.

    Undoes any customization done by set_theme().
    """
    # rcParamsDefault holds the pristine settings captured at import time.
    plt.rcParams.update(plt.rcParamsDefault)
# ------------------------------------------------------------------------------
| agpl-3.0 |
jalonsob/Informes | vizGrimoireJS/utils.py | 3 | 5609 | #!/usr/bin/env python
# Copyright (C) 2014 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# This file is a part of the vizGrimoire.R package
#
# Authors:
# Daniel Izquierdo Cortazar <dizquierdo@bitergia.com>
from optparse import OptionParser
def read_options():
    """Parse and validate the command line options.

    Generic helper used by report_tool.py and other tools that analyze the
    information in databases.  Returns the optparse options object after
    checking the option combinations that must be supplied together.
    """
    parser = OptionParser(usage="usage: %prog [options]",
                          version="%prog 0.1")

    _NODEFAULT = object()  # sentinel: option declared without a default value

    # One entry per option, in the order they should appear in --help:
    # (flags, action, dest, default, help)
    option_specs = [
        (("-d", "--database"), "store", "dbname", _NODEFAULT,
         "Database where information is stored"),
        (("-u", "--dbuser"), "store", "dbuser", "root",
         "Database user"),
        (("-p", "--dbpassword"), "store", "dbpassword", "",
         "Database password"),
        (("-g", "--granularity"), "store", "granularity", "months",
         "year,months,weeks granularity"),
        (("-o", "--destination"), "store", "destdir", "../../../json",
         "Destination directory for JSON files"),
        (("-s", "--start"), "store", "startdate", "1900-01-01",
         "Start date for the report"),
        (("-e", "--end"), "store", "enddate", "2100-01-01",
         "End date for the report"),
        (("-i", "--identities"), "store", "identities_db", _NODEFAULT,
         "Database with unique identities and affiliations"),
        (("--npeople",), "store", "npeople", "10",
         "Limit for people analysis"),
        (("-c", "--config-file"), "store", "config_file",
         "../../../conf/main.conf",
         "Automator config file"),
        (("--data-source",), "store", "data_source", _NODEFAULT,
         "data source to be generated"),
        (("--study",), "store", "study", _NODEFAULT,
         "study to be generated"),
        (("--filter",), "store", "filter", _NODEFAULT,
         "filter to be generated"),
        (("--no-filters",), "store_true", "no_filters", _NODEFAULT,
         "don't generate filters metrics"),
        (("--item",), "store", "item", _NODEFAULT,
         "Select an item filter from data source to be generated."),
        (("-m", "--metrics"), "store", "metrics_path",
         "../vizgrimoire/metrics",
         "Path to the metrics modules to be loaded"),
        (("-t", "--type"), "store", "backend", "bugzilla",
         "Type of backend: bugzilla, allura, jira, github"),
        (("--metric",), "store", "metric", _NODEFAULT,
         "Select metric from data source to be generated."),
        (("--events",), "store_true", "events", _NODEFAULT,
         "Generate events."),
    ]
    for flags, action, dest, help_default, help_text in [
            (f, a, d, dv, h) for (f, a, d, dv, h) in option_specs]:
        kwargs = {"action": action, "dest": dest, "help": help_text}
        if help_default is not _NODEFAULT:
            kwargs["default"] = help_default
        parser.add_option(*flags, **kwargs)

    (opts, args) = parser.parse_args()

    # Positional arguments are not accepted.
    if len(args) != 0:
        parser.error("Wrong number of arguments")
    # Without an automator config file, the database coordinates must be
    # supplied explicitly on the command line.
    if opts.config_file is None:
        if not (opts.dbname and opts.dbuser and opts.identities_db):
            parser.error("--database --db-user and --identities are needed")
    if opts.metrics_path is None:
        parser.error("--metrics path is needed.")
    # --metric and --item only make sense combined with their context option.
    if opts.metric and opts.data_source is None:
        parser.error("--metric need also --data-source.")
    if opts.item and opts.filter is None:
        parser.error("--item need also --filter.")
    return opts
| gpl-3.0 |
glamp/coffe2py | main.py | 1 | 1282 | import sys
from IPython.core.interactiveshell import InteractiveShell
import pandasjson as json
import StringIO
if __name__=="__main__":
mode = "ipython"
line = sys.stdin.readline()
shell = InteractiveShell()
while line:
# explicitly write to stdout
sys.stdout.write(line)
sys.stdout.flush()
# handle incoming data, parse it, and redirect
# stdout so it doesn't interfere
line = sys.stdin.readline()
data = json.loads(line)
codeOut = StringIO.StringIO()
sys.stdout = codeOut
try:
code = data["code"]
if data.get("autocomplete")==True:
_, completions = shell.complete(code)
print json.dumps(completions)
elif code.startswith("print"):
#exec(code)
shell.ex(code)
else:
try:
#print repr(eval(code))
print repr(shell.ev(code))
except:
#exec(code)
shell.ex(code)
except Exception, e:
pass
sys.stdout = sys.__stdout__
data["result"] = codeOut.getvalue()
sys.stdout.write(json.dumps(data) + "\n")
sys.stdout.flush() | bsd-2-clause |
javachengwc/hue | desktop/core/ext-py/guppy-0.1.10/guppy/heapy/Classifiers.py | 37 | 39160 | #._cv_part guppy.heapy.Classifiers
class Classifier:
    # Base class for heapy classifiers.  A classifier maps each heap object
    # to a low-level classification via self.cli (a C-level classifier from
    # mod.hv) and converts between those classifications and user-visible
    # Kind objects (get_kind/get_kindarg, get_userkind/get_userkindarg).
    def __init__(self, mod, name, cli=None, supers=(), depends=(), with_referrers=False):
        self.mod = mod
        self.name = name
        if cli is not None:
            # Set directly; otherwise the 'cli' property calls get_cli().
            self.cli = cli
        # Set of all super-classifiers (including self).
        # The partial order is defined in Notes Aug 30 2005.
        self.super_classifiers = mod.ImpSet.immnodeset([self])
        if supers:
            for s in supers:
                self.super_classifiers |= s.super_classifiers
        else:
            # The Unity classifier is super of all, but we must add it only
            # if not supers specified; init of ByUnity itself depends on this.
            self.super_classifiers |= [mod.Use.Unity.classifier]
        # The classifiers that self depends on.
        for d in depends:
            if d.with_referrers:
                with_referrers = True
                break
        # True if we need to setup referrers before calling (the) low-level classifier.
        self.with_referrers = with_referrers
        if with_referrers:
            # Shadow the method below with the real referrer-aware wrapper.
            self.call_with_referrers = mod.View.call_with_referrers

    def call_with_referrers(self, x, f):
        # Default is to not use referrers.
        return f(x)

    def _get_cli(self): # This is not redefined by subclass unless they set cli property.
        return self.get_cli() # This may be defined by subclass w/o setting cli property.
    cli = property(_get_cli)

    def get_alt(self, kind, alt):
        # Get alternative kind for a kind with self as fam.classifier.
        # assert kind.fam.classifier is self
        return self.mod.alt(kind, alt)

    def get_dictof(self, kind):
        # Build (memoized) the 'dict of <kind>' equivalence relation based
        # on this classifier, and return the corresponding dictof kind.
        name = '%s.dictof'%self.name
        er = self.mod.mker_memoized(
            name,
            lambda:
            self.mod._er_by_(ByDictOwner, self.mod, name, self))
        return er.classifier.dictof(kind)

    def get_kind(self, k):
        # Make an equivalence class from low-level classification
        return self.family(k)

    def get_kindarg(self, kind):
        # Inverse of get_kind
        cla, ka, cmp = kind.get_ckc()
        if cla is not self:
            raise ValueError, 'get_kindarg: argument with classifier %r expected'%self
        return ka

    def get_reprname(self):
        # User-visible dotted name, e.g. '<prefix><name>'.
        return '%s%s'%(self.mod.Use.reprefix, self.name)

    def get_sokind(self, er, *args, **kwds):
        # Start a (callable) set-of-kinds with one kind made from er.
        k = er(*args, **kwds)
        return CallableSoKind(er, (k,))

    def get_sokindrepr(self, sokind):
        # Get the representation of a set of kinds
        # from this classifier / eqv. relation.
        if 0:
            # NOTE(review): dead branch, kept from an earlier repr format.
            return '%ssokind(%s)'%(self.mod.Use.reprefix,
                                   ', '.join(['%r'%kind for kind in sokind.kinds]))
        return '%s.sokind%s'%(self.get_reprname(),
                              ''.join(['(%s)'%self.get_userkindargrepr(k)
                                       for k in sokind.kinds]))

    def get_tabheader(self, ctx=''):
        # If ctx = 'and', get the table header when used as a part of the 'and' classifier.
        # It is sometimes a more compact or parenthesised version of the usual tab header.
        return self.get_byname()

    def get_tabrendering(self, cla, ctx=''):
        # If ctx = 'and', get the table rendering when used as a part of the 'and' classifier
        # sometimes we want to enclose something in parenthesises.
        return cla.brief

    def get_userkind(self, *args, **kwds):
        # Make a kind from user-level arguments
        return self.family(*args, **kwds)

    def get_userkindarg(self, kind):
        # Inverse of get_userkind, for the default (single-argument) case.
        return kind.arg

    def get_userkindargrepr(self, kind):
        return repr(self.get_userkindarg(kind))

    def partition(self, iterable):
        # Partition the objects in iterable into (kind, idset) pairs,
        # one pair per distinct classification.
        # NOTE(review): uses self.er, which is assumed to be attached to the
        # classifier by the owning equivalence relation — confirm at call site.
        items = []
        for k, v in self.partition_cli(iterable):
            k = self.get_kind(k)
            v = self.mod.Use.idset(v, er=self.er)
            items.append((k, v))
        return items

    def partition_cli(self, a):
        # Low-level partition: list of (classification, nodeset) pairs.
        ep = self.call_with_referrers(
            a,
            self.cli.epartition)
        return [(k, ep[k]) for k in ep.get_domain()]

    def relimg(self, X):
        # Relational image: union of the kinds of all objects in X.
        p = self.partition_cli(X)
        kinds = [self.get_kind(k) for k, v in p] # could be more efficient
        return self.mod.Use.union(kinds, maximized=1)

    def select_cli(self, a, b, cmp='=='):
        # Select the nodes of a whose classification compares cmp-wise to b.
        return self.call_with_referrers(
            a,
            lambda a: self.cli.select(a, b, cmp))

    def select_ids(self, X, k, alt=None):
        r = self.mod.Use.idset(self.select_cli(X.nodes, k, alt))
        return r
class SoKind(object):
    # A "set of kinds", all belonging to one classifier / equivalence
    # relation.  Also caches the corresponding low-level classifications
    # as an immnodeset (clikinds).
    def __init__(self, er, kinds):
        self.er = er
        self.classifier = er.classifier
        self.kinds = kinds
        self.clikinds = self.classifier.mod.ImpSet.immnodeset(
            [self.classifier.get_kindarg(kind) for kind in kinds])
    def __eq__(self, other):
        # Two SoKinds are equal iff they have the same classifier and the
        # unions of their kinds are equal sets.
        if not isinstance(other, SoKind):
            return False
        if self.classifier != other.classifier:
            return False
        a = self.classifier.mod.Use.union(self.kinds)
        b = self.classifier.mod.Use.union(other.kinds)
        return a == b
    def __hash__(self):
        # Hash via repr; consistent with __eq__ as long as equal SoKinds
        # render the same representation.
        return hash(repr(self))
    def __repr__(self):
        return self.classifier.get_sokindrepr(self)
    def _get_refdby(self):
        # The 'referred by' kind corresponding to this set of kinds.
        return self.er.refdby(self)
    refdby = property(_get_refdby)
class CallableSoKind(SoKind):
    # A SoKind that can be called to extend itself: each call makes one
    # more kind via the equivalence relation and returns a new instance.
    def __call__(self, *args, **kwds):
        k = self.er(*args, **kwds)
        return self.__class__(self.er, self.kinds + (k,))
class SoNoKind(SoKind):
    # The empty set of kinds; rendered as '<prefix>sonokind'.
    def __repr__(self):
        return '%s%s'%(self.classifier.mod.Use.reprefix, 'sonokind')
class QuickSoKind(SoKind):
    # Quicker to make than SoKind,
    # when clikinds is available but not kinds.
    # (er and kinds are then derived lazily via properties.)
    __slots__ = 'classifier', 'clikinds'
    def __init__(self, classifier, clikinds):
        self.classifier = classifier
        self.clikinds = clikinds
    def _get_er(self):
        return self.classifier.er
    er = property(_get_er)
    def _get_kinds(self):
        # Reconstruct the high-level kinds from the cached classifications.
        return tuple([self.classifier.get_kind(k) for k in self.clikinds])
    kinds = property(_get_kinds)
class IdentityFamily:
    # Holds a single object node
    # Family mixin for kinds denoting exactly one object (by identity).
    def __init__(self, mod, classifier):
        self.defrefining(mod.Use.Anything)
        self.classifier = classifier
    def _ge_ATOM(self, a, b):
        # b is known to not be Nothing since its c_le doesn't call back
        if self is b.fam:
            return a.arg is b.arg
        return b.fam.supercl is not None and b.fam.supercl <= a
    def _le_ATOM(self, a, b):
        # b is known to not be Nothing since its c_ge doesn't call back
        if self is b.fam:
            return a.arg is b.arg
        return self.supercl is not None and self.supercl <= b
    def c_contains(self, a, b):
        # Membership is object identity.
        return b is a.arg
    def _and_ID(self, a, b):
        # Just a possible optimization
        return self.mod.Use.idset(b.nodes & [a.arg])
    def c_get_brief(self, a):
        return '<id %s>'%hex(id(a.arg))
    def c_repr(self, a):
        return '%s(%s)'%(self.classifier.get_reprname(), self.classifier.get_userkindargrepr(a))
class ByIdentity(Classifier):
    # Classify by object identity: each object is its own class.
    # User-level kinds are addressed by the object's memory address.
    def __init__(self, mod, name):
        Classifier.__init__(self, mod, name, mod.hv.cli_id())
        self.family = mod.fam_mixin_argatom(IdentityFamily, self)
        # self.super_classifiers = mod.Use.Anything # Replace whatever Classifer had set it to
    def get_byname(self):
        return 'object identity'
    def get_tabheader(self, ctx=''):
        return 'Object Identity'
    def get_userkind(self, address):
        # The user supplies an address; look up the live object at it.
        return self.get_kind(self.mod.View.obj_at(address))
    def get_userkindarg(self, kind):
        return id(kind.arg)
    def get_userkindargrepr(self, kind):
        return hex(self.get_userkindarg(kind))
class ByIdentitySet(Classifier):
    # Classification is, conceptually, a singleton immnodeset of each object
    # What this is used to is:
    #   to be able to use an iso() set as a kind
    #   combined with other classifiers eg in dictof, biper
    # The ckc returned from an iso is then
    #   this classifier, nodes of iso, '<='
    # The cmp indicates subset
    # select thus selects every object for which it singleton is a subset of the set given
    # which is optimized to select the object that are members of that set
    # and may be optimized at higher levels to invoke the low-level set intersection
    def __init__(self, mod, name):
        Classifier.__init__(self, mod, name, mod.hv.cli_idset())
        self.family = mod.Use.idset
        # self.super_classifiers = mod.Use.Anything # Replace whatever Classifer had set it to
    def get_byname(self):
        return 'by identity set'
    def get_userkind(self, node):
        # A user kind is the singleton idset holding the given node.
        return self.family(self.mod.ImpSet.immnodeset([node]))
    def relimg(self, X):
        # Union of all singleton classifications = the set of objects itself.
        p = self.partition_cli(X)
        k = self.mod.ImpSet.immnodeset_union([k for k, v in p])
        return self.family(k)
class PyObjectFamily:
    # Family for the universal kind <Anything>: contains every object.
    def __init__(self, mod, classifier):
        self.classifier = classifier
    def c_contains(self, a, b):
        # Everything is a member.
        return True
    def c_get_idpart_header(self, a):
        return 'Kind: Name/Value/Address'
    def c_get_idpart_label(self, a):
        return ''
    def c_get_idpart_render(self, a):
        def render(x):
            # Render one object as its brief iso() description, stripped of
            # the '<1 ...>' wrapping.
            x = self.mod.Use.iso(x)
            r = x.brief.lstrip('<1 ').rstrip('>')
            return r
        return render
    def c_get_brief(self, a):
        return '<Anything>'
    def c_repr(self, a):
        return '%s%s'%(self.mod.Use.reprefix, 'Anything')
    def _and_ID(self, a, b):
        # Optimization shortcut
        # should be made in classifier.select
        return b
class ByUnity(Classifier):
    """byunity
    Classify by <unity>.
    The classification returned for every object is <Anything>."""
    def __init__(self, mod, name):
        Classifier.__init__(self, mod, name, mod.hv.cli_none(),
                            supers=[self] # Must make it avoid referring to Unity !
                            )
        self.family = mod.fam_mixin_argatom(PyObjectFamily, self)
    def get_byname(self):
        return 'unity'
    def get_tabheader(self, ctx=''):
        return '<unclassified>'
    def get_userkind(self, *args):
        # The only kind of this relation is the universal set.
        return self.mod.Use.Anything
    def get_userkindarg(self, kind):
        return None
class IndiSizeFamily:
    # Family for '<size = N>' kinds; the kind argument is the individual
    # memory size in bytes.
    def __init__(self, mod, classifier):
        self.defrefining(mod.Use.Anything)
        self.classifier = classifier
    def __call__(self, a):
        # Normalize the size argument to an int before constructing.
        a = int(a)
        return self.mod.AtomFamily.__call__(self, a)
    def c_alt(self, a, alt):
        return self.classifier.get_alt(a, alt)
    def c_contains(self, a, b):
        # Member iff the object's classified (individual) size matches.
        return a.arg == self.classifier.cli.classify(b)
    def c_get_render(self, a):
        return self.mod.summary_str(a.arg)
    def c_get_brief(self, a):
        return '<size = %d>'%a.arg
    def c_get_brief_alt(self, a, alt):
        # alt is a comparison operator string such as '<=' or '>'.
        return '<size %s %d>'%(alt, a.arg)
    def c_repr(self, a):
        return '%s(%s)'%(self.classifier.get_reprname(), a.arg)
class ByIndiSize(Classifier):
    """byindisize
    Classify by <individual size>.
    The classification will be individual memory size of the object."""
    def __init__(self, mod, name):
        Classifier.__init__(self, mod, name)
        self.family = mod.fam_mixin_argatom(IndiSizeFamily, self)
    def get_byname(self):
        return 'individual size'
    def get_cli(self):
        # A fresh memo dict per low-level classifier instance.
        return self.mod.hv.cli_indisize({})
    def get_tabheader(self, ctx=''):
        return 'Individual Size'
    def get_tabrendering(self, cla, ctx=''):
        # Right-justify in 9 columns for table output; compact inside 'and'.
        if ctx:
            return '%d'%cla.arg
        else:
            return '%9d'%cla.arg
class TypeFamily:
    # Family for kinds whose argument is a Python type object.
    def __init__(self, mod, classifier):
        self.defrefining(mod.Use.Anything)
        self.classifier = classifier
        self.range = mod.fam_Family(self)
        self.TypeType = mod.types.TypeType
    def __call__(self, a):
        if not isinstance(a, self.TypeType):
            raise TypeError, "Argument should be a type."
        return self.Set(self, a)
    def c_alt(self, a, alt):
        return self.classifier.get_alt(a, alt)
    def c_contains(self, a, b):
        # Exact type match (no subtype membership here).
        return type(b) is a.arg
    def c_get_render(self, a):
        return self.mod.summary_str(a.arg)
    def c_get_brief(self, a):
        # summary_str(type(...)) yields a renderer which is applied to the
        # type object itself.
        return self.mod.summary_str(type(a.arg)) (a.arg)
    def c_get_brief_alt(self, a, alt):
        x = {
            '<' : 'strict subtype',
            '<=' : 'subtype',
            '>=' : 'supertype',
            '>' : 'strict supertype'
            }[alt]
        return '<%s of %s>'%(x, self.c_get_brief(a))
    def c_repr(self, a):
        return self.classifier.get_repr(a)
class ByType(Classifier):
    """bytype
    Classify by <type>.
    The classification will be the type of the object."""
    def __init__(self, mod, name):
        Classifier.__init__(self, mod, name, mod.hv.cli_type())
        self.family = mod.fam_mixin_argatom(TypeFamily, self)
    def get_attr_for_er(self, name):
        # Expose e.g. Type.Dict as the kind for types.DictType.
        return self.get_userkind(getattr(self.mod.types, name+'Type'))
    def get_byname(self):
        return 'type'
    def get_repr(self, kind):
        # Prefer the symbolic name (e.g. '<prefix>.Dict') when the type is
        # one of the known types; fall back to the generic call form.
        t = kind.arg
        rn = self.get_reprname()
        if t in self.mod.invtypemod:
            return '%s.%s'%(rn, self.mod.invtypemod[t])
        else:
            return '%s(%r)'%(rn, self.get_userkindarg(kind))
    def get_tabheader(self, ctx=''):
        return 'Type'
    def get_userkind(self, kind):
        kind = self.mod.tc_adapt(kind)
        return self.family(kind)
    def get_userkindarg(self, kind):
        # A representation that is a valid userkind arg.
        return self.mod.Use.tc_repr(kind.arg)
class ClassFamily:
    # Family for kinds whose argument is an old-style class; members are
    # InstanceType objects whose __class__ is that class.
    def __init__(self, mod, classifier):
        self.classifier = classifier
        self.InstanceType = mod.types.InstanceType
        self.ClassType = mod.types.ClassType
        self.defrefidis(mod.Use.Type(self.InstanceType))
    def __call__(self, a):
        if not isinstance(a, self.ClassType):
            raise TypeError, "Argument should be a class (of type types.ClassType)."
        return self.mod.AtomFamily.__call__(self, a)
    def c_alt(self, a, alt):
        return self.classifier.get_alt(a, alt)
    def c_contains(self, a, b):
        # Exact class match on old-style instances.
        return type(b) is self.InstanceType and b.__class__ is a.arg
    def c_get_brief(self, a):
        return '%s.%s'%(a.arg.__module__, a.arg.__name__)
    def c_get_brief_alt(self, a, alt):
        x = {
            '<' : 'strict subclass',
            '<=' : 'subclass',
            '>=' : 'superclass',
            '>' : 'strict superclass'
            }[alt]
        return '<%s of %s>'%(x, self.c_get_brief(a))
    def c_repr(self, a):
        return '%s(%r)'%(self.classifier.get_reprname(), self.mod.Use.tc_repr(a.arg))
class ByClass(Classifier):
    """byclass
    Classify by 'class', in the following sense.
    An object is classified as follows:
    1.  If the object is of type InstanceType, the
        classification will be its class.
    2.  The classification will be the type of the object.
    This is like the __class__ attribute in newer Python, except it
    doesn't change if some type redefines the __class__ attribute.
    """
    def __init__(self, mod, name):
        sup = mod.Use.Type.classifier
        Classifier.__init__(self, mod, name, mod.hv.cli_class(), supers = [sup])
        self.fam_Class = mod.fam_mixin_argatom(ClassFamily, self)
        self.ClassType = self.fam_Class.ClassType
        self.TypeType = mod.types.TypeType
        # Non-class classifications are delegated to the Type classifier.
        self.type_get_kind = sup.get_kind
    def get_byname(self):
        return 'class'
    def get_kind(self, kind):
        # Old-style classes get a ClassFamily kind; anything else a Type kind.
        if isinstance(kind, self.ClassType):
            return self.fam_Class(kind)
        else:
            return self.type_get_kind(kind)
    def get_kindarg(self, kind):
        if kind.fam is self.fam_Class:
            return kind.arg
        else:
            return self.mod.Use.Type.classifier.get_kindarg(kind)
    def get_tabheader(self, ctx=''):
        return 'Class'
    def get_userkind(self, kind):
        kind = self.mod.tc_adapt(kind)
        try:
            return self.get_kind(kind)
        except TypeError:
            raise TypeError, 'Argument should be a class or type.'
    def get_userkindarg(self, kind):
        return self.mod.Use.tc_repr(kind.arg)
class OwnedDictFamily:
    # Family for 'dict of <owner kind>' kinds: dicts classified by the kind
    # of the object that owns them (typically via its __dict__).
    def __init__(self, mod):
        self.defrefidis(mod.Use.Type(self.types.DictType))
    def _get_ownerkind(self, a):
        # The kind of the owning object; Nothing means 'dict with no owner'.
        return a.arg
    def c_alt(self, a, alt):
        return self(a.arg.alt(alt))
    def c_get_render(self, a):
        ka = self._get_ownerkind(a)
        if ka is self.mod.Use.Nothing:
            return self.mod.Use.Type.Dict.get_render()
        else:
            # Render the dict via its (single) owner, abbreviating dotted
            # names to '..<last component>'.
            ownrender = ka.get_render()
            def render(x):
                ret = ownrender( self.mod.Use.iso(x).owners.theone )
                if '.' in ret:
                    ret = '..'+ret.split('.')[-1]
                return ret
            return render
        # NOTE(review): everything below is unreachable (both branches above
        # return); kept from an earlier version of this method.
        if ka == self.mod.fam_Type(self.types.ModuleType):
            modrender = self.mod.Use.Type.Module.get_render()
            def render(x):
                return modrender( self.mod.Use.iso(x).owners.theone )
            return render
        else:
            return self.mod.Use.Type.Dict.get_render()
    def c_get_brief(self, a):
        ka = self._get_ownerkind(a)
        if ka is self.mod.Use.Nothing:
            return 'dict (no owner)'
        else:
            return 'dict of ' + ka.brief
    def c_get_ckc(self, a):
        # Translate the owner kind's (classifier, kind, cmp) into the
        # corresponding dictof classifier's terms.
        cla, k, cmp = a.arg.get_ckc()
        if cmp != '==':
            cla, k, cmp = a.arg.biper(0).get_ckc()
        docla = cla.er.dictof.classifier
        if a.arg is self.mod.Use.Nothing:
            k = docla.notownedtag
        return docla, k, cmp
    def c_get_str_for(self, a, b):
        return self.c_get_brief(a)
    def c_get_idpart_render(self, a):
        ka = self._get_ownerkind(a)
        if ka is not self.mod.Use.Nothing:
            # Render the id part via the owner object.
            owner_render = ka.fam.c_get_idpart_render(ka)
            def render(x):
                return owner_render(self.mod.Use.iso(x).owners.theone)
            return render
        else:
            b = self.mod._parent.Spec.Type.Dict
            return b.fam.c_get_idpart_render(b)
    def c_get_idpart_header(self, a):
        ka = self._get_ownerkind(a)
        if ka is self.mod.Use.Nothing:
            return 'Address*Length'
        else:
            return 'Owner ' + ka.fam.c_get_idpart_header(ka)
    def c_repr(self, a):
        # Parenthesise owner reprs starting with '~' (set complement).
        ka = self._get_ownerkind(a)
        ra = repr(ka)
        if ra.startswith('~'):
            ra = '(%s)'%ra
        return '%s.dictof'%ra
class ByDictOwner(Classifier):
    # Classify dicts by the classification of their owner (via the given
    # ownerclassifier).  Non-dicts and unowned dicts get special tag
    # classifications (notdicttag / notownedtag).
    def __init__(self, mod, name, ownerclassifier):
        Classifier.__init__(self, mod, name, depends=[ownerclassifier])
        self.ownerclassifier = ownerclassifier
        self.hv = mod.View.hv
        self.ownership = mod.View.dict_ownership
        self.family = mod.dictof
        self.notdict = mod.notdict
        self.dictofnothing = mod.dictofnothing
        # Hashable unique tags
        # Using sets methods since I dont want our hiding tag here!
        # Confuses heapg. Note feb 3 2006
        self.notdicttag = mod.ImpSet.immnodeset([[]])
        self.notownedtag = mod.ImpSet.immnodeset([[]])
    def get_byname(self):
        return '[dict of] %s'%self.ownerclassifier.get_byname()
    def get_cli(self):
        cli = self.hv.cli_dictof(self.ownership, self.ownerclassifier.cli, self.notdicttag,
                                 self.notownedtag)
        return cli
    def get_kind(self, k):
        # Map low-level classification (tag or owner classification) to kind.
        if k is self.notdicttag:
            return self.notdict
        elif k is self.notownedtag:
            return self.dictofnothing
        else:
            return self.family(self.ownerclassifier.get_kind(k))
    def get_kindarg(self, kind):
        # Inverse of get_kind.
        if kind is self.notdict:
            return self.notdicttag
        elif kind is self.dictofnothing:
            return self.notownedtag
        else:
            return self.ownerclassifier.get_kindarg(kind.arg)
    def get_tabheader(self, ctx=''):
        return 'Dict of %s'%self.ownerclassifier.get_tabheader(ctx)
    def get_tabrendering(self, kind, ctx=''):
        if kind is self.notdict:
            r = kind.brief
        elif kind is self.dictofnothing:
            r = 'dict (no owner)'
        else:
            r = 'dict of ' + self.ownerclassifier.get_tabrendering(kind.arg, ctx)
        return r
    def get_userkind(self, k):
        # None -> 'not a dict'; Nothing -> unowned dict; else dict of k.
        if k is None:
            return self.notdict
        elif k is self.mod.Use.Nothing:
            return self.dictofnothing
        else:
            return self.family(k)
    def get_userkindarg(self, kind):
        if kind is self.notdict:
            return None
        elif kind is self.dictofnothing:
            return self.mod.Use.Nothing
        else:
            return kind.arg
    def owners(self, X):
        # Return the idset of the objects owning the dicts in X, updating
        # the ownership relation lazily when an entry is missing.
        # NOTE(review): the partition result p is computed but unused here;
        # presumably it forces ownership setup as a side effect — confirm.
        p = self.partition_cli(X.nodes)
        ns = self.mod.ImpSet.mutnodeset()
        drg = self.ownership
        for k in X.nodes:
            t = drg[k]
            if not t:
                self.mod.hv.update_dictowners(drg)
                t = drg[k]
            if t:
                v = t[0]
                if v is not None:
                    ns.add(v)
        return self.mod.Use.idset(ns)
class ByClassOrDictOwner(Classifier):
    """byclodo
    Classify by <type, class or dict owner>.
    The classification is performed as follows:
    1.  If the object is an instance of a class, the
        classification will be the class.
    2.  If the object is not a dictionary,
        the classification will be the type of the object.
    3.  The object is a dictionary. The referrers of the
        object are searched to find one that 'owns' the
        dictionary. That is, typically, that the dict is
        the __dict__ attribute of the owner. If no such
        owner is found, the type 'dict' will be the
        classification. If an owner is found, a special
        object that indicates the classification of the owner
        will be returned. The classification of the owner
        will be done by class. (As byclass.)"""
    def __init__(self, mod, name):
        # Combine the Class classifier (a) with its dictof classifier (d);
        # the low-level classifier is that of their conjunction (ad).
        a = mod.Class
        d = a.dictof
        ad = (a & d).classifier
        sup = a.classifier
        Classifier.__init__(self, mod, name, cli=None, supers=[sup], depends=[ad])
        self.sup = sup
        self.a = a.classifier
        self.d = d.classifier
        self.ad = ad
    def get_byname(self):
        return '[dict of] class'
    def get_cli(self):
        return self.ad.cli
    def get_kind(self, (ka, kd)):
        # The low-level classification is a (class, dictowner) pair; use the
        # dictowner part when it applies, otherwise the class part.
        if kd is self.d.notdicttag:
            return self.a.get_kind(ka)
        else:
            return self.d.get_kind(kd)
    def get_kindarg(self, kind):
        # Inverse of get_kind.
        if kind.fam is self.d.family:
            ka = dict
            kd = self.d.get_kindarg(kind)
        else:
            ka = self.a.get_kindarg(kind)
            kd = self.d.notdicttag
        return (ka, kd)
    def get_tabheader(self, ctx=''):
        return 'Kind (class / dict of class)'
    def get_userkind(self, kind=None, dictof=None):
        # Exactly one of kind / dictof must be given; dictof=() selects
        # dicts with no owner.
        try:
            if kind is None and dictof is not None:
                if dictof == ():
                    do = self.mod.UniSet.Nothing
                else:
                    do = self.sup.get_userkind(dictof)
                return self.d.get_userkind(do)
            elif kind is not None and dictof is None:
                kind = self.mod.tc_adapt(kind)
                if kind is dict:
                    raise TypeError, 'dict is not an equivalence class of Clodo, use dictof=() etc'
                return self.sup.get_kind(kind)
            else:
                raise TypeError
        except TypeError:
            raise TypeError, """\
Argument should be either
<type or class except dict>
dictof=<type or class>
dictof=()"""
    def get_userkindargrepr(self, kind):
        if kind.fam is self.d.family:
            if kind.arg is self.mod.UniSet.Nothing:
                d = '()'
            else:
                d = self.d.ownerclassifier.get_userkindargrepr(kind.arg)
            return 'dictof=%s'%d
        else:
            return kind.fam.classifier.get_userkindargrepr(kind)
    def owners(self, X):
        return self.d.owners(X)
class RetClaSetFamily:
def __init__(self, mod, classifier):
self.defrefining(mod.Use.Anything)
self.classifier = classifier
def _ge_ATOM(self, a, b):
# b is known to not be Nothing since its c_le doesn't call back
if self is b.fam:
return a.arg == b.arg
return b.fam.supercl is not None and b.fam.supercl <= a
def _le_ATOM(self, a, b):
# b is known to not be Nothing since its c_ge doesn't call back
if self is b.fam:
return a.arg == b.arg
return self.supercl is not None and self.supercl <= b
def c_alt(self, a, alt):
return a.arg.classifier.er.refdby.classifier.get_alt(a, alt)
return self.classifier.get_alt(a, alt)
def _get_arg_brief(self, a):
return a.arg.er.refdby.classifier.get_tabrendering(a, False)
def c_get_brief(self, a):
return '<referred by: %s>'%self._get_arg_brief(a)
def c_get_brief_alt(self, a, alt):
x = {
'<' : 'by less than',
'<=' : 'by at most',
'>=' : 'by at least',
'>' : 'by more than',
}[alt]
return '<referred %s: %s>'%(x, self._get_arg_brief(a))
def c_get_ckc(self, a):
return self.classifier, a.arg.clikinds, '=='
def c_repr(self, a):
return '%r.refdby'%a.arg
# Public
def sokind(self, sok):
if not isinstance(sok, SoKind):
raise TypeError, 'SoKind expected'
er = sok.classifier.er.refdby
kinds = (self(sok),)
return CallableSoKind(er, kinds)
class ByRetClaSet(Classifier):
def __init__(self, mod, name, rg, referrer_classifier, doc):
Classifier.__init__(self, mod, name, with_referrers=True)
self.rg = rg
self.referrer_classifier = referrer_classifier
self.family = self.mod.fam_mixin_argatom(RetClaSetFamily, self)
self.__doc__ = doc
def get_byname(self):
return 'referrer kinds'
def get_cli(self):
memo = {}
return self.mod.hv.cli_rcs(self.rg, self.referrer_classifier.cli, memo)
def get_inverted_refkind(self, k):
set_trace()
if k.fam.opname == 'OR':
ks = k.arg
elif k is self.mod.Use.Nothing:
ks = ()
else:
ks = (k,)
rks = []
for k in ks:
rks.append(self.referrer_classifier.get_kindarg(k))
return self.mod.ImpSet.immnodeset(rks)
def get_kind(self, k):
if k:
return self.family(QuickSoKind(self.referrer_classifier, k))
else:
return self.mod.refdbynothing
def get_tabheader(self, ctx=''):
th = 'Referrers by %s'%self.referrer_classifier.get_tabheader(ctx)
if ctx:
th = '{%s}'%th
return th
def get_tabrendering(self, cla, ctx):
rs = [self.referrer_classifier.get_tabrendering(x, ctx) for x in cla.arg.kinds]
rs.sort()
r = ', '.join(rs)
if ctx:
r = '{%s}'%r
elif not r:
r = '<Nothing>'
return r
def get_userkind(self, *args):
firstsok = None
clikinds = []
for arg in args:
if isinstance(arg, SoKind):
if not arg.classifier is self.referrer_classifier:
raise ValueError, 'Expected a SoKind with the %r classifier, argument had %r.'%(
self.referrer_classifier.name,
arg.classifier.name)
clikinds.extend(arg.clikinds)
if firstsok is None:
firstsok = arg
else:
# Assume we got a single kind
# get_kindarg takes care of classifier error checking
clikinds.append(self.referrer_classifier.get_kindarg(arg))
if len(args) > 1 or firstsok is None:
sok = QuickSoKind(self.referrer_classifier,
self.mod.ImpSet.immnodeset(clikinds))
else:
sok = firstsok
return self.family(sok)
class InRelFamily:
    # Family for '<via ...>' kinds; the kind argument is a sequence of
    # relation objects describing how the object is referred to.
    def __init__(self, mod, classifier):
        self.classifier = classifier
        self.defrefining(mod.Use.Anything)
    def _eq_args(self, a, b):
        # They are sequences (immnodesets) of relations.
        # I have not memoized them since I was afraid they would last too long
        # and I thought it not be worthwhile and hope this comparison is not done too often.
        # So I will compare them as equality based sets.
        a = dict([(x, ()) for x in a])
        b = dict([(x, ()) for x in b])
        return a == b
    def _ge_ATOM(self, a, b):
        # b is known to not be Nothing since its c_le doesn't call back
        if self is b.fam:
            return self._eq_args(a.arg, b.arg)
        return b.fam.supercl is not None and b.fam.supercl <= a
    def _le_ATOM(self, a, b):
        # b is known to not be Nothing since its c_ge doesn't call back
        if self is b.fam:
            return self._eq_args(a.arg, b.arg)
        return self.supercl is not None and self.supercl <= b
    def c_alt(self, a, alt):
        return self.classifier.get_alt(a, alt)
    def c_get_brief(self, a):
        return '<via %s>'%self.classifier.get_tabrendering(a, None)
    def c_repr(self, a):
        return '%s(%s)'%(self.classifier.get_reprname(),
                         self.classifier.get_userkindargrepr(a))
class ByInRel(Classifier):
    # Classify by how objects are referred to ('referred via'): the
    # classification is the set of relations from referrers to the object.
    def __init__(self, mod, name, rg):
        Classifier.__init__(self, mod, name, with_referrers=True)
        self.rg = rg
        self.family = mod.fam_mixin_argatom(InRelFamily, self)
    def _rel2str(self, r):
        # Render a relation object as its path-notation string.
        P = self.mod._parent.Path
        t = P.rel_table
        x = t[r.kind](r.relator)
        return x.stra('')
    def _str2rel(self, s):
        # Parse a string as generated by rel2str,
        # to recreate the relation object.
        P = self.mod._parent.Path
        orgs = s
        def mkrel(R, *args):
            return self.mod.View.heapyc.Relation(R.code, *args)
        # Strip the leading '_' placeholder, then dispatch on the kind of
        # path step: '[...]' index, '.attr' attribute-like, '->...' internal.
        if s.startswith('_'):
            s = s[1:]
        if s.startswith('['):
            # Index value: the index expression is evaluated with 'hp'
            # bound, since reprs of kinds may occur here.
            s = s[1:].rstrip(']')
            loc = {'hp':self.mod.Use}
            r = eval(s, loc)
            rel = mkrel(P.R_INDEXVAL, r)
        elif s.startswith('.'):
            s = s[1:]
            if s.replace('_','x').isalnum():
                rel = mkrel(P.R_ATTRIBUTE, s)
            elif s.startswith('f_locals['):
                s = s[9:].rstrip(']')
                r = eval(s, {})
                rel = mkrel(P.R_LOCAL_VAR, r)
            elif s.startswith('f_locals ['):
                s = s[10:].rstrip(']')
                r = eval(s, {})
                rel = mkrel(P.R_CELL, r)
            elif s.startswith('keys()['):
                s = s[7:].rstrip(']')
                r = int(s)
                rel = mkrel(P.R_INDEXKEY, r)
            elif s.startswith('__dict__.keys()['):
                s = s[16:].rstrip(']')
                r = int(s)
                rel = mkrel(P.R_HASATTR, r)
            else:
                raise SyntaxError, 'Cant make a relation of %r.'%orgs
        elif s.startswith('->'):
            s = s[2:]
            if s.startswith('f_valuestack['):
                s = s[13:].rstrip(']')
                r = int(s)
                rel = mkrel(P.R_STACK, r)
            else:
                rel = mkrel(P.R_INTERATTR, s)
        else:
            raise SyntaxError, 'Cant make a relation of %r.'%orgs
        return rel
    def get_byname(self):
        return 'referred via'
    def get_cli(self):
        memokind = {}
        memorel = {}
        return self.mod.hv.cli_inrel(self.rg, memokind, memorel)
    def get_kind(self, k):
        return self.family(k)
    def get_tabheader(self, ctx=''):
        if not ctx:
            return "Referred Via:"
        else:
            r = 'Referred Via'
            if ctx == 'and':
                r = '{%s}'%r
            return r
    def get_tabrendering(self, kind, ctx=''):
        r = self.get_userkindargrepr(kind)
        if ctx == 'and':
            r = '{%s}'%r
        return r
    def get_userkind(self, *args):
        # Each user argument is a relation string; parse them all.
        return self.get_kind([self._str2rel(x) for x in args])
    def get_userkindargrepr(self, kind):
        a = [repr(self._rel2str(x)) for x in kind.arg]
        a.sort()
        return ', '.join(a)
class AndClassifier(Classifier):
    """Classifier combining several sub-classifiers; a classification is
    the tuple of the sub-classifications, rendered joined with ' & '.
    """

    def __init__(self, mod, name, args): # At least 2 args
        if name is None:
            name = '(%s)'%' & '.join([x.name for x in args])
        Classifier.__init__(self, mod, name, cli=None, supers=args, depends=args)
        self.args = args

    def get_byname(self):
        return '<%s>'%' & '.join([x.get_byname() for x in self.args])

    def get_cli(self):
        memo = {}
        return self.mod.hv.cli_and(tuple([x.cli for x in self.args]), memo)

    def get_kind(self, k):
        # k is a tuple with one low-level classification per sub-classifier.
        ks = []
        for ki, ci in zip(k, self.args):
            ks.append(ci.get_kind(ki))
        return self.mod.UniSet.fam_And._cons(ks)

    def get_reprname(self):
        return '(%s)'%' & '.join([x.get_reprname() for x in self.args])

    def get_tabheader(self, ctx=''):
        r = '%s'%' & '.join([x.get_tabheader('and') for x in self.args])
        if ctx == 'and':
            r = '(%s)'%r
        return r

    def get_tabrendering(self, cla, ctx=''):
        ss = []
        for a, cl in zip(cla.arg, self.args):
            s = cl.get_tabrendering(a, 'and')
            ss.append(s)
        r = ' & '.join(ss)
        if ctx == 'and':
            r = '(%s)'%r
        return r
class ModuleFamily:
    """Kind family whose atoms mean 'is this particular module object'.

    The atom argument (.arg) is the module object itself.
    """

    def __init__(self, mod, classifier):
        self.defrefining(mod.Use.Anything)
        self.classifier = classifier
        self.range = mod.fam_Family(self)

    def c_contains(self, a, b):
        # The kind contains exactly the one module it was created for.
        return b is a.arg

    def c_get_render(self, a):
        return self.mod.summary_str(a.arg)

    def c_get_brief(self, a):
        # summary_str(type(...)) yields a rendering function (as the
        # immediate call shows), which is applied to the module itself.
        return self.mod.summary_str(type(a.arg)) (a.arg)

    def c_repr(self, a):
        return '%s(%s)'%(self.classifier.get_reprname(),
                         self.classifier.get_userkindargrepr(a))
class ByModule(Classifier):
    """Classifier 'Module': classifies module objects by identity;
    all non-module objects fall into the ~Type.Module kind.
    """

    def __init__(self, mod, name):
        def classify(x):
            # Count classifications performed; a module object is used
            # directly as its own classification key.
            self.nc += 1
            return x
        cli = mod.hv.cli_user_defined(mod.Use.Type.classifier.cli,
                                      mod.Use.Type.Module.arg,
                                      classify,
                                      None
                                      )
        Classifier.__init__(self, mod, name, cli)
        self.not_module = ~mod.Use.Type.Module
        self.nc = 0
        self.family = mod.fam_mixin_argatom(ModuleFamily, self)
        self.ModuleType = mod.types.ModuleType

    def get_byname(self):
        return 'module'

    def get_kind(self, k):
        # k is None for non-module objects (see cli_user_defined above).
        if k is None:
            return self.not_module
        else:
            return self.family(k)

    def get_kindarg(self, kind):
        # Inverse of get_kind.
        if kind is self.not_module:
            return None
        else:
            assert kind.fam is self.family
            return kind.arg

    def get_tabheader(self, ctx=''):
        return 'Module'

    def get_userkind(self, name=None, at=None):
        # Look up a module by name (in the target's sys.modules), by
        # address, or both (address wins; name is then just validated).
        if name is None and at is None:
            return self.not_module
        if at is None:
            try:
                m = self.mod.View.target.sys.modules[name]
            except KeyError:
                raise ValueError, 'No module %r in View.target.sys.modules.'%name
        else:
            m = self.mod.View.obj_at(at)
            if not isinstance(m, self.ModuleType):
                raise TypeError, 'The specified object is not of module type, but %r.'%type(m)
            if name is not None and m.__name__ != name:
                raise ValueError, 'The specified module has not name %r but %r.'%(name, m.__name__)
        return self.family(m)

    def get_userkindargrepr(self, kind):
        if kind is self.not_module:
            return ''
        else:
            m = kind.arg
            name = m.__name__
            s = '%r'%name
            if self.mod._root.sys.modules.get(name) is not m:
                # Module is not registered under its own name; include
                # its address so it can be located again.
                s += ', at=%s'%hex(id(m))
            return s
class AltFamily:
    """Kind family wrapping another kind with an alternative comparison
    operator (e.g. Size.alt('>=')), for range-style classifications.
    """

    def __init__(self, mod, altcode):
        if altcode not in ('<', '<=', '==', '!=', '>', '>='):
            raise ValueError, 'No such comparison symbol: %r'%altcode
        self.altcode = altcode

    def c_get_brief(self, a):
        return a.arg.fam.c_get_brief_alt(a.arg, self.altcode)

    def c_get_ckc(self, a):
        # Take the wrapped kind's (classifier, kind, cmp) triple and
        # substitute our comparison; only '==' kinds can be rewritten.
        ckc = list(a.arg.get_ckc())
        if ckc[-1] == '==':
            ckc[-1] = self.altcode
        else:
            raise ValueError, 'Can not make alternative kind, non-equality comparison on underlying kind.'
        return tuple(ckc)

    def c_repr(self, a):
        return '%s.alt(%r)'%(repr(a.arg), self.altcode)
class FindexFamily:
    """Kind family for ByFindex: the atom argument is the integer index
    of the first matching kind in the classifier's kind list.
    """

    def __init__(self, mod, classifier):
        self.defrefining(mod.Use.Anything)
        self.classifier = classifier
        self.range = mod.fam_Family(self)

    def c_get_brief(self, a):
        # An out-of-range index means 'matched none of the kinds'.
        if not 0 <= a.arg < len(self.classifier.kinds):
            return '<None>'
        else:
            return '%s / %d'%(self.classifier.kinds[a.arg].brief, a.arg)

    def c_repr(self, a):
        return '%s(%d)'%(self.classifier.get_reprname(), a.arg)
class ByFindex(Classifier):
    """Classifier mapping each object to the index of the first kind in
    a given sequence that it matches (used to implement findex/biper).
    """

    def __init__(self, mod, name, kinds):
        # Each kind must provide a (classifier, kind, cmp) triple.
        self.alts = [k.fam.c_get_ckc(k) for k in kinds]
        depends = [ckc[0] for ckc in self.alts]
        Classifier.__init__(self, mod, name, depends=depends)
        self.kinds = kinds
        self.family = mod.fam_mixin_argatom(FindexFamily, self)

    def get_cli(self):
        alts = tuple([(cla.cli, k, cmp) for (cla, k, cmp) in self.alts])
        memo = {}
        cli = self.mod.hv.cli_findex(alts, memo)
        return cli

    def get_byname(self):
        return 'index of first matching kind of %s'%(self.kinds,)

    def get_tabheader(self, ctx=''):
        return 'First Matching Kind / Index'
class _GLUECLAMP_:
_imports_ = (
'_parent:ImpSet',
'_parent:View',
'_parent.View:hv',
'_parent:UniSet',
'_parent.UniSet:fam_mixin_argatom',
'_parent:Use',
'_root.guppy.etc.etc:str2int',
'_root:re',
'_root:types,'
)
def _er_by_(self, constructor, *args, **kwds):
return self.UniSet.fam_EquivalenceRelation(constructor, *args, **kwds)
# Exported equivalence relations
def _get_Class(self):
return self._er_by_(ByClass, self, name='Class')
def _get_Clodo(self):
return self._er_by_(ByClassOrDictOwner, self, name='Clodo')
def _get_Id(self):
return self._er_by_(ByIdentity, self, name='Id')
def _get_Idset(self):
return self._er_by_(ByIdentitySet, self, name='Idset')
def _get_Module(self):
return self._er_by_(ByModule, self, name='Module')
def _get_Unity(self):
return self._er_by_(ByUnity, self, name='Unity')
def _get_Rcs(self):
return self.mker_refdby(self.Clodo)
def mker_and(self, ers):
if len(ers) == 0:
return self.Unity
classifiers = [er.classifier for er in ers]
name = None
return self.UniSet.fam_EquivalenceRelation(AndClassifier, self, name, classifiers)
def mker_dictof(self, er, name=None):
if name is None:
name='%s.dictof'%er.classifier.name
return self.mker_memoized(
name,
lambda:
self._er_by_(ByDictOwner, self, name, er.classifier))
def _get_memo_er(self):
return {}
def mker_memoized(self, name, f):
v = self.memo_er.get(name)
if v is None:
self.memo_er[name] = v = f()
return v
def mker_refdby(self, er, name=None):
if name is None:
name='%s.refdby'%er.classifier.name
return self.mker_memoized(
name,
lambda:
self._er_by_(
ByRetClaSet,
self,
name,
self.View.rg,
er.classifier,
"""%s
Classify by <%s> of referrers.
This classifier uses the %r classifier to classify the
referrers of the object. The classifications of the referrers
are collected in a set. This set becomes the classification of
the object.
"""%(name, er.classifier.get_byname(), er.classifier.name ) ))
def _get_Size(self):
return self._er_by_(ByIndiSize, self, 'Size')
def _get_Type(self):
return self._er_by_(ByType, self, 'Type')
def _get_Via(self):
View = self.View
return self._er_by_(
ByInRel,
self,
'Via',
View.rg)
def tc_adapt(self, k):
# Adapt to a type or class.
# Accepts a type or class object, or a string representation
# (at least as) by tc_repr.
if (isinstance(k, self.types.TypeType) or
isinstance(k, self.types.ClassType)):
return k
if not isinstance(k, basestring):
raise TypeError, 'type, class or basestring expected'
err = ("String argument to tc_adapt should be of form\n"
"'<class MODULE.NAME at 0xADDR>' or\n"
"'<type MODULE.NAME at 0xADDR>' or\n"
"'<at 0xADDR>'. I got: %r"%k)
s = k
if not (s.startswith('<') and s.endswith('>')):
raise ValueError, err
s = s.lstrip('<').rstrip('>')
s = s.split(' ')
if len(s) < 2:
raise ValueError, err
t = s[0]
addr = self.str2int(s[-1])
kind = self.View.obj_at(addr)
if t == 'at':
if len(s) != 2:
raise ValueError, err
ty = None
else:
if len(s) != 4:
raise ValueError, err
if t not in ('type', 'class'):
raise ValueError, err
ty = getattr(self.types, t.capitalize()+'Type')
if not isinstance(kind, ty):
raise TypeError, '%s object expected'%t
if not s[2] == 'at':
raise ValueError, err
names = s[1].split('.')
if len(names) < 2:
raise ValueError, err
modulename = '.'.join(names[:-1])
tcname = names[-1]
if kind.__module__ != modulename:
raise ValueError, 'The %s %r has wrong __module__, expected %r.'%(t, kind, modulename)
if kind.__name__ != tcname:
raise ValueError, 'The %s %r has wrong __name__, expected %r.'%(t, kind, tcname)
return kind
def tc_repr(self, k):
# Represent a type or class object as a string,
# so that it can converted back via tc_adapt,
# as long as it still exists in the heap.
# There is no absolute guarantee that it will always become the same object,
# but I hope it will work well enough in practice.
# See also Notes Sep 7 2005.
if isinstance(k, self.types.TypeType):
t = 'type'
elif isinstance(k, self.types.ClassType):
t = 'class'
else:
raise TypeError, 'type or class expected'
return '<%s %s.%s at %s>'%(t, k.__module__, k.__name__, hex(id(k)))
# Convenience interfaces
def _get_alt(self):
altmemo = {
'==':lambda k:k,
'!=':lambda k:~k,
}
def alt(kind, cmp):
a = altmemo.get(cmp)
if a is None:
a = self.fam_mixin_argatom(AltFamily, cmp)
altmemo[cmp] = a
return a(kind)
return alt
def biper(self, kind):
return self.findex(kind)
def _get_dictof(self):
return self.fam_mixin_argatom(OwnedDictFamily)
def _get_dictofnothing(self):
return self.dictof(self.Use.Nothing)
def _get_invtypemod(self):
invtypemod = {}
for k, v in self.types._module.__dict__.items():
if k.endswith('Type'):
invtypemod[v] = k[:-4]
return invtypemod
def _get_notdict(self):
return ~self.Use.Type.Dict
def findex(self, *kinds):
return self._er_by_(
ByFindex,
self,
'findex(%s)'%', '.join([repr(k) for k in kinds]),
kinds
)
def _get_refdbynothing(self):
return self.sonokind.refdby
def sokind(self, *kinds):
"""sokind(0..*:Kind+) -> SetOfKind
"""
cla = None
clikinds = []
if not kinds:
raise ValueError, 'At least one argument must be given.'
for kind in kinds:
ckc = kind.get_ckc()
if cla is None:
cla = ckc[0]
else:
if ckc[0] is not cla:
raise ValueError, 'Kind at index %d has wrong classifier.'%len(clikinds)
if ckc[-1] != '==':
raise ValueError, 'Kind at index %d has wrong comparision.'%len(clikinds)
clikinds.append(ckc[1])
return QuickSoKind(cla, self.ImpSet.immnodeset(clikinds))
def _get_sonokind(self):
return SoNoKind(self.Unity, ())
| apache-2.0 |
capntransit/carfree-council | cfcensus2010.py | 1 | 1828 | import sys, os, json, time
import pandas as pd
# Borough code (digits 4-5 of the census GEO.id2) -> NYC borough number.
BOROCODE = {'61' : '1', '05' : '2', '47': '3', '81' : '4', '85': '5'}

# Two positional arguments are required: the census CSV and the council
# district share JSON.  (BUGFIX: the check was `< 2`, which accepted a
# single-argument invocation and then crashed on sys.argv[2] below.)
if (len(sys.argv) < 3):
    print ("Usage: cfcensus.py census.csv districts.json")
    exit()
censusfile = sys.argv[1]
councilfile = sys.argv[2]
TRACTCOL = 'BoroCT' # rename this for 2000 census
def boroCT (id2):
    """Convert a full census GEO.id2 into a borough-digit + tract code."""
    digits = str(id2)
    # Digits 4-5 select the borough; the remainder is the tract number.
    return BOROCODE[digits[3:5]] + digits[5:]
# Bail out early if either input file is missing or not a regular file.
for (f) in ([censusfile, councilfile]):
    if (not os.path.isfile(f)):
        print ("File " + f + " is not readable")
        exit()

try:
    vehDf = pd.read_csv(
        censusfile,
        skiprows=[1]  # skip the second row -- presumably descriptive labels; confirm against the ACS export
    )
except Exception as e:
    print ("Unable to read census file " + censusfile + ": {0}".format(e))
    exit()

try:
    with open(councilfile) as councilfo:
        councilData = json.load(councilfo)
except Exception as e:
    print ("Unable to read council file " + councilfile+": {0}".format(e))
    exit()

# Fraction of no-vehicle households (HD01_VD03) among all households
# (HD01_VD01) per tract.
vehDf['pctNoVeh'] = vehDf['HD01_VD03'].astype('int') / vehDf['HD01_VD01'].astype('int')
# Borough+tract key derived from the full GEO.id2 identifier.
vehDf[TRACTCOL] = vehDf['GEO.id2'].apply(boroCT)
vehDf2 = pd.DataFrame(vehDf[[TRACTCOL, 'HD01_VD01', 'HD01_VD03', 'pctNoVeh']]).set_index(TRACTCOL)
# Accumulate, per council district, the share-weighted household totals
# and no-vehicle household counts.  (Removed the unused `f = 0` local and
# the unused exception binding.)
total = {}
noVeh = {}
councilDistricts = set()
# councilData maps census tract -> {district: share of tract in district}.
for (t, c) in councilData.items():
    for (d) in c:
        councilDistricts.add(d)
        try:
            total[d] = total.get(d, 0) + c[d] * vehDf2.loc[str(t)]['HD01_VD01']
            noVeh[d] = noVeh.get(d, 0) + c[d] * vehDf2.loc[str(t)]['HD01_VD03']
        except KeyError:
            # Tract present in the district shares but absent from the
            # census data; report and keep going.
            print("No entry for census tract " + str(t))

# Emit CSV rows: district, total households, no-vehicle households, share.
for (d) in sorted(councilDistricts, key=int):
    print (','.join([
        d,
        str(int(total[d])),
        str(int(noVeh[d])),
        str(round((noVeh[d] / total[d]), 3))
    ]))
| gpl-3.0 |
immenz/pyload | module/plugins/accounts/SimplyPremiumCom.py | 2 | 1650 | # -*- coding: utf-8 -*-
from module.common.json_layer import json_loads
from module.plugins.Account import Account
class SimplyPremiumCom(Account):
    __name__    = "SimplyPremiumCom"
    __type__    = "account"
    __version__ = "0.05"

    __description__ = """Simply-Premium.com account plugin"""
    __license__     = "GPLv3"
    __authors__     = [("EvolutionClip", "evolutionclip@live.de")]


    def loadAccountInfo(self, user, req):
        # Defaults: free account, no known expiry, unknown traffic.
        premium     = False
        validuntil  = -1
        trafficleft = None

        json_data = req.load('http://www.simply-premium.com/api/user.php?format=json')
        self.logDebug("JSON data: %s" % json_data)

        res = json_loads(json_data)['result']

        if 'vip' in res and res['vip']:
            premium = True

        if 'timeend' in res and res['timeend']:
            validuntil = float(res['timeend'])

        if 'remain_traffic' in res and res['remain_traffic']:
            trafficleft = float(res['remain_traffic']) / 1024  #@TODO: Remove `/ 1024` in 0.4.10

        return {"premium": premium, "validuntil": validuntil, "trafficleft": trafficleft}


    def login(self, user, data, req):
        req.cj.setCookie("simply-premium.com", "lang", "EN")

        # With a password, log in by name/password; otherwise `user` is
        # treated as a login key.
        if data['password']:
            post_data = {'login_name': user, 'login_pass': data['password']}
        else:
            post_data = {'key': user}

        html = req.load("http://www.simply-premium.com/login.php",
                        post=post_data,
                        decode=True)

        if 'logout' not in html:
            self.wrongPassword()
| gpl-3.0 |
eharney/cinder | cinder/scheduler/filters/capacity_filter.py | 1 | 8982 | # Copyright (c) 2012 Intel
# Copyright (c) 2012 OpenStack Foundation
# Copyright (c) 2015 EMC Corporation
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
from oslo_log import log as logging
from cinder.scheduler import filters
LOG = logging.getLogger(__name__)
class CapacityFilter(filters.BaseBackendFilter):
    """Capacity filters based on volume backend's capacity utilization."""

    def backend_passes(self, backend_state, filter_properties):
        """Return True if host has sufficient capacity.

        Takes reserved space, thin-provisioning support and the
        max_over_subscription_ratio reported by the backend into account.
        """
        volid = None
        # If the volume already exists on this host, don't fail it for
        # insufficient capacity (e.g., if we are retyping)
        if backend_state.backend_id == filter_properties.get('vol_exists_on'):
            return True

        spec = filter_properties.get('request_spec')
        if spec:
            volid = spec.get('volume_id')

        grouping = 'cluster' if backend_state.cluster_name else 'host'
        if filter_properties.get('new_size'):
            # If new_size is passed, we are allocating space to extend a volume
            requested_size = (int(filter_properties.get('new_size')) -
                              int(filter_properties.get('size')))
            LOG.debug('Checking if %(grouping)s %(grouping_name)s can extend '
                      'the volume %(id)s in %(size)s GB',
                      {'grouping': grouping,
                       'grouping_name': backend_state.backend_id, 'id': volid,
                       'size': requested_size})
        else:
            requested_size = filter_properties.get('size')
            LOG.debug('Checking if %(grouping)s %(grouping_name)s can create '
                      'a %(size)s GB volume (%(id)s)',
                      {'grouping': grouping,
                       'grouping_name': backend_state.backend_id, 'id': volid,
                       'size': requested_size})

        # requested_size is 0 means that it's a manage request.
        if requested_size == 0:
            return True

        if backend_state.free_capacity_gb is None:
            # Fail Safe
            LOG.error("Free capacity not set: "
                      "volume node info collection broken.")
            return False

        free_space = backend_state.free_capacity_gb
        total_space = backend_state.total_capacity_gb
        reserved = float(backend_state.reserved_percentage) / 100
        if free_space in ['infinite', 'unknown']:
            # NOTE(zhiteng) for those back-ends cannot report actual
            # available capacity, we assume it is able to serve the
            # request.  Even if it was not, the retry mechanism is
            # able to handle the failure by rescheduling
            return True
        elif total_space in ['infinite', 'unknown']:
            # If total_space is 'infinite' or 'unknown' and reserved
            # is 0, we assume the back-ends can serve the request.
            # If total_space is 'infinite' or 'unknown' and reserved
            # is not 0, we cannot calculate the reserved space.
            # float(total_space) will throw an exception. total*reserved
            # also won't work. So the back-ends cannot serve the request.
            if reserved == 0:
                return True
            LOG.debug("Cannot calculate GB of reserved space (%s%%) with "
                      "backend's reported total capacity '%s'",
                      backend_state.reserved_percentage, total_space)
            return False
        total = float(total_space)
        if total <= 0:
            LOG.warning("Insufficient free space for volume creation. "
                        "Total capacity is %(total).2f on %(grouping)s "
                        "%(grouping_name)s.",
                        {"total": total,
                         "grouping": grouping,
                         "grouping_name": backend_state.backend_id})
            return False

        # Calculate how much free space is left after taking into account
        # the reserved space.
        free = free_space - math.floor(total * reserved)

        # NOTE(xyang): If 'provisioning:type' is 'thick' in extra_specs,
        # we will not use max_over_subscription_ratio and
        # provisioned_capacity_gb to determine whether a volume can be
        # provisioned. Instead free capacity will be used to evaluate.
        thin = True
        vol_type = filter_properties.get('volume_type', {}) or {}
        provision_type = vol_type.get('extra_specs', {}).get(
            'provisioning:type')
        if provision_type == 'thick':
            thin = False

        # Only evaluate using max_over_subscription_ratio if
        # thin_provisioning_support is True. Check if the ratio of
        # provisioned capacity over total capacity has exceeded over
        # subscription ratio.
        if (thin and backend_state.thin_provisioning_support and
                backend_state.max_over_subscription_ratio >= 1):
            provisioned_ratio = ((backend_state.provisioned_capacity_gb +
                                  requested_size) / total)
            if provisioned_ratio > backend_state.max_over_subscription_ratio:
                msg_args = {
                    "provisioned_ratio": provisioned_ratio,
                    "oversub_ratio": backend_state.max_over_subscription_ratio,
                    "grouping": grouping,
                    "grouping_name": backend_state.backend_id,
                }
                LOG.warning(
                    "Insufficient free space for thin provisioning. "
                    "The ratio of provisioned capacity over total capacity "
                    "%(provisioned_ratio).2f has exceeded the maximum over "
                    "subscription ratio %(oversub_ratio).2f on %(grouping)s "
                    "%(grouping_name)s.", msg_args)
                return False
            else:
                # Thin provisioning is enabled and projected over-subscription
                # ratio does not exceed max_over_subscription_ratio. The host
                # passes if "adjusted" free virtual capacity is enough to
                # accommodate the volume. Adjusted free virtual capacity is
                # the currently available free capacity (taking into account
                # of reserved space) which we can over-subscribe.
                adjusted_free_virtual = (
                    free * backend_state.max_over_subscription_ratio)
                res = adjusted_free_virtual >= requested_size
                if not res:
                    msg_args = {"available": adjusted_free_virtual,
                                "size": requested_size,
                                "grouping": grouping,
                                "grouping_name": backend_state.backend_id}
                    LOG.warning("Insufficient free virtual space "
                                "(%(available)sGB) to accommodate thin "
                                "provisioned %(size)sGB volume on %(grouping)s"
                                " %(grouping_name)s.", msg_args)
                return res
        elif thin and backend_state.thin_provisioning_support:
            # Thin support advertised but the over-subscription ratio is
            # invalid (< 1): reject rather than guess.
            LOG.warning("Filtering out %(grouping)s %(grouping_name)s "
                        "with an invalid maximum over subscription ratio "
                        "of %(oversub_ratio).2f. The ratio should be a "
                        "minimum of 1.0.",
                        {"oversub_ratio":
                            backend_state.max_over_subscription_ratio,
                         "grouping": grouping,
                         "grouping_name": backend_state.backend_id})
            return False

        # Thick provisioning (or no thin support): plain free-space check.
        msg_args = {"grouping_name": backend_state.backend_id,
                    "grouping": grouping,
                    "requested": requested_size,
                    "available": free}
        if free < requested_size:
            LOG.warning("Insufficient free space for volume creation "
                        "on %(grouping)s %(grouping_name)s (requested / "
                        "avail): %(requested)s/%(available)s",
                        msg_args)
            return False

        LOG.debug("Space information for volume creation "
                  "on %(grouping)s %(grouping_name)s (requested / avail): "
                  "%(requested)s/%(available)s", msg_args)
        return True
| apache-2.0 |
fighterCui/L4ReFiascoOC | l4/pkg/python/contrib/Lib/distutils/cygwinccompiler.py | 3 | 17292 | """distutils.cygwinccompiler
Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
handles the Cygwin port of the GNU C compiler to Windows. It also contains
the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
cygwin in no-cygwin mode).
"""
# problems:
#
# * if you use a msvc compiled python version (1.5.2)
# 1. you have to insert a __GNUC__ section in its config.h
# 2. you have to generate a import library for its dll
# - create a def-file for python??.dll
# - create a import library using
# dlltool --dllname python15.dll --def python15.def \
# --output-lib libpython15.a
#
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
#
# * We put export_symbols in a def-file, and don't use
# --export-all-symbols because it doesn't worked reliable in some
# tested configurations. And because other windows compilers also
# need their symbols specified this no serious problem.
#
# tested configurations:
#
# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works
# (after patching python's config.h and for C++ some other include files)
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
# (ld doesn't support -shared, so we use dllwrap)
# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now
# - its dllwrap doesn't work, there is a bug in binutils 2.10.90
# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
# - using gcc -mdll instead dllwrap doesn't work without -static because
# it tries to link against dlls instead their import libraries. (If
# it finds the dll first.)
# By specifying -static we force ld to link against the import libraries,
# this is windows standard and there are normally not the necessary symbols
# in the dlls.
# *** only the version of June 2000 shows these problems
# * cygwin gcc 3.2/ld 2.13.90 works
# (ld supports -shared)
# * mingw gcc 3.2/ld 2.13 works
# (ld supports -shared)
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: cygwinccompiler.py 73349 2009-06-11 09:17:19Z tarek.ziade $"
import os,sys,copy
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
from distutils import log
def get_msvcr():
    """Return the MSVC runtime library list to link against, if Python
    was built with MSVC 7.0 or later; an empty list otherwise.

    BUGFIX: previously fell off the end and returned None for non-MSVC
    builds; both callers assign the result to self.dll_libraries and
    later do libraries.extend(self.dll_libraries) in link(), which would
    raise TypeError on None.  An empty list is falsy-equivalent and
    extend-safe.

    Raises ValueError for an MSVC version with no known runtime mapping.
    """
    # sys.version embeds e.g. "[MSC v.1500 ...]" on MSVC-built interpreters.
    msc_pos = sys.version.find('MSC v.')
    if msc_pos == -1:
        # Not an MSVC build (e.g. gcc): no runtime library needed.
        return []
    msc_ver = sys.version[msc_pos+6:msc_pos+10]
    runtimes = {
        '1300': ['msvcr70'],   # MSVC 7.0
        '1310': ['msvcr71'],   # MSVC 7.1
        '1400': ['msvcr80'],   # VS2005 / MSVC 8.0
        '1500': ['msvcr90'],   # VS2008 / MSVC 9.0
    }
    libs = runtimes.get(msc_ver)
    if libs is None:
        raise ValueError("Unknown MS Compiler version %s " % msc_ver)
    return libs
class CygwinCCompiler (UnixCCompiler):
    """UnixCCompiler subclass driving gcc/ld/dllwrap on Cygwin."""

    compiler_type = 'cygwin'
    obj_extension = ".o"
    static_lib_extension = ".a"
    shared_lib_extension = ".dll"
    static_lib_format = "lib%s%s"
    shared_lib_format = "%s%s"
    exe_extension = ".exe"

    def __init__ (self, verbose=0, dry_run=0, force=0):
        """Probe gcc/ld/dllwrap versions and configure the command lines."""

        UnixCCompiler.__init__ (self, verbose, dry_run, force)

        (status, details) = check_config_h()
        self.debug_print("Python's GCC status: %s (details: %s)" %
                         (status, details))
        if status is not CONFIG_H_OK:
            self.warn(
                "Python's pyconfig.h doesn't seem to support your compiler. "
                "Reason: %s. "
                "Compiling may fail because of undefined preprocessor macros."
                % details)

        self.gcc_version, self.ld_version, self.dllwrap_version = \
            get_versions()
        self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" %
                         (self.gcc_version,
                          self.ld_version,
                          self.dllwrap_version) )

        # ld_version >= "2.10.90" and < "2.13" should also be able to use
        # gcc -mdll instead of dllwrap
        # Older dllwraps had own version numbers, newer ones use the
        # same as the rest of binutils ( also ld )
        # dllwrap 2.10.90 is buggy
        # NOTE(review): these are plain string comparisons, which are
        # lexicographic ("2.9" >= "2.13" is True) -- kept as-is.
        if self.ld_version >= "2.10.90":
            self.linker_dll = "gcc"
        else:
            self.linker_dll = "dllwrap"

        # ld_version >= "2.13" support -shared so use it instead of
        # -mdll -static
        if self.ld_version >= "2.13":
            shared_option = "-shared"
        else:
            shared_option = "-mdll -static"

        # Hard-code GCC because that's what this is all about.
        # XXX optimization, warnings etc. should be customizable.
        self.set_executables(compiler='gcc -mcygwin -O -Wall',
                             compiler_so='gcc -mcygwin -mdll -O -Wall',
                             compiler_cxx='g++ -mcygwin -O -Wall',
                             linker_exe='gcc -mcygwin',
                             linker_so=('%s -mcygwin %s' %
                                        (self.linker_dll, shared_option)))

        # cygwin and mingw32 need different sets of libraries
        if self.gcc_version == "2.91.57":
            # cygwin shouldn't need msvcrt, but without the dlls will crash
            # (gcc version 2.91.57) -- perhaps something about initialization
            self.dll_libraries=["msvcrt"]
            self.warn(
                "Consider upgrading to a newer version of gcc")
        else:
            # Include the appropriate MSVC runtime library if Python was built
            # with MSVC 7.0 or later.
            # NOTE(review): get_msvcr() yields no list for non-MSVC builds,
            # while link() below extends libraries with dll_libraries --
            # confirm this combination on a GCC-built Python.
            self.dll_libraries = get_msvcr()

    # __init__ ()

    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        """Compile one source file; .rc/.res resources go through windres."""
        if ext == '.rc' or ext == '.res':
            # gcc needs '.res' and '.rc' compiled to object files !!!
            try:
                self.spawn(["windres", "-i", src, "-o", obj])
            except DistutilsExecError, msg:
                raise CompileError, msg
        else: # for other files use the C-compiler
            try:
                self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
                           extra_postargs)
            except DistutilsExecError, msg:
                raise CompileError, msg

    def link (self,
              target_desc,
              objects,
              output_filename,
              output_dir=None,
              libraries=None,
              library_dirs=None,
              runtime_library_dirs=None,
              export_symbols=None,
              debug=0,
              extra_preargs=None,
              extra_postargs=None,
              build_temp=None,
              target_lang=None):
        """Link object files, generating a .def file (and, for dllwrap,
        an import library) when export symbols are given."""

        # use separate copies, so we can modify the lists
        extra_preargs = copy.copy(extra_preargs or [])
        libraries = copy.copy(libraries or [])
        objects = copy.copy(objects or [])

        # Additional libraries
        libraries.extend(self.dll_libraries)

        # handle export symbols by creating a def-file
        # with executables this only works with gcc/ld as linker
        if ((export_symbols is not None) and
            (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
            # (The linker doesn't do anything if output is up-to-date.
            # So it would probably better to check if we really need this,
            # but for this we had to insert some unchanged parts of
            # UnixCCompiler, and this is not what we want.)

            # we want to put some files in the same directory as the
            # object files are, build_temp doesn't help much
            # where are the object files
            temp_dir = os.path.dirname(objects[0])
            # name of dll to give the helper files the same base name
            (dll_name, dll_extension) = os.path.splitext(
                os.path.basename(output_filename))

            # generate the filenames for these files
            def_file = os.path.join(temp_dir, dll_name + ".def")
            lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")

            # Generate .def file
            contents = [
                "LIBRARY %s" % os.path.basename(output_filename),
                "EXPORTS"]
            for sym in export_symbols:
                contents.append(sym)
            self.execute(write_file, (def_file, contents),
                         "writing %s" % def_file)

            # next add options for def-file and to creating import libraries

            # dllwrap uses different options than gcc/ld
            if self.linker_dll == "dllwrap":
                extra_preargs.extend(["--output-lib", lib_file])
                # for dllwrap we have to use a special option
                extra_preargs.extend(["--def", def_file])
            # we use gcc/ld here and can be sure ld is >= 2.9.10
            else:
                # doesn't work: bfd_close build\...\libfoo.a: Invalid operation
                #extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file])
                # for gcc/ld the def-file is specified as any object files
                objects.append(def_file)

        #end: if ((export_symbols is not None) and
        #        (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):

        # who wants symbols and a many times larger output file
        # should explicitly switch the debug mode on
        # otherwise we let dllwrap/ld strip the output file
        # (On my machine: 10KB < stripped_file < ??100KB
        #   unstripped_file = stripped_file + XXX KB
        #  ( XXX=254 for a typical python extension))
        if not debug:
            extra_preargs.append("-s")

        UnixCCompiler.link(self,
                           target_desc,
                           objects,
                           output_filename,
                           output_dir,
                           libraries,
                           library_dirs,
                           runtime_library_dirs,
                           None, # export_symbols, we do this in our def-file
                           debug,
                           extra_preargs,
                           extra_postargs,
                           build_temp,
                           target_lang)

    # link ()

    # -- Miscellaneous methods -----------------------------------------

    # overwrite the one from CCompiler to support rc and res-files
    def object_filenames (self,
                          source_filenames,
                          strip_dir=0,
                          output_dir=''):
        """Map source names to object names; .rc/.res keep their extension
        in the object name so _compile can route them through windres."""
        if output_dir is None: output_dir = ''
        obj_names = []
        for src_name in source_filenames:
            # use normcase to make sure '.rc' is really '.rc' and not '.RC'
            (base, ext) = os.path.splitext (os.path.normcase(src_name))
            if ext not in (self.src_extensions + ['.rc','.res']):
                raise UnknownFileError, \
                      "unknown file type '%s' (from '%s')" % \
                      (ext, src_name)
            if strip_dir:
                base = os.path.basename (base)
            if ext == '.res' or ext == '.rc':
                # these need to be compiled to object files
                obj_names.append (os.path.join (output_dir,
                                                base + ext + self.obj_extension))
            else:
                obj_names.append (os.path.join (output_dir,
                                                base + self.obj_extension))
        return obj_names

    # object_filenames ()

# class CygwinCCompiler
# the same as cygwin plus some additional parameters
class Mingw32CCompiler (CygwinCCompiler):
    """CygwinCCompiler variant driving gcc in no-cygwin (mingw32) mode."""

    compiler_type = 'mingw32'

    def __init__ (self, verbose=0, dry_run=0, force=0):

        CygwinCCompiler.__init__ (self, verbose, dry_run, force)

        # ld >= 2.13 understands -shared directly; older linkers need the
        # -mdll -static combination instead.
        if self.ld_version >= "2.13":
            shared_opt = "-shared"
        else:
            shared_opt = "-mdll -static"

        # A real mingw32 doesn't need to specify a different entry point,
        # but cygwin 2.91.57 in no-cygwin-mode needs it.
        if self.gcc_version <= "2.91.57":
            entry = '--entry _DllMain@12'
        else:
            entry = ''

        self.set_executables(compiler='gcc -mno-cygwin -O -Wall',
                             compiler_so='gcc -mno-cygwin -mdll -O -Wall',
                             compiler_cxx='g++ -mno-cygwin -O -Wall',
                             linker_exe='gcc -mno-cygwin',
                             linker_so='%s -mno-cygwin %s %s'
                                       % (self.linker_dll, shared_opt, entry))
        # Maybe we should also append -mthreads, but then the finished
        # dlls need another dll (mingwm10.dll see Mingw32 docs)
        # (-mthreads: Support thread-safe exception handling on `Mingw32')

        # Include the appropriate MSVC runtime library if Python was built
        # with MSVC 7.0 or later (unlike plain Cygwin, mingw32 needs no
        # other extra libraries).
        self.dll_libraries = get_msvcr()

# class Mingw32CCompiler
# Because these compilers aren't configured in Python's pyconfig.h file by
# default, we should at least warn the user if he is using a unmodified
# version.

# Status constants returned (as the first tuple element) by check_config_h().
CONFIG_H_OK = "ok"
CONFIG_H_NOTOK = "not ok"
CONFIG_H_UNCERTAIN = "uncertain"
def check_config_h():
    """Check if the current Python installation (specifically, pyconfig.h)
    appears amenable to building extensions with GCC.  Returns a tuple
    (status, details), where 'status' is one of the following constants:

    CONFIG_H_OK
      all is well, go ahead and compile
    CONFIG_H_NOTOK
      doesn't look good
    CONFIG_H_UNCERTAIN
      not sure -- unable to read pyconfig.h

    'details' is a human-readable string explaining the situation.

    Note there are two ways to conclude "OK": either 'sys.version' contains
    the string "GCC" (implying that this Python was built with GCC), or the
    installed "pyconfig.h" contains the string "__GNUC__".
    """

    # XXX since this function also checks sys.version, it's not strictly a
    # "pyconfig.h" check -- should probably be renamed...

    from distutils import sysconfig

    # If sys.version contains GCC then python was compiled with GCC, and the
    # pyconfig.h file should be OK.  The "in" operator replaces the
    # deprecated string.find() call.
    if "GCC" in sys.version:
        return (CONFIG_H_OK, "sys.version mentions 'GCC'")

    fn = sysconfig.get_config_h_filename()
    try:
        # It would probably be better to read single lines to search.
        # But we do this only once, and it is fast enough.
        f = open(fn)
        try:
            s = f.read()
        finally:
            # Close in a finally clause so the handle is not leaked if
            # read() itself raises.
            f.close()
    except IOError as exc:
        # If we can't read this file, we cannot say it is wrong; the
        # compiler will complain later about this file as missing.
        return (CONFIG_H_UNCERTAIN,
                "couldn't read '%s': %s" % (fn, exc.strerror))
    else:
        # "pyconfig.h" contains an "#ifdef __GNUC__" or something similar.
        if "__GNUC__" in s:
            return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn)
        else:
            return (CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)
def get_versions():
    """ Try to find out the versions of gcc, ld and dllwrap.
    If not possible it returns None for it.

    Returns a (gcc_version, ld_version, dllwrap_version) tuple of
    LooseVersion instances (or None where the tool is missing or its
    output could not be parsed).
    """
    from distutils.version import LooseVersion
    from distutils.spawn import find_executable
    import re

    def _probe_version(tool, args, pattern):
        # Run `<tool> <args>`, scan its output for a dotted version number
        # and return it as a LooseVersion, or None on any failure.
        exe = find_executable(tool)
        if not exe:
            return None
        out = os.popen(exe + ' ' + args, 'r')
        try:
            out_string = out.read()
        finally:
            out.close()
        result = re.search(pattern, out_string)
        if result:
            return LooseVersion(result.group(1))
        return None

    # Raw strings avoid invalid-escape warnings for the \d escapes.
    gcc_version = _probe_version('gcc', '-dumpversion', r'(\d+\.\d+(\.\d+)*)')
    ld_version = _probe_version('ld', '-v', r'(\d+\.\d+(\.\d+)*)')
    # dllwrap prints its version after a space, hence the leading blank.
    dllwrap_version = _probe_version('dllwrap', '--version',
                                     r' (\d+\.\d+(\.\d+)*)')
    return (gcc_version, ld_version, dllwrap_version)
| gpl-2.0 |
lukauskas/scipy | scipy/interpolate/setup.py | 106 | 1596 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from os.path import join
def configuration(parent_package='',top_path=None):
    """Build the numpy.distutils configuration for scipy.interpolate.

    Registers the Fortran FITPACK library and the C/C++/f2py extension
    modules that make up the subpackage.
    """
    from numpy.distutils.misc_util import Configuration
    from numpy.distutils.system_info import get_info
    # notfound_action=2 makes the build fail outright if LAPACK is missing.
    lapack_opt = get_info('lapack_opt', notfound_action=2)

    config = Configuration('interpolate', parent_package, top_path)

    # Fortran sources for the FITPACK spline library, compiled once and
    # linked into several extensions below.
    fitpack_src = [join('fitpack', '*.f')]
    config.add_library('fitpack', sources=fitpack_src)

    config.add_extension('interpnd',
                         sources=['interpnd.c'])

    # _ppoly needs LAPACK, hence the extra build flags.
    config.add_extension('_ppoly',
                         sources=['_ppoly.c'],
                         **lapack_opt)

    config.add_extension('_fitpack',
                         sources=['src/_fitpackmodule.c'],
                         libraries=['fitpack'],
                         depends=(['src/__fitpack.h','src/multipack.h']
                                  + fitpack_src)
                         )

    # f2py-generated wrapper around the same FITPACK library.
    config.add_extension('dfitpack',
                         sources=['src/fitpack.pyf'],
                         libraries=['fitpack'],
                         depends=fitpack_src,
                         )

    config.add_extension('_interpolate',
                         sources=['src/_interpolate.cpp'],
                         include_dirs=['src'],
                         depends=['src/interpolate.h'])

    config.add_data_dir('tests')

    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| bsd-3-clause |
hchen1202/django-react | virtualenv/lib/python3.6/site-packages/django/core/mail/backends/console.py | 696 | 1477 | """
Email backend that writes messages to console instead of sending them.
"""
import sys
import threading
from django.core.mail.backends.base import BaseEmailBackend
from django.utils import six
class EmailBackend(BaseEmailBackend):
    """Email backend that writes each message to a stream (stdout by
    default) instead of sending it."""

    def __init__(self, *args, **kwargs):
        # 'stream' may be supplied by callers to redirect the output.
        self.stream = kwargs.pop('stream', sys.stdout)
        # Reentrant lock so send_messages() is safe to call from
        # multiple threads sharing one backend instance.
        self._lock = threading.RLock()
        super(EmailBackend, self).__init__(*args, **kwargs)

    def write_message(self, message):
        """Write one EmailMessage to the stream, followed by a 79-dash ruler."""
        msg = message.message()
        msg_data = msg.as_bytes()
        if six.PY3:
            # as_bytes() returns bytes on Python 3; decode with the
            # message's charset so the text stream can accept it.
            charset = msg.get_charset().get_output_charset() if msg.get_charset() else 'utf-8'
            msg_data = msg_data.decode(charset)
        self.stream.write('%s\n' % msg_data)
        self.stream.write('-' * 79)
        self.stream.write('\n')

    def send_messages(self, email_messages):
        """Write all messages to the stream in a thread-safe way."""
        if not email_messages:
            # NOTE: returns None here, but an int count below.
            return
        msg_count = 0
        with self._lock:
            try:
                stream_created = self.open()
                for message in email_messages:
                    self.write_message(message)
                    self.stream.flush()  # flush after each message
                    msg_count += 1
                if stream_created:
                    self.close()
            except Exception:
                if not self.fail_silently:
                    raise
        return msg_count
| mit |
mganeva/mantid | Framework/PythonInterface/test/python/plugins/algorithms/GetNegMuMuonicXRDTest.py | 1 | 7298 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import unittest
from mantid.kernel import *
from mantid.api import *
from mantid.simpleapi import *
class GetNegMuMuonicXRDTest(unittest.TestCase):
    """Tests for the GetNegMuMuonicXRD algorithm.

    Each test builds reference workspaces by hand from known muonic X-ray
    transition energies and checks that the algorithm reproduces them
    exactly, for a single-entry and a multi-entry group workspace.
    """

    # Reference muonic X-ray transition energies for gold and arsenic.
    au_muonic_xr = [8135.2,8090.6,8105.4,8069.4,5764.89,5594.97,3360.2,
                    3206.8,2474.22,2341.21,2304.44,1436.05,1391.58,1104.9,
                    899.14,869.98,405.654,400.143]
    as_muonic_xr = [1866.9,1855.8,436.6,427.5]

    def _assert_spectrum_data_equal(self, expected_ws, actual_ws):
        """Assert two workspaces hold identical first-spectrum X and Y data.

        Element-wise loops are used instead of assertItemsEqual, which is
        unavailable on the older Python shipped with RHEL6.
        """
        self.assertEqual(expected_ws.getNumberHistograms(),
                         actual_ws.getNumberHistograms())
        expected_x, actual_x = expected_ws.readX(0), actual_ws.readX(0)
        self.assertEqual(len(expected_x), len(actual_x))
        for expected, actual in zip(expected_x, actual_x):
            self.assertEqual(expected, actual)
        expected_y, actual_y = expected_ws.readY(0), actual_ws.readY(0)
        self.assertEqual(len(expected_y), len(actual_y))
        for expected, actual in zip(expected_y, actual_y):
            self.assertEqual(expected, actual)

    # TESTING FOR ONE WORKSPACE IN GROUP WORKSPACE
    def test_muonic_xrd_single_ws_in_group(self):
        self.au_muonic_xr.sort()
        self.as_muonic_xr.sort()
        # Set up the expected workspace manually.
        au_peak_values = self.au_muonic_xr
        y_position = -0.001  # same as default used by GetNegMuMuonicXRD
        y_pos_ws = [y_position] * len(au_peak_values)
        au_muon_xr_ws = CreateWorkspace(au_peak_values[:], y_pos_ws[:])
        self.assertIsNotNone(au_muon_xr_ws)
        au_muon_group = GroupWorkspaces(au_muon_xr_ws)
        self.assertIsNotNone(au_muon_group)
        # Run the algorithm, relying on its default y-axis position.
        neg_mu_xr_group = GetNegMuMuonicXRD("Au")
        self.assertIsNotNone(neg_mu_xr_group)
        # Both groups should contain exactly one workspace.
        self.assertEqual(au_muon_group.getNumberOfEntries(),
                         neg_mu_xr_group.getNumberOfEntries())
        self.assertEqual(au_muon_group.size(), 1)
        self.assertEqual(neg_mu_xr_group.size(), 1)
        # Compare the single workspace in each group.
        neg_mu_xr_ws = neg_mu_xr_group[0]
        au_muon_ws = au_muon_group[0]
        self.assertEqual(au_muon_ws.blocksize(), neg_mu_xr_ws.blocksize())
        self._assert_spectrum_data_equal(au_muon_ws, neg_mu_xr_ws)

    # TESTING FOR MORE THAN ONE WORKSPACE IN GROUP WORKSPACE
    def test_muonic_xrd_more_than_one_ws_in_group(self):
        self.au_muonic_xr.sort()
        self.as_muonic_xr.sort()
        y_position = 0.2
        # Set up the expected gold workspace.
        au_peak_values = self.au_muonic_xr
        self.assertIsNotNone(au_peak_values)
        au_y_pos_ws = [y_position] * len(au_peak_values)
        # Set up the expected arsenic workspace.
        as_peak_values = self.as_muonic_xr
        self.assertIsNotNone(as_peak_values)
        as_y_pos_ws = [y_position] * len(as_peak_values)
        au_muon_xr_ws = CreateWorkspace(au_peak_values, au_y_pos_ws[:])
        self.assertIsNotNone(au_muon_xr_ws)
        as_muon_xr_ws = CreateWorkspace(as_peak_values, as_y_pos_ws[:])
        self.assertIsNotNone(as_muon_xr_ws)
        grouped_muon_ws = GroupWorkspaces([au_muon_xr_ws, as_muon_xr_ws])
        self.assertIsNotNone(grouped_muon_ws)
        # Run the algorithm for both elements with the same y position.
        group_muonic_xr_ws = GetNegMuMuonicXRD("Au,As", 0.2)
        self.assertIsNotNone(group_muonic_xr_ws)
        # Compare each workspace in the expected and produced groups.
        for index in range(2):
            self._assert_spectrum_data_equal(grouped_muon_ws[index],
                                             group_muonic_xr_ws[index])


if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
komsas/OpenUpgrade | addons/payment_buckaroo/models/buckaroo.py | 102 | 8291 | # -*- coding: utf-'8' "-*-"
from hashlib import sha1
import logging
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_buckaroo.controllers.main import BuckarooController
from openerp.osv import osv, fields
from openerp.tools.float_utils import float_compare
_logger = logging.getLogger(__name__)
class AcquirerBuckaroo(osv.Model):
    """payment.acquirer extension implementing the Buckaroo HTML checkout."""
    _inherit = 'payment.acquirer'

    def _get_buckaroo_urls(self, cr, uid, environment, context=None):
        """ Buckaroo URLs

        Return the checkout form URL for the given environment; anything
        other than 'prod' is treated as the test environment.
        """
        if environment == 'prod':
            return {
                'buckaroo_form_url': 'https://checkout.buckaroo.nl/html/',
            }
        else:
            return {
                'buckaroo_form_url': 'https://testcheckout.buckaroo.nl/html/',
            }

    def _get_providers(self, cr, uid, context=None):
        # Register 'buckaroo' in the acquirer provider selection list.
        providers = super(AcquirerBuckaroo, self)._get_providers(cr, uid, context=context)
        providers.append(['buckaroo', 'Buckaroo'])
        return providers

    # Credentials issued by Buckaroo, required once the provider is selected.
    _columns = {
        'brq_websitekey': fields.char('WebsiteKey', required_if_provider='buckaroo'),
        'brq_secretkey': fields.char('SecretKey', required_if_provider='buckaroo'),
    }

    def _buckaroo_generate_digital_sign(self, acquirer, inout, values):
        """ Generate the shasign for incoming or outgoing communications.

        :param browse acquirer: the payment.acquirer browse record. It should
                                have a shakey in shaky out
        :param string inout: 'in' (openerp contacting buckaroo) or 'out' (buckaroo
                             contacting openerp).
        :param dict values: transaction values

        :return string: shasign
        """
        assert inout in ('in', 'out')
        assert acquirer.provider == 'buckaroo'
        # Fixed field list (and order) signed on outgoing requests.
        keys = "add_returndata Brq_amount Brq_culture Brq_currency Brq_invoicenumber Brq_return Brq_returncancel Brq_returnerror Brq_returnreject brq_test Brq_websitekey".split()

        def get_value(key):
            # Missing/falsy values are signed as the empty string.
            if values.get(key):
                return values[key]
            return ''

        if inout == 'out':
            # Feedback from Buckaroo: sign every received field, sorted by
            # uppercased name, excluding the signature itself.
            if 'BRQ_SIGNATURE' in values:
                del values['BRQ_SIGNATURE']
            items = sorted((k.upper(), v) for k, v in values.items())
            sign = ''.join('%s=%s' % (k, v) for k, v in items)
        else:
            sign = ''.join('%s=%s' % (k,get_value(k)) for k in keys)
        #Add the pre-shared secret key at the end of the signature
        sign = sign + acquirer.brq_secretkey
        # NOTE(review): urlparse.parse_qsl() returns a list of tuples, which
        # sha1() cannot hash -- this branch looks broken (an encode() was
        # probably intended); confirm against upstream payment_buckaroo.
        if isinstance(sign, str):
            sign = urlparse.parse_qsl(sign)
        shasign = sha1(sign).hexdigest()
        return shasign

    def buckaroo_form_generate_values(self, cr, uid, id, partner_values, tx_values, context=None):
        """Build the POST values for the Buckaroo redirect form,
        including the computed Brq_signature."""
        base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
        acquirer = self.browse(cr, uid, id, context=context)
        buckaroo_tx_values = dict(tx_values)
        buckaroo_tx_values.update({
            'Brq_websitekey': acquirer.brq_websitekey,
            'Brq_amount': tx_values['amount'],
            'Brq_currency': tx_values['currency'] and tx_values['currency'].name or '',
            'Brq_invoicenumber': tx_values['reference'],
            # NOTE(review): test mode is hard-coded here regardless of the
            # acquirer's environment -- verify before production use.
            'brq_test' : True,
            'Brq_return': '%s' % urlparse.urljoin(base_url, BuckarooController._return_url),
            'Brq_returncancel': '%s' % urlparse.urljoin(base_url, BuckarooController._cancel_url),
            'Brq_returnerror': '%s' % urlparse.urljoin(base_url, BuckarooController._exception_url),
            'Brq_returnreject': '%s' % urlparse.urljoin(base_url, BuckarooController._reject_url),
            'Brq_culture': 'en-US',
        })
        # Forward an optional return_url back to ourselves via add_returndata.
        if buckaroo_tx_values.get('return_url'):
            buckaroo_tx_values['add_returndata'] = {'return_url': '%s' % buckaroo_tx_values.pop('return_url')}
        else:
            buckaroo_tx_values['add_returndata'] = ''
        buckaroo_tx_values['Brq_signature'] = self._buckaroo_generate_digital_sign(acquirer, 'in', buckaroo_tx_values)
        return partner_values, buckaroo_tx_values

    def buckaroo_get_form_action_url(self, cr, uid, id, context=None):
        """Return the checkout form action URL for this acquirer's environment."""
        acquirer = self.browse(cr, uid, id, context=context)
        return self._get_buckaroo_urls(cr, uid, acquirer.environment, context=context)['buckaroo_form_url']
class TxBuckaroo(osv.Model):
    """payment.transaction extension processing Buckaroo feedback data."""
    _inherit = 'payment.transaction'

    # buckaroo status
    # Numeric BRQ_STATUSCODE values grouped by outcome.
    _buckaroo_valid_tx_status = [190]
    _buckaroo_pending_tx_status = [790, 791, 792, 793]
    _buckaroo_cancel_tx_status = [890, 891]
    # NOTE(review): the error and reject lists are never consulted in
    # _buckaroo_form_validate below; those codes fall into the generic
    # error branch -- confirm whether that is intended.
    _buckaroo_error_tx_status = [490, 491, 492]
    _buckaroo_reject_tx_status = [690]

    _columns = {
        'buckaroo_txnid': fields.char('Transaction ID'),
    }

    # --------------------------------------------------
    # FORM RELATED METHODS
    # --------------------------------------------------

    def _buckaroo_form_get_tx_from_data(self, cr, uid, data, context=None):
        """ Given a data dict coming from buckaroo, verify it and find the related
        transaction record. """
        reference, pay_id, shasign = data.get('BRQ_INVOICENUMBER'), data.get('BRQ_PAYMENT'), data.get('BRQ_SIGNATURE')
        if not reference or not pay_id or not shasign:
            error_msg = 'Buckaroo: received data with missing reference (%s) or pay_id (%s) or shashign (%s)' % (reference, pay_id, shasign)
            _logger.error(error_msg)
            raise ValidationError(error_msg)
        # The reference must match exactly one stored transaction.
        tx_ids = self.search(cr, uid, [('reference', '=', reference)], context=context)
        if not tx_ids or len(tx_ids) > 1:
            error_msg = 'Buckaroo: received data for reference %s' % (reference)
            if not tx_ids:
                error_msg += '; no order found'
            else:
                error_msg += '; multiple order found'
            _logger.error(error_msg)
            raise ValidationError(error_msg)
        tx = self.pool['payment.transaction'].browse(cr, uid, tx_ids[0], context=context)

        #verify shasign
        # Recompute the signature over the received data and compare
        # case-insensitively with the one Buckaroo sent.
        shasign_check = self.pool['payment.acquirer']._buckaroo_generate_digital_sign(tx.acquirer_id, 'out' ,data)
        if shasign_check.upper() != shasign.upper():
            error_msg = 'Buckaroo: invalid shasign, received %s, computed %s, for data %s' % (shasign, shasign_check, data)
            _logger.error(error_msg)
            raise ValidationError(error_msg)

        return tx

    def _buckaroo_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
        """Return a list of (name, received, expected) tuples for feedback
        fields that do not match the stored transaction."""
        invalid_parameters = []

        if tx.acquirer_reference and data.get('BRQ_TRANSACTIONS') != tx.acquirer_reference:
            invalid_parameters.append(('Transaction Id', data.get('BRQ_TRANSACTIONS'), tx.acquirer_reference))
        # check what is buyed
        # Compare the amount to 2 decimal places to avoid float noise.
        if float_compare(float(data.get('BRQ_AMOUNT', '0.0')), tx.amount, 2) != 0:
            invalid_parameters.append(('Amount', data.get('BRQ_AMOUNT'), '%.2f' % tx.amount))
        if data.get('BRQ_CURRENCY') != tx.currency_id.name:
            invalid_parameters.append(('Currency', data.get('BRQ_CURRENCY'), tx.currency_id.name))

        return invalid_parameters

    def _buckaroo_form_validate(self, cr, uid, tx, data, context=None):
        """Map the BRQ_STATUSCODE to a transaction state and store the
        Buckaroo transaction id.  Returns True on done/pending/cancel,
        False on anything else (recorded as an error)."""
        status_code = int(data.get('BRQ_STATUSCODE','0'))
        if status_code in self._buckaroo_valid_tx_status:
            tx.write({
                'state': 'done',
                'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
            })
            return True
        elif status_code in self._buckaroo_pending_tx_status:
            tx.write({
                'state': 'pending',
                'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
            })
            return True
        elif status_code in self._buckaroo_cancel_tx_status:
            tx.write({
                'state': 'cancel',
                'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
            })
            return True
        else:
            error = 'Buckaroo: feedback error'
            _logger.info(error)
            tx.write({
                'state': 'error',
                'state_message': error,
                'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
            })
            return False
| agpl-3.0 |
vorushin/FamilyFeed | sources/facebook.py | 1 | 1748 | from datetime import datetime
import json
from urllib2 import urlopen, HTTPError
from django.db.models import Max
from sources.models import FacebookPost
def time(s):
    """Convert a Facebook timestamp string such as
    '2014-01-02T03:04:05+0000' into a naive datetime."""
    fb_timestamp_format = '%Y-%m-%dT%H:%M:%S+0000'
    return datetime.strptime(s, fb_timestamp_format)
def post_text(item):
    """Concatenate a post's message and description; either may be absent."""
    message = item.get('message', u'')
    description = item.get('description', u'')
    return message + description
def list_posts(access_token):
    """Return the cached Facebook posts for *access_token*, newest first.

    NOTE(review): the block that refreshed the cache from the Graph API is
    commented out below, so this currently serves only rows already in the
    FacebookPost table, and latest_created_time is computed but unused.
    """
    latest_created_time = FacebookPost.objects\
        .filter(access_token=access_token)\
        .aggregate(Max('created_time'))['created_time__max']
    '''for post in new_posts(access_token, latest_created_time):
        if not FacebookPost.objects.filter(
                access_token=access_token,
                created_time=time(post['created_time'])).exists():
            FacebookPost.objects.create(
                access_token=access_token,
                created_time=time(post['created_time']),
                data=post)'''
    return [p.data for p in FacebookPost.objects \
        .filter(access_token=access_token) \
        .order_by('-created_time')]
def new_posts(access_token, older_than=None):
    """Yield posts from the user's Graph API feed, following paging links.

    Stops at the first empty page, at the first item not newer than
    *older_than*, or when fetching the next page fails with an HTTP error.
    Only items that carry a 'message' are yielded.
    """
    graph_url = 'https://graph.facebook.com/me/feed?access_token=%s' % \
        access_token
    graph_url += '&limit=1000'
    if older_than:
        # Hint the API to only return newer items; each item's
        # created_time is still re-checked below.
        graph_url += '&since=' + older_than.isoformat()
    resp = json.loads(urlopen(graph_url).read())
    while resp['data']:
        for item in resp['data']:
            if older_than:
                # Feed is newest-first, so the first old item ends the scan.
                if time(item['created_time']) <= older_than:
                    return
            if item.get('message'):
                yield item
        try:
            resp = json.loads(urlopen(resp['paging']['next']).read())
        except HTTPError:
            break
| mit |
Carpetsmoker/qutebrowser | tests/end2end/features/test_editor_bdd.py | 2 | 2233 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2017 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
import sys
import json
import textwrap
import pytest_bdd as bdd
# Bind every scenario in editor.feature to the step definitions below.
bdd.scenarios('editor.feature')


@bdd.when(bdd.parsers.parse('I set up a fake editor replacing "{text}" by '
                            '"{replacement}"'))
def set_up_editor_replacement(quteproc, server, tmpdir, text, replacement):
    """Set up editor.command to a small python script doing a replacement."""
    # "(port)" in the step text is substituted with the test server's port
    # so scenarios can reference URLs served by the fixture.
    text = text.replace('(port)', str(server.port))
    script = tmpdir / 'script.py'
    # The generated script rewrites the file qutebrowser hands to the
    # "editor", replacing {text} with {replacement} in place.
    script.write(textwrap.dedent("""
        import sys
        with open(sys.argv[1], encoding='utf-8') as f:
            data = f.read()
        data = data.replace("{text}", "{replacement}")
        with open(sys.argv[1], 'w', encoding='utf-8') as f:
            f.write(data)
    """.format(text=text, replacement=replacement)))
    editor = json.dumps([sys.executable, str(script), '{}'])
    quteproc.set_setting('editor.command', editor)
@bdd.when(bdd.parsers.parse('I set up a fake editor returning "{text}"'))
def set_up_editor(quteproc, server, tmpdir, text):
    """Set up editor.command to a small python script inserting a text."""
    script = tmpdir / 'script.py'
    # The generated script overwrites the edited file with {text},
    # simulating an editor session that produced that content.
    script.write(textwrap.dedent("""
        import sys
        with open(sys.argv[1], 'w', encoding='utf-8') as f:
            f.write({text!r})
    """.format(text=text)))
    editor = json.dumps([sys.executable, str(script), '{}'])
    quteproc.set_setting('editor.command', editor)
| gpl-3.0 |
dethos/cloudroutes-service | src/actions/actions/aws-ec2stop/__init__.py | 6 | 1495 | #!/usr/bin/python
######################################################################
# Cloud Routes Bridge
# -------------------------------------------------------------------
# Actions Module
######################################################################
import boto.ec2
import time
def action(**kwargs):
    ''' This method is called to action a reaction '''
    redata = kwargs['redata']
    jdata = kwargs['jdata']
    logger = kwargs['logger']

    should_run = True
    # Only react once the failure count has reached the trigger threshold.
    if redata['trigger'] > jdata['failcount']:
        should_run = False
    # Honour the configured frequency: skip if this reaction ran recently.
    elapsed = time.time() - float(redata['lastrun'])
    if elapsed < redata['frequency']:
        should_run = False
    # Only react to the monitor status this reaction is configured for.
    if redata['data']['call_on'] not in jdata['check']['status']:
        should_run = False

    if should_run:
        return actionEC2(redata, jdata, logger)
    return None
def actionEC2(redata, jdata, logger):
    ''' Perform EC2 Actions

    Connects to the configured AWS region and stops the configured
    instance.  Returns True on success, False if the connection or the
    stop request fails.
    '''
    try:
        conn = boto.ec2.connect_to_region(
            redata['data']['region'],
            aws_access_key_id=redata['data']['aws_access_key'],
            aws_secret_access_key=redata['data']['aws_secret_key'])
    # except Exception (not bare except) so SystemExit/KeyboardInterrupt
    # still propagate.
    except Exception:
        line = "aws-ec2stop: Could not connect to AWS for monitor %s" % jdata[
            'cid']
        logger.info(line)
        return False
    try:
        conn.stop_instances(instance_ids=[redata['data']['instance_id']])
        return True
    except Exception:
        return False
| agpl-3.0 |
JCBarahona/edX | lms/djangoapps/certificates/views/support.py | 52 | 5649 | """
Certificate end-points used by the student support UI.
See lms/djangoapps/support for more details.
"""
import logging
from functools import wraps
from django.http import (
HttpResponse,
HttpResponseBadRequest,
HttpResponseForbidden,
HttpResponseServerError
)
from django.views.decorators.http import require_GET, require_POST
from django.db.models import Q
from django.utils.translation import ugettext as _
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.django import modulestore
from student.models import User, CourseEnrollment
from courseware.access import has_access
from util.json_request import JsonResponse
from certificates import api
log = logging.getLogger(__name__)
def require_certificate_permission(func):
    """
    View decorator that requires permission to view and regenerate certificates.
    """
    @wraps(func)
    def inner(request, *args, **kwargs):  # pylint:disable=missing-docstring
        # Only users with the global "certificates" access may proceed;
        # everyone else gets a 403 with no further detail.
        if has_access(request.user, "certificates", "global"):
            return func(request, *args, **kwargs)
        else:
            return HttpResponseForbidden()
    return inner
@require_GET
@require_certificate_permission
def search_by_user(request):
"""
Search for certificates for a particular user.
Supports search by either username or email address.
Arguments:
request (HttpRequest): The request object.
Returns:
JsonResponse
Example Usage:
GET /certificates/search?query=bob@example.com
Response: 200 OK
Content-Type: application/json
[
{
"username": "bob",
"course_key": "edX/DemoX/Demo_Course",
"type": "verified",
"status": "downloadable",
"download_url": "http://www.example.com/cert.pdf",
"grade": "0.98",
"created": 2015-07-31T00:00:00Z,
"modified": 2015-07-31T00:00:00Z
}
]
"""
query = request.GET.get("query")
if not query:
return JsonResponse([])
try:
user = User.objects.get(Q(email=query) | Q(username=query))
except User.DoesNotExist:
return JsonResponse([])
certificates = api.get_certificates_for_user(user.username)
for cert in certificates:
cert["course_key"] = unicode(cert["course_key"])
cert["created"] = cert["created"].isoformat()
cert["modified"] = cert["modified"].isoformat()
return JsonResponse(certificates)
def _validate_regen_post_params(params):
    """
    Validate request POST parameters to the regenerate certificates end-point.

    Arguments:
        params (QueryDict): Request parameters.

    Returns: tuple of (dict, HttpResponse)
        Exactly one element is non-None: a dict with "user" and "course_key"
        entries on success, or a 400 response describing the failure.

    """
    # Validate the username
    try:
        username = params.get("username")
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        msg = _("User {username} does not exist").format(username=username)
        return None, HttpResponseBadRequest(msg)

    # Validate the course key
    try:
        course_key = CourseKey.from_string(params.get("course_key"))
    except InvalidKeyError:
        msg = _("{course_key} is not a valid course key").format(course_key=params.get("course_key"))
        return None, HttpResponseBadRequest(msg)

    return {"user": user, "course_key": course_key}, None
@require_POST
@require_certificate_permission
def regenerate_certificate_for_user(request):
    """
    Regenerate certificates for a user.

    This is meant to be used by support staff through the UI in lms/djangoapps/support

    Arguments:
        request (HttpRequest): The request object

    Returns:
        HttpResponse

    Example Usage:

        POST /certificates/regenerate
            * username: "bob"
            * course_key: "edX/DemoX/Demo_Course"

        Response: 200 OK

    """
    # Check the POST parameters, returning a 400 response if they're not valid.
    params, response = _validate_regen_post_params(request.POST)
    if response is not None:
        return response

    # Check that the course exists
    course = modulestore().get_course(params["course_key"])
    if course is None:
        msg = _("The course {course_key} does not exist").format(course_key=params["course_key"])
        return HttpResponseBadRequest(msg)

    # Check that the user is enrolled in the course
    if not CourseEnrollment.is_enrolled(params["user"], params["course_key"]):
        msg = _("User {username} is not enrolled in the course {course_key}").format(
            username=params["user"].username,
            course_key=params["course_key"]
        )
        return HttpResponseBadRequest(msg)

    # Attempt to regenerate certificates
    try:
        api.regenerate_user_certificates(params["user"], params["course_key"], course=course)
    except:  # pylint: disable=bare-except
        # We are pessimistic about the kinds of errors that might get thrown by the
        # certificates API. This may be overkill, but we're logging everything so we can
        # track down unexpected errors.
        log.exception(
            "Could not regenerate certificates for user %s in course %s",
            params["user"].id,
            params["course_key"]
        )
        return HttpResponseServerError(_("An unexpected error occurred while regenerating certificates."))

    log.info(
        "Started regenerating certificates for user %s in course %s from the support page.",
        params["user"].id, params["course_key"]
    )
    return HttpResponse(200)
| agpl-3.0 |
ESOedX/edx-platform | openedx/core/djangoapps/api_admin/tests/test_models.py | 2 | 6637 | # pylint: disable=missing-docstring
from __future__ import absolute_import
from smtplib import SMTPException
import ddt
import mock
import six
from django.db import IntegrityError
from django.test import TestCase
from openedx.core.djangoapps.api_admin.models import ApiAccessConfig, ApiAccessRequest
from openedx.core.djangoapps.api_admin.models import log as model_log
from openedx.core.djangoapps.api_admin.tests.factories import ApiAccessRequestFactory
from openedx.core.djangoapps.site_configuration.tests.factories import SiteFactory
from openedx.core.djangolib.testing.utils import skip_unless_lms
from student.tests.factories import UserFactory
@ddt.ddt
@skip_unless_lms
class ApiAccessRequestTests(TestCase):
    """Tests for the ApiAccessRequest model: status workflow, access
    resolution, uniqueness and user retirement."""

    def setUp(self):
        super(ApiAccessRequestTests, self).setUp()
        self.user = UserFactory()
        self.request = ApiAccessRequestFactory(user=self.user)

    def test_default_status(self):
        # A freshly-created request is PENDING and grants no access.
        self.assertEqual(self.request.status, ApiAccessRequest.PENDING)
        self.assertFalse(ApiAccessRequest.has_api_access(self.user))

    def test_approve(self):
        self.request.approve()
        self.assertEqual(self.request.status, ApiAccessRequest.APPROVED)

    def test_deny(self):
        self.request.deny()
        self.assertEqual(self.request.status, ApiAccessRequest.DENIED)

    def test_nonexistent_request(self):
        """Test that users who have not requested API access do not get it."""
        other_user = UserFactory()
        self.assertFalse(ApiAccessRequest.has_api_access(other_user))

    @ddt.data(
        (ApiAccessRequest.PENDING, False),
        (ApiAccessRequest.DENIED, False),
        (ApiAccessRequest.APPROVED, True),
    )
    @ddt.unpack
    def test_has_access(self, status, should_have_access):
        # Only an APPROVED request grants API access.
        self.request.status = status
        self.request.save()
        self.assertEqual(ApiAccessRequest.has_api_access(self.user), should_have_access)

    def test_unique_per_user(self):
        # A user may have at most one access request.
        with self.assertRaises(IntegrityError):
            ApiAccessRequestFactory(user=self.user)

    def test_no_access(self):
        # With no request on file, the status is None.
        self.request.delete()
        self.assertIsNone(ApiAccessRequest.api_access_status(self.user))

    def test_unicode(self):
        request_unicode = six.text_type(self.request)
        self.assertIn(self.request.website, request_unicode)
        self.assertIn(self.request.status, request_unicode)

    def test_retire_user_success(self):
        # Retiring the owning user blanks all personally-identifying fields.
        retire_result = self.request.retire_user(self.user)
        self.assertTrue(retire_result)
        self.assertEqual(self.request.company_address, '')
        self.assertEqual(self.request.company_name, '')
        self.assertEqual(self.request.website, '')
        self.assertEqual(self.request.reason, '')

    def test_retire_user_do_not_exist(self):
        # Retiring a user with no request on file reports failure.
        user2 = UserFactory()
        retire_result = self.request.retire_user(user2)
        self.assertFalse(retire_result)
class ApiAccessConfigTests(TestCase):
    """Tests for the string representation of ApiAccessConfig."""

    def test_unicode(self):
        # The text form should reflect the enabled flag for both states.
        for enabled in (True, False):
            expected = u'ApiAccessConfig [enabled={}]'.format(enabled)
            self.assertEqual(
                six.text_type(ApiAccessConfig(enabled=enabled)),
                expected
            )
@skip_unless_lms
class ApiAccessRequestSignalTests(TestCase):
    """Tests of the save-signal email notifications for ApiAccessRequest."""
    def setUp(self):
        super(ApiAccessRequestSignalTests, self).setUp()
        self.user = UserFactory()
        self.api_access_request = ApiAccessRequest(user=self.user, site=SiteFactory())
        # Dotted paths patched in the tests below so no real email is sent.
        self.send_new_pending_email_function = 'openedx.core.djangoapps.api_admin.models._send_new_pending_email'
        self.send_decision_email_function = 'openedx.core.djangoapps.api_admin.models._send_decision_email'
    def test_save_signal_success_new_email(self):
        """ Verify that initial save sends new email and no decision email. """
        with mock.patch(self.send_new_pending_email_function) as mock_new_email:
            with mock.patch(self.send_decision_email_function) as mock_decision_email:
                self.api_access_request.save()
        mock_new_email.assert_called_once_with(self.api_access_request)
        self.assertFalse(mock_decision_email.called)
    def test_save_signal_success_decision_email(self):
        """ Verify that updating request status sends decision email and no new email. """
        self.api_access_request.save()
        with mock.patch(self.send_new_pending_email_function) as mock_new_email:
            with mock.patch(self.send_decision_email_function) as mock_decision_email:
                self.api_access_request.approve()
        mock_decision_email.assert_called_once_with(self.api_access_request)
        self.assertFalse(mock_new_email.called)
    def test_save_signal_success_no_emails(self):
        """ Verify that updating request status again sends no emails. """
        self.api_access_request.save()
        self.api_access_request.approve()
        with mock.patch(self.send_new_pending_email_function) as mock_new_email:
            with mock.patch(self.send_decision_email_function) as mock_decision_email:
                self.api_access_request.deny()
        self.assertFalse(mock_decision_email.called)
        self.assertFalse(mock_new_email.called)
    def test_save_signal_failure_email(self):
        """ Verify that saving still functions even on email errors. """
        self.assertIsNone(self.api_access_request.id)
        mail_function = 'openedx.core.djangoapps.api_admin.models.send_mail'
        with mock.patch(mail_function, side_effect=SMTPException):
            with mock.patch.object(model_log, 'exception') as mock_model_log_exception:
                self.api_access_request.save()
        # Verify that initial save logs email errors properly
        mock_model_log_exception.assert_called_once_with(
            u'Error sending API user notification email for request [%s].', self.api_access_request.id
        )
        # Verify object saved
        self.assertIsNotNone(self.api_access_request.id)
        with mock.patch(mail_function, side_effect=SMTPException):
            with mock.patch.object(model_log, 'exception') as mock_model_log_exception:
                self.api_access_request.approve()
        # Verify that updating request status logs email errors properly
        mock_model_log_exception.assert_called_once_with(
            u'Error sending API user notification email for request [%s].', self.api_access_request.id
        )
        # Verify object saved
        self.assertEqual(self.api_access_request.status, ApiAccessRequest.APPROVED)
| agpl-3.0 |
davidyezsetz/kuma | vendor/packages/Werkzeug/werkzeug/http.py | 6 | 26015 | # -*- coding: utf-8 -*-
"""
werkzeug.http
~~~~~~~~~~~~~
Werkzeug comes with a bunch of utilties that help Werkzeug to deal with
HTTP data. Most of the classes and functions provided by this module are
used by the wrappers, but they are useful on their own, too, especially if
the response and request objects are not used.
This covers some of the more HTTP centric features of WSGI, some other
utilities such as cookie handling are documented in the `werkzeug.utils`
module.
:copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import inspect
try:
from email.utils import parsedate_tz, mktime_tz
except ImportError:
from email.Utils import parsedate_tz, mktime_tz
from cStringIO import StringIO
from tempfile import TemporaryFile
from urllib2 import parse_http_list as _parse_list_header
from datetime import datetime
from itertools import chain, repeat
try:
from hashlib import md5
except ImportError:
from md5 import new as md5
from werkzeug._internal import _decode_unicode, HTTP_STATUS_CODES
_accept_re = re.compile(r'([^\s;,]+)(?:[^,]*?;\s*q=(\d*(?:\.\d+)?))?')
_token_chars = frozenset("!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
'^_`abcdefghijklmnopqrstuvwxyz|~')
_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
_multipart_boundary_re = re.compile('^[ -~]{0,200}[!-~]$')
_entity_headers = frozenset([
'allow', 'content-encoding', 'content-language', 'content-length',
'content-location', 'content-md5', 'content-range', 'content-type',
'expires', 'last-modified'
])
_hop_by_pop_headers = frozenset([
'connection', 'keep-alive', 'proxy-authenticate',
'proxy-authorization', 'te', 'trailers', 'transfer-encoding',
'upgrade'
])
#: supported http encodings that are also available in python we support
#: for multipart messages.
_supported_multipart_encodings = frozenset(['base64', 'quoted-printable'])
def quote_header_value(value, extra_chars='', allow_token=True):
    """Quote a header value if necessary.

    .. versionadded:: 0.5

    :param value: the value to quote.
    :param extra_chars: extra characters that are also allowed unquoted.
    :param allow_token: if this is enabled, values consisting entirely of
                        token characters are returned unchanged.
    """
    value = str(value)
    if allow_token and set(value) <= (_token_chars | set(extra_chars)):
        # pure token: no quoting needed
        return value
    escaped = value.replace('\\', '\\\\').replace('"', '\\"')
    return '"%s"' % escaped
def unquote_header_value(value):
    r"""Unquotes a header value.  (Reversal of :func:`quote_header_value`).

    This does not implement RFC-strict unquoting but the escaping browsers
    actually produce.

    .. versionadded:: 0.5

    :param value: the header value to unquote.
    """
    if value[:1] == '"' and value[-1:] == '"':
        # This is not the real unquoting; strict RFC unquoting would break
        # browsers (IE uploads files with "C:\foo\bar.txt" as filename),
        # so only the two escapes browsers emit are reversed.
        inner = value[1:-1]
        return inner.replace('\\\\', '\\').replace('\\"', '"')
    return value
def dump_options_header(header, options):
    """The reverse function to :func:`parse_options_header`.

    :param header: the header to dump
    :param options: a dict of options to append.
    :return: the header with the ``; key=value`` options appended.
    """
    segments = []
    if header is not None:
        segments.append(header)
    for key, value in options.iteritems():
        # a None value is dumped as a bare key without '='
        if value is None:
            segments.append(key)
        else:
            segments.append('%s=%s' % (key, quote_header_value(value)))
    return '; '.join(segments)
def dump_header(iterable, allow_token=True):
    """Dump an HTTP header again.  This is the reversal of
    :func:`parse_list_header`, :func:`parse_set_header` and
    :func:`parse_dict_header`.  This also quotes strings that include an
    equals sign unless you pass it as dict of key, value pairs.

    :param iterable: the iterable or dict of values to quote.
    :param allow_token: if set to `False` tokens as values are disallowed.
                        See :func:`quote_header_value` for more details.
    :return: a comma separated header string.
    """
    if isinstance(iterable, dict):
        items = []
        for key, value in iterable.iteritems():
            # None values are dumped as bare keys, others as key=value
            if value is None:
                items.append(key)
            else:
                items.append('%s=%s' % (
                    key,
                    quote_header_value(value, allow_token=allow_token)
                ))
    else:
        items = [quote_header_value(x, allow_token=allow_token)
                 for x in iterable]
    return ', '.join(items)
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings.  A quoted-string could
    contain a comma.  A non-quoted string could have quotes in the
    middle.  Quotes are removed automatically after parsing.

    :param value: a string with a list header.
    :return: list
    """
    items = []
    for element in _parse_list_header(value):
        # strip surrounding quotes and undo browser-style escaping
        if element[:1] == element[-1:] == '"':
            element = unquote_header_value(element[1:-1])
        items.append(element)
    return items
def parse_dict_header(value):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
    convert them into a python dict.  If there is no value for a key it will
    be `None`.

    :param value: a string with a dict header.
    :return: dict
    """
    result = {}
    for item in _parse_list_header(value):
        if '=' not in item:
            # a bare token maps to None
            result[item] = None
        else:
            name, val = item.split('=', 1)
            if val[:1] == val[-1:] == '"':
                val = unquote_header_value(val[1:-1])
            result[name] = val
    return result
def parse_options_header(value):
    """Parse a ``Content-Type`` like header into a tuple with the content
    type and the options:

    >>> parse_options_header('Content-Type: text/html; mimetype=text/html')
    ('Content-Type: text/html', {'mimetype': 'text/html'})

    This should not be used to parse ``Cache-Control`` like headers that use
    a slightly different format.  For these headers use the
    :func:`parse_dict_header` function.

    .. versionadded:: 0.5

    :param value: the header to parse.
    :return: (str, options)
    """
    def _tokenize(string):
        # Yield ';'-separated segments, but never split inside a quoted
        # string: a candidate split point preceded by an odd number of '"'
        # characters is inside quotes, so keep searching.
        while string[:1] == ';':
            string = string[1:]
            end = string.find(';')
            while end > 0 and string.count('"', 0, end) % 2:
                end = string.find(';', end + 1)
            if end < 0:
                end = len(string)
            value = string[:end]
            yield value.strip()
            string = string[end:]
    parts = _tokenize(';' + value)
    # first segment is the primary value; the rest are key=value options
    name = parts.next()
    extra = {}
    for part in parts:
        if '=' in part:
            key, value = part.split('=', 1)
            extra[key.strip().lower()] = unquote_header_value(value.strip())
        else:
            extra[part.strip()] = None
    return name, extra
def parse_accept_header(value, cls=None):
    """Parses an HTTP Accept-* header.  This does not implement a complete
    valid algorithm but one that supports at least value and quality
    extraction.

    Returns a new :class:`Accept` object (basically a list of ``(value, quality)``
    tuples sorted by the quality with some additional accessor methods).

    The second parameter can be a subclass of :class:`Accept` that is created
    with the parsed values and returned.

    :param value: the accept header string to be parsed.
    :param cls: the wrapper class for the return value (can be
                :class:`Accept` or a subclass thereof)
    :return: an instance of `cls`.
    """
    if cls is None:
        cls = Accept
    if not value:
        return cls(None)
    result = []
    for match in _accept_re.finditer(value):
        quality = match.group(2)
        if not quality:
            # a missing q parameter defaults to quality 1
            quality = 1
        else:
            # clamp the quality into the valid [0, 1] range
            quality = max(min(float(quality), 1), 0)
        result.append((match.group(1), quality))
    return cls(result)
def parse_cache_control_header(value, on_update=None, cls=None):
    """Parse a cache control header.  The RFC differs between response and
    request cache control, this method does not.  It's your responsibility
    to not use the wrong control statements.

    .. versionadded:: 0.5
       The `cls` was added.  If not specified an immutable
       :class:`RequestCacheControl` is returned.

    :param value: a cache control header to be parsed.
    :param on_update: an optional callable that is called every time a
                      value on the :class:`CacheControl` object is changed.
    :param cls: the class for the returned object.  By default
                :class:`RequestCacheControl` is used.
    :return: a `cls` object.
    """
    if cls is None:
        cls = RequestCacheControl
    if not value:
        return cls(None, on_update)
    return cls(parse_dict_header(value), on_update)
def parse_set_header(value, on_update=None):
    """Parse a set-like header and return a :class:`HeaderSet` object.  The
    return value is an object that treats the items case-insensitively and
    keeps the order of the items.

    :param value: a set header to be parsed.
    :param on_update: an optional callable that is called every time a
                      value on the :class:`HeaderSet` object is changed.
    :return: a :class:`HeaderSet`
    """
    if not value:
        return HeaderSet(None, on_update)
    return HeaderSet(parse_list_header(value), on_update)
def parse_authorization_header(value):
    """Parse an HTTP basic/digest authorization header transmitted by the web
    browser.  The return value is either `None` if the header was invalid or
    not given, otherwise an :class:`Authorization` object.

    :param value: the authorization header to parse.
    :return: a :class:`Authorization` object or `None`.
    """
    if not value:
        return
    try:
        auth_type, auth_info = value.split(None, 1)
        auth_type = auth_type.lower()
    except ValueError:
        return
    if auth_type == 'basic':
        try:
            # Python 2 base64 codec; any decode failure or missing ':'
            # results in None being returned.
            username, password = auth_info.decode('base64').split(':', 1)
        except Exception, e:
            return
        return Authorization('basic', {'username': username,
                                       'password': password})
    elif auth_type == 'digest':
        auth_map = parse_dict_header(auth_info)
        # all of these keys are mandatory for a valid digest header
        for key in 'username', 'realm', 'nonce', 'uri', 'nc', 'cnonce', \
                   'response':
            if not key in auth_map:
                return
        return Authorization('digest', auth_map)
def parse_www_authenticate_header(value, on_update=None):
    """Parse an HTTP WWW-Authenticate header into a :class:`WWWAuthenticate`
    object.

    :param value: a WWW-Authenticate header to parse.
    :param on_update: an optional callable that is called every time a
                      value on the :class:`WWWAuthenticate` object is changed.
    :return: a :class:`WWWAuthenticate` object.
    """
    if not value:
        return WWWAuthenticate(on_update=on_update)
    try:
        auth_type, auth_info = value.split(None, 1)
        auth_type = auth_type.lower()
    except (ValueError, AttributeError):
        # header with only an auth type and no parameters
        return WWWAuthenticate(value.lower(), on_update=on_update)
    return WWWAuthenticate(auth_type, parse_dict_header(auth_info),
                           on_update)
def quote_etag(etag, weak=False):
    """Quote an etag.

    :param etag: the etag to quote (must not itself contain a ``"``).
    :param weak: set to `True` to tag it "weak".
    :raises ValueError: if the etag contains a double quote.
    """
    if '"' in etag:
        raise ValueError('invalid etag')
    etag = '"%s"' % etag
    if weak:
        # RFC 2616 / RFC 7232 define the weak indicator as uppercase "W/";
        # the lowercase 'w/' emitted previously is not a valid wire prefix.
        # (unquote_etag still accepts both forms when parsing.)
        etag = 'W/' + etag
    return etag
def unquote_etag(etag):
    """Unquote a single etag:

    >>> unquote_etag('w/"bar"')
    ('bar', True)
    >>> unquote_etag('"bar"')
    ('bar', False)

    :param etag: the etag identifier to unquote.
    :return: a ``(etag, weak)`` tuple.
    """
    if not etag:
        return None, None
    tag = etag.strip()
    # a 'w/' or 'W/' prefix marks the validator as weak
    weak = tag[:2] in ('w/', 'W/')
    if weak:
        tag = tag[2:]
    if tag[:1] == tag[-1:] == '"':
        tag = tag[1:-1]
    return tag, weak
def parse_etags(value):
    """Parse an etag header.

    :param value: the tag header to parse
    :return: an :class:`ETags` object.
    """
    if not value:
        return ETags()
    strong = []
    weak = []
    end = len(value)
    pos = 0
    while pos < end:
        match = _etag_re.match(value, pos)
        if match is None:
            break
        is_weak, quoted, raw = match.groups()
        if raw == '*':
            # a bare '*' matches any entity tag
            return ETags(star_tag=True)
        elif quoted:
            raw = quoted
        if is_weak:
            weak.append(raw)
        else:
            strong.append(raw)
        pos = match.end()
    return ETags(strong, weak)
def generate_etag(data):
    """Generate an etag for some data (the hex MD5 digest of the bytes)."""
    digest = md5(data)
    return digest.hexdigest()
def parse_date(value):
    """Parse one of the following date formats into a datetime object:

    .. sourcecode:: text

        Sun, 06 Nov 1994 08:49:37 GMT  ; RFC 822, updated by RFC 1123
        Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
        Sun Nov  6 08:49:37 1994       ; ANSI C's asctime() format

    If parsing fails the return value is `None`.

    :param value: a string with a supported date format.
    :return: a :class:`datetime.datetime` object.
    """
    if not value:
        return None
    timetuple = parsedate_tz(value.strip())
    if timetuple is None:
        return None
    # if no timezone is part of the string we assume UTC
    if timetuple[-1] is None:
        timetuple = timetuple[:-1] + (0,)
    return datetime.utcfromtimestamp(mktime_tz(timetuple))
def default_stream_factory(total_content_length, filename, content_type,
                           content_length=None):
    """The stream factory that is used per default.

    Large payloads (> 500KB) are spooled to a temporary file; everything
    else is buffered in memory.

    NOTE(review): the (filename, content_type) parameter order here differs
    from the call in :func:`parse_multipart`, which passes (content_type,
    filename).  Harmless today because both arguments are ignored, but
    confirm and align the orders before adding a factory that uses them.
    """
    if total_content_length > 1024 * 500:
        return TemporaryFile('wb+')
    return StringIO()
def _make_stream_factory(factory):
    """this exists for backwards compatibility!, will go away in 0.6.

    Wraps a zero-argument legacy factory so it can be called with the new
    four-argument signature; factories that already take arguments are
    returned unchanged.

    NOTE(review): relies on ``inspect.getargspec``, which was removed in
    Python 3.11 -- this whole module is Python 2 era code.
    """
    args, _, _, defaults = inspect.getargspec(factory)
    # number of parameters without a default value
    required_args = len(args) - len(defaults or ())
    if inspect.ismethod(factory):
        required_args -= 1
    if required_args != 0:
        return factory
    from warnings import warn
    warn(DeprecationWarning('stream factory passed to `parse_form_data` '
                            'uses deprecated invokation API.'), stacklevel=4)
    return lambda *a: factory()
def _fix_ie_filename(filename):
"""Internet Explorer 6 transmits the full file name if a file is
uploaded. This function strips the full path if it thinks the
filename is Windows-like absolute.
"""
if filename[1:3] == ':\\' or filename[:2] == '\\\\':
return filename.split('\\')[-1]
return filename
def _line_parse(line):
"""Removes line ending characters and returns a tuple (`stripped_line`,
`is_terminated`).
"""
if line[-2:] == '\r\n':
return line[:-2], True
elif line[-1:] in '\r\n':
return line[:-1], True
return line, False
def parse_multipart(file, boundary, content_length, stream_factory=None,
                    charset='utf-8', errors='ignore', buffer_size=10 * 1024,
                    max_form_memory_size=None):
    """Parse a multipart/form-data stream.  This is invoked by
    :func:`utils.parse_form_data` if the content type matches.  Currently it
    exists for internal usage only, but could be exposed as separate
    function if it turns out to be useful and if we consider the API stable.

    :return: a ``(form, files)`` tuple of ``(name, value)`` /
             ``(name, FileStorage)`` pair lists.
    """
    # XXX: this function does not support multipart/mixed.  I don't know of
    #      any browser that supports this, but it should be implemented
    #      nonetheless.
    # make sure the buffer size is divisible by four so that we can base64
    # decode chunk by chunk
    assert buffer_size % 4 == 0, 'buffer size has to be divisible by 4'
    # also the buffer size has to be at least 1024 bytes long or long headers
    # will freak out the system
    assert buffer_size >= 1024, 'buffer size has to be at least 1KB'
    if stream_factory is None:
        stream_factory = default_stream_factory
    else:
        stream_factory = _make_stream_factory(stream_factory)
    if not boundary:
        raise ValueError('Missing boundary')
    if not is_valid_multipart_boundary(boundary):
        raise ValueError('Invalid boundary: %s' % boundary)
    if len(boundary) > buffer_size:
        raise ValueError('Boundary longer than buffer size')
    total_content_length = content_length
    next_part = '--' + boundary
    last_part = next_part + '--'
    form = []
    files = []
    in_memory = 0
    # convert the file into a limited stream with iteration capabilities
    file = LimitedStream(file, content_length)
    # pad the line iterator with empty strings so reading past the end of
    # input yields '' instead of raising StopIteration
    iterator = chain(make_line_iter(file, buffer_size=buffer_size),
                     repeat(''))
    def _find_terminator():
        """The terminator might have some additional newlines before it.
        There is at least one application that sends additional newlines
        before headers (the python setuptools package).
        """
        for line in iterator:
            if not line:
                break
            line = line.strip()
            if line:
                return line
        return ''
    try:
        terminator = _find_terminator()
        if terminator != next_part:
            raise ValueError('Expected boundary at start of multipart data')
        # one loop iteration per part; `terminator` is updated by the
        # inner line loop when it hits the next boundary
        while terminator != last_part:
            headers = parse_multipart_headers(iterator)
            disposition = headers.get('content-disposition')
            if disposition is None:
                raise ValueError('Missing Content-Disposition header')
            disposition, extra = parse_options_header(disposition)
            filename = extra.get('filename')
            name = extra.get('name')
            transfer_encoding = headers.get('content-transfer-encoding')
            content_type = headers.get('content-type')
            # a Content-Type header marks the part as a file upload
            if content_type is None:
                is_file = False
            else:
                content_type = parse_options_header(content_type)[0]
                is_file = True
            if is_file:
                if filename is not None:
                    filename = _fix_ie_filename(_decode_unicode(filename,
                                                                charset,
                                                                errors))
                try:
                    content_length = int(headers['content-length'])
                except (KeyError, ValueError):
                    content_length = 0
                stream = stream_factory(total_content_length, content_type,
                                        filename, content_length)
            else:
                stream = StringIO()
            buf = ''
            for line in iterator:
                if not line:
                    raise ValueError('unexpected end of stream')
                if line[:2] == '--':
                    terminator = line.rstrip()
                    if terminator in (next_part, last_part):
                        break
                if transfer_encoding in _supported_multipart_encodings:
                    try:
                        line = line.decode(transfer_encoding)
                    except:
                        raise ValueError('could not base 64 decode chunk')
                # we have something in the buffer from the last iteration.
                # write that value to the output stream now and clear the buffer.
                if buf:
                    stream.write(buf)
                    buf = ''
                # If the line ends with windows CRLF we write everything except
                # the last two bytes.  In all other cases however we write everything
                # except the last byte.  If it was a newline, that's fine, otherwise
                # it does not matter because we write it the last iteration.  If the
                # loop aborts early because the end of a part was reached, the last
                # newline is not written which is exactly what we want.
                newline_length = line[-2:] == '\r\n' and 2 or 1
                stream.write(line[:-newline_length])
                buf = line[-newline_length:]
                if not is_file and max_form_memory_size is not None:
                    in_memory += len(line)
                    if in_memory > max_form_memory_size:
                        from werkzeug.exceptions import RequestEntityTooLarge
                        raise RequestEntityTooLarge()
            else:
                # the padded iterator ran dry without hitting a boundary
                raise ValueError('unexpected end of part')
            # rewind the stream
            stream.seek(0)
            if is_file:
                files.append((name, FileStorage(stream, filename, name,
                                                content_type,
                                                content_length)))
            else:
                form.append((name, _decode_unicode(stream.read(),
                                                   charset, errors)))
    finally:
        # make sure the whole input stream is read
        file.exhaust()
    return form, files
def parse_multipart_headers(iterable):
    """Parses multipart headers from an iterable that yields lines (including
    the trailing newline symbol).

    :return: a :class:`Headers` object.
    """
    result = []
    for line in iterable:
        line, line_terminated = _line_parse(line)
        if not line_terminated:
            raise ValueError('unexpected end of line in multipart header')
        if not line:
            # blank line terminates the header block
            break
        elif line[0] in ' \t' and result:
            # continuation line: fold into the previous header's value
            key, value = result[-1]
            result[-1] = (key, value + '\n ' + line[1:])
        else:
            parts = line.split(':', 1)
            if len(parts) == 2:
                result.append((parts[0].strip(), parts[1].strip()))
    return Headers(result)
def is_resource_modified(environ, etag=None, data=None, last_modified=None):
    """Convenience method for conditional requests.

    :param environ: the WSGI environment of the request to be checked.
    :param etag: the etag for the response for comparision.
    :param data: or alternatively the data of the response to automatically
                 generate an etag using :func:`generate_etag`.
    :param last_modified: an optional date of the last modification.
    :return: `True` if the resource was modified, otherwise `False`.
    """
    if etag is None and data is not None:
        etag = generate_etag(data)
    elif data is not None:
        raise TypeError('both data and etag given')
    # conditional requests are only meaningful for safe methods
    if environ['REQUEST_METHOD'] not in ('GET', 'HEAD'):
        return False
    unmodified = False
    if isinstance(last_modified, basestring):
        last_modified = parse_date(last_modified)
    modified_since = parse_date(environ.get('HTTP_IF_MODIFIED_SINCE'))
    if modified_since and last_modified and last_modified <= modified_since:
        unmodified = True
    if etag:
        if_none_match = parse_etags(environ.get('HTTP_IF_NONE_MATCH'))
        if if_none_match:
            unmodified = if_none_match.contains_raw(etag)
    return not unmodified
def remove_entity_headers(headers, allowed=('expires', 'content-location')):
    """Remove all entity headers from a list or :class:`Headers` object.  This
    operation works in-place.  `Expires` and `Content-Location` headers are
    by default not removed.  The reason for this is :rfc:`2616` section
    10.3.5 which specifies some entity headers that should be sent.

    .. versionchanged:: 0.5
       added `allowed` parameter.

    :param headers: a list or :class:`Headers` object.
    :param allowed: a list of headers that should still be allowed even though
                    they are entity headers.
    """
    allowed = set(x.lower() for x in allowed)
    # slice assignment keeps the original object (in-place filtering)
    headers[:] = [(key, value) for key, value in headers if
                  not is_entity_header(key) or key.lower() in allowed]
def remove_hop_by_hop_headers(headers):
    """Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or
    :class:`Headers` object.  This operation works in-place.

    .. versionadded:: 0.5

    :param headers: a list or :class:`Headers` object.
    """
    # slice assignment keeps the original object (in-place filtering)
    headers[:] = [(key, value) for key, value in headers if
                  not is_hop_by_hop_header(key)]
def is_entity_header(header):
    """Check if a header is an entity header.

    .. versionadded:: 0.5

    :param header: the header to test.
    :return: `True` if it's an entity header, `False` otherwise.
    """
    return header.lower() in _entity_headers
def is_hop_by_hop_header(header):
    """Check if a header is an HTTP/1.1 "Hop-by-Hop" header.

    .. versionadded:: 0.5

    :param header: the header to test.
    :return: `True` if it's a hop-by-hop header, `False` otherwise.
    """
    return header.lower() in _hop_by_pop_headers
def is_valid_multipart_boundary(boundary):
    """Checks if the string given is a valid multipart boundary.

    :return: `True` if `boundary` matches the allowed character set and
             length, `False` otherwise.
    """
    return _multipart_boundary_re.match(boundary) is not None
# circular dependency fun
from werkzeug.utils import make_line_iter, FileStorage, LimitedStream
from werkzeug.datastructures import Headers, Accept, RequestCacheControl, \
ResponseCacheControl, HeaderSet, ETags, Authorization, \
WWWAuthenticate
# DEPRECATED
# backwards compatibible imports
from werkzeug.datastructures import MIMEAccept, CharsetAccept, LanguageAccept
| mpl-2.0 |
jhawkesworth/ansible | lib/ansible/modules/network/eos/eos_interface.py | 23 | 15169 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: eos_interface
version_added: "2.5"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Manage Interface on Arista EOS network devices
description:
- This module provides declarative management of Interfaces
on Arista EOS network devices.
notes:
- Tested against EOS 4.15
options:
name:
description:
- Name of the Interface to be configured on remote device. The name of interface
should be in expanded format and not abbreviated.
required: true
description:
description:
- Description of Interface upto 240 characters.
enabled:
description:
- Interface link status. If the value is I(True) the interface state will be
enabled, else if value is I(False) interface will be in disable (shutdown) state.
default: True
type: bool
speed:
description:
- This option configures autoneg and speed/duplex/flowcontrol for the interface
given in C(name) option.
mtu:
description:
- Set maximum transmission unit size in bytes of transmit packet for the interface given
in C(name) option.
tx_rate:
description:
- Transmit rate in bits per second (bps) for the interface given in C(name) option.
- This is state check parameter only.
- Supports conditionals, see L(Conditionals in Networking Modules,../network/user_guide/network_working_with_command_output.html)
rx_rate:
description:
- Receiver rate in bits per second (bps) for the interface given in C(name) option.
- This is state check parameter only.
- Supports conditionals, see L(Conditionals in Networking Modules,../network/user_guide/network_working_with_command_output.html)
neighbors:
description:
- Check the operational state of given interface C(name) for LLDP neighbor.
- The following suboptions are available.
suboptions:
host:
description:
- "LLDP neighbor host for given interface C(name)."
port:
description:
- "LLDP neighbor port to which given interface C(name) is connected."
aggregate:
description:
- List of Interfaces definitions. Each of the entry in aggregate list should
define name of interface C(name) and other options as required.
delay:
description:
- Time in seconds to wait before checking for the operational state on remote
device. This wait is applicable for operational state argument which are
I(state) with values C(up)/C(down), I(tx_rate) and I(rx_rate).
default: 10
state:
description:
- State of the Interface configuration, C(up) means present and
operationally up and C(down) means present and operationally C(down)
default: present
choices: ['present', 'absent', 'up', 'down']
extends_documentation_fragment: eos
"""
EXAMPLES = """
- name: configure interface
eos_interface:
name: ethernet1
description: test-interface
speed: 100full
mtu: 512
- name: remove interface
eos_interface:
name: ethernet1
state: absent
- name: make interface up
eos_interface:
name: ethernet1
enabled: True
- name: make interface down
eos_interface:
name: ethernet1
enabled: False
- name: Check intent arguments
eos_interface:
name: ethernet1
state: up
tx_rate: ge(0)
rx_rate: le(0)
- name: Check neighbors intent arguments
eos_interface:
name: ethernet1
neighbors:
- port: eth0
host: netdev
- name: Configure interface in disabled state and check if the operational state is disabled or not
eos_interface:
name: ethernet1
enabled: False
state: down
- name: Add interface using aggregate
eos_interface:
aggregate:
- { name: ethernet1, mtu: 256, description: test-interface-1 }
- { name: ethernet2, mtu: 516, description: test-interface-2 }
speed: 100full
state: present
- name: Delete interface using aggregate
eos_interface:
aggregate:
- name: loopback9
- name: loopback10
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device.
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- interface ethernet1
- description test-interface
- speed 100full
- mtu 512
"""
import re
from copy import deepcopy
from time import sleep
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import NetworkConfig
from ansible.module_utils.network.common.utils import conditional, remove_default_spec
from ansible.module_utils.network.eos.eos import get_config, load_config, run_commands
from ansible.module_utils.network.eos.eos import eos_argument_spec
def validate_mtu(value, module):
    """Abort the module run when *value* lies outside the 68-65535 range.

    A falsy value (``None``, ``0``, ``''``) is treated as "not supplied"
    and skipped, matching the original truthiness check.
    """
    if not value:
        return
    if not 68 <= int(value) <= 65535:
        module.fail_json(msg='mtu must be between 68 and 65535')
def validate_param_values(module, obj, param=None):
    """Run ``validate_<key>(value, module)`` for every key in *obj* that
    has a module-level validator function defined."""
    if param is None:
        param = module.params
    for key in obj:
        # dispatch to validate_<key>() when such a validator exists
        validate_fn = globals().get('validate_%s' % key)
        if not callable(validate_fn):
            continue
        validate_fn(param.get(key), module)
def parse_shutdown(configobj, name):
    """Return True if interface *name* has 'shutdown' in its config stanza."""
    cfg = configobj['interface %s' % name]
    cfg = '\n'.join(cfg.children)
    match = re.search(r'shutdown', cfg, re.M)
    return bool(match)
def parse_config_argument(configobj, name, arg=None):
    """Return the value following *arg* in interface *name*'s config
    stanza, or None when the argument is not configured."""
    cfg = configobj['interface %s' % name]
    cfg = '\n'.join(cfg.children)
    match = re.search(r'%s (.+)$' % arg, cfg, re.M)
    if match:
        return match.group(1)
def search_obj_in_list(name, lst):
    """Return the first dict in *lst* whose 'name' equals *name*, else None."""
    return next((entry for entry in lst if entry['name'] == name), None)
def add_command_to_interface(interface, cmd, commands):
    """Append *cmd* to *commands*, inserting the 'interface ...' context
    line first unless it is already present."""
    needs_context = interface not in commands
    if needs_context:
        commands.append(interface)
    commands.append(cmd)
def map_config_to_obj(module):
    """Build the `have` list: one dict per interface found in the device's
    running configuration."""
    config = get_config(module)
    configobj = NetworkConfig(indent=3, contents=config)
    match = re.findall(r'^interface (\S+)', config, re.M)
    if not match:
        return list()
    instances = list()
    # set() removes duplicate interface names from the config scrape
    for item in set(match):
        obj = {
            'name': item.lower(),
            'description': parse_config_argument(configobj, item, 'description'),
            'speed': parse_config_argument(configobj, item, 'speed'),
            'mtu': parse_config_argument(configobj, item, 'mtu'),
            'disable': parse_shutdown(configobj, item),
            'state': 'present'
        }
        instances.append(obj)
    return instances
def map_params_to_obj(module):
    """Build the `want` list of interface dicts from the module parameters,
    expanding the `aggregate` option when given."""
    obj = []
    aggregate = module.params.get('aggregate')
    if aggregate:
        for item in aggregate:
            # fall back to the top-level module params for unset keys
            for key in item:
                if item.get(key) is None:
                    item[key] = module.params[key]
            item['name'] = item['name'].lower()
            validate_param_values(module, item, item)
            d = item.copy()
            # 'enabled' (user facing) is translated into 'disable' (internal)
            if d['enabled']:
                d['disable'] = False
            else:
                d['disable'] = True
            obj.append(d)
    else:
        params = {
            'name': module.params['name'].lower(),
            'description': module.params['description'],
            'speed': module.params['speed'],
            'mtu': module.params['mtu'],
            'state': module.params['state'],
            'delay': module.params['delay'],
            'tx_rate': module.params['tx_rate'],
            'rx_rate': module.params['rx_rate'],
            'neighbors': module.params['neighbors']
        }
        validate_param_values(module, params)
        if module.params['enabled']:
            params.update({'disable': False})
        else:
            params.update({'disable': True})
        obj.append(params)
    return obj
def map_obj_to_commands(updates, modules):
    """Compute the EOS CLI commands that move the device from the `have`
    (running) interface state to the `want` (desired) state.

    :param updates: tuple of ``(want, have)`` lists of interface dicts.
    :param modules: the AnsibleModule instance (unused, kept for call-site
        compatibility).
    :return: list of configuration command strings.
    """
    commands = list()
    want, have = updates
    args = ('speed', 'description', 'mtu')
    for w in want:
        name = w['name']
        disable = w['disable']
        state = w['state']
        obj_in_have = search_obj_in_list(name, have)
        interface = 'interface ' + name
        if state == 'absent' and obj_in_have:
            commands.append('no ' + interface)
        elif state in ('present', 'up', 'down'):
            if obj_in_have:
                # update only attributes that differ from the running config
                for item in args:
                    candidate = w.get(item)
                    running = obj_in_have.get(item)
                    if candidate and candidate != running:
                        cmd = "{0} {1}".format(item, candidate)
                        add_command_to_interface(interface, cmd, commands)
                if disable and not obj_in_have.get('disable', False):
                    add_command_to_interface(interface, 'shutdown', commands)
                elif not disable and obj_in_have.get('disable', False):
                    add_command_to_interface(interface, 'no shutdown', commands)
            else:
                # interface does not exist yet: create it and apply all
                # requested attributes
                commands.append(interface)
                for item in args:
                    value = w.get(item)
                    if value:
                        commands.append("{0} {1}".format(item, value))
                if disable:
                    # bugfix: a newly created interface that should be
                    # disabled must be shut down -- the previous
                    # 'no shutdown' would have *enabled* it, contradicting
                    # the 'shutdown' issued in the update branch above.
                    commands.append('shutdown')
    return commands
def check_declarative_intent_params(module, want, result):
    """Verify the device's operational state matches any declared intent
    (state up/down, tx/rx rates, LLDP neighbors) after a change.

    :returns: list of human-readable failed-condition strings (empty when
        every declared condition was satisfied)
    """
    failed_conditions = []
    # NOTE(review): the LLDP neighbor table is fetched once for the first
    # interface that declares neighbors and then reused for every later
    # interface, even though the command is scoped to w['name'] -- confirm
    # whether per-interface output is required here.
    have_neighbors = None
    for w in want:
        want_state = w.get('state')
        want_tx_rate = w.get('tx_rate')
        want_rx_rate = w.get('rx_rate')
        want_neighbors = w.get('neighbors')
        # Nothing declarative to verify for this interface.
        if want_state not in ('up', 'down') and not want_tx_rate and not want_rx_rate and not want_neighbors:
            continue
        # Give the device time to converge after a change before probing.
        if result['changed']:
            sleep(w['delay'])
        command = {'command': 'show interfaces %s' % w['name'], 'output': 'text'}
        output = run_commands(module, [command])
        if want_state in ('up', 'down'):
            match = re.search(r'%s (\w+)' % 'line protocol is', output[0], re.M)
            have_state = None
            if match:
                have_state = match.group(1)
            if have_state is None or not conditional(want_state, have_state.strip()):
                failed_conditions.append('state ' + 'eq(%s)' % want_state)
        if want_tx_rate:
            match = re.search(r'%s (\d+)' % 'output rate', output[0], re.M)
            have_tx_rate = None
            if match:
                have_tx_rate = match.group(1)
            if have_tx_rate is None or not conditional(want_tx_rate, have_tx_rate.strip(), cast=int):
                failed_conditions.append('tx_rate ' + want_tx_rate)
        if want_rx_rate:
            match = re.search(r'%s (\d+)' % 'input rate', output[0], re.M)
            have_rx_rate = None
            if match:
                have_rx_rate = match.group(1)
            if have_rx_rate is None or not conditional(want_rx_rate, have_rx_rate.strip(), cast=int):
                failed_conditions.append('rx_rate ' + want_rx_rate)
        if want_neighbors:
            have_host = []
            have_port = []
            if have_neighbors is None:
                command = {'command': 'show lldp neighbors {0}'.format(w['name']), 'output': 'text'}
                have_neighbors = run_commands(module, [command])
            if have_neighbors[0]:
                lines = have_neighbors[0].strip().split('\n')
                col = None
                for index, line in enumerate(lines):
                    if re.search(r"^Port\s+Neighbor Device ID\s+Neighbor Port ID\s+TTL", line):
                        col = index
                        break
                # BUG FIX: 'if col' treated a header row found at line
                # index 0 as "not found"; test explicitly against None.
                if col is not None and col < len(lines) - 1:
                    for items in lines[col + 1:]:
                        value = re.split(r'\s+', items)
                        try:
                            have_port.append(value[2])
                            have_host.append(value[1])
                        except IndexError:
                            pass
            for item in want_neighbors:
                host = item.get('host')
                port = item.get('port')
                if host and host not in have_host:
                    failed_conditions.append('host ' + host)
                if port and port not in have_port:
                    failed_conditions.append('port ' + port)
    return failed_conditions
def main():
    """ main entry point for module execution
    """
    neighbors_spec = dict(host=dict(), port=dict())
    element_spec = dict(
        name=dict(),
        description=dict(),
        speed=dict(),
        mtu=dict(),
        enabled=dict(default=True, type='bool'),
        tx_rate=dict(),
        rx_rate=dict(),
        neighbors=dict(type='list', elements='dict', options=neighbors_spec),
        delay=dict(default=10, type='int'),
        state=dict(default='present',
                   choices=['present', 'absent', 'up', 'down'])
    )
    # Per-item spec for the aggregate option: name becomes mandatory and
    # defaults are stripped so top-level values can fill the gaps.
    aggregate_spec = deepcopy(element_spec)
    aggregate_spec['name'] = dict(required=True)
    remove_default_spec(aggregate_spec)
    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=aggregate_spec))
    argument_spec.update(element_spec)
    argument_spec.update(eos_argument_spec)
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[['name', 'aggregate']],
        mutually_exclusive=[['name', 'aggregate']],
        supports_check_mode=True)
    warnings = list()
    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings
    want = map_params_to_obj(module)
    have = map_config_to_obj(module)
    commands = map_obj_to_commands((want, have), module)
    result['commands'] = commands
    if commands:
        # In check mode build the config session but do not commit it.
        response = load_config(module, commands, commit=not module.check_mode)
        if response.get('diff') and module._diff:
            result['diff'] = {'prepared': response.get('diff')}
        result['session_name'] = response.get('session')
        result['changed'] = True
    failed_conditions = check_declarative_intent_params(module, want, result)
    if failed_conditions:
        msg = 'One or more conditional statements have not been satisfied'
        module.fail_json(msg=msg, failed_conditions=failed_conditions)
    module.exit_json(**result)
if __name__ == '__main__':
    main()
| gpl-3.0 |
lkorigin/laniakea | src/web/lkweb/decorators.py | 4 | 1120 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2018-2019 Matthias Klumpp <matthias@tenstral.net>
#
# Licensed under the GNU Lesser General Public License Version 3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the license, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
from functools import wraps
from flask import abort
from flask_login import current_user
def admin_required(f):
    """View decorator that rejects non-administrators with HTTP 403."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        if current_user.is_admin():
            return f(*args, **kwargs)
        abort(403)
    return wrapper
| gpl-3.0 |
perkinslr/pypyjs | website/js/pypy.js-0.2.0/lib/modules/distutils/cygwinccompiler.py | 8 | 17829 | """distutils.cygwinccompiler
Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
handles the Cygwin port of the GNU C compiler to Windows. It also contains
the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
cygwin in no-cygwin mode).
"""
# problems:
#
# * if you use a msvc compiled python version (1.5.2)
# 1. you have to insert a __GNUC__ section in its config.h
# 2. you have to generate a import library for its dll
# - create a def-file for python??.dll
# - create a import library using
# dlltool --dllname python15.dll --def python15.def \
# --output-lib libpython15.a
#
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
#
# * We put export_symbols in a def-file, and don't use
# --export-all-symbols because it doesn't worked reliable in some
# tested configurations. And because other windows compilers also
# need their symbols specified this no serious problem.
#
# tested configurations:
#
# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works
# (after patching python's config.h and for C++ some other include files)
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
# (ld doesn't support -shared, so we use dllwrap)
# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now
# - its dllwrap doesn't work, there is a bug in binutils 2.10.90
# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
# - using gcc -mdll instead dllwrap doesn't work without -static because
# it tries to link against dlls instead their import libraries. (If
# it finds the dll first.)
# By specifying -static we force ld to link against the import libraries,
# this is windows standard and there are normally not the necessary symbols
# in the dlls.
# *** only the version of June 2000 shows these problems
# * cygwin gcc 3.2/ld 2.13.90 works
# (ld supports -shared)
# * mingw gcc 3.2/ld 2.13 works
# (ld supports -shared)
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id$"
import os,sys,copy
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
from distutils import log
def get_msvcr():
    """Include the appropriate MSVC runtime library if Python was built
    with MSVC 7.0 or later.

    Returns a one-element library list, or None when sys.version carries
    no 'MSC v.' marker (i.e. Python was not built with MSVC).
    """
    marker = sys.version.find('MSC v.')
    if marker == -1:
        return None
    msc_ver = sys.version[marker + 6:marker + 10]
    # Map the MSC internal version to the matching msvcr runtime.
    runtimes = {
        '1300': ['msvcr70'],   # MSVC 7.0
        '1310': ['msvcr71'],   # MSVC 7.1
        '1400': ['msvcr80'],   # VS2005 / MSVC 8.0
        '1500': ['msvcr90'],   # VS2008 / MSVC 9.0
        '1600': ['msvcr100'],  # VS2010 / MSVC 10.0
    }
    if msc_ver not in runtimes:
        raise ValueError("Unknown MS Compiler version %s " % msc_ver)
    return runtimes[msc_ver]
class CygwinCCompiler (UnixCCompiler):
    """Compiler driver for Cygwin's port of GCC on Windows.

    Subclasses UnixCCompiler but produces Windows-style artifacts
    (.dll/.exe) and generates a .def file for exported symbols at link
    time. Tool behavior is selected from the detected gcc/ld/dllwrap
    versions.
    """
    compiler_type = 'cygwin'
    obj_extension = ".o"
    static_lib_extension = ".a"
    shared_lib_extension = ".dll"
    static_lib_format = "lib%s%s"
    shared_lib_format = "%s%s"
    exe_extension = ".exe"
    def __init__ (self, verbose=0, dry_run=0, force=0):
        """Probe gcc/ld/dllwrap versions and configure tool command lines."""
        UnixCCompiler.__init__ (self, verbose, dry_run, force)
        # Warn early if pyconfig.h looks incompatible with GCC.
        (status, details) = check_config_h()
        self.debug_print("Python's GCC status: %s (details: %s)" %
                         (status, details))
        if status is not CONFIG_H_OK:
            self.warn(
                "Python's pyconfig.h doesn't seem to support your compiler. "
                "Reason: %s. "
                "Compiling may fail because of undefined preprocessor macros."
                % details)
        self.gcc_version, self.ld_version, self.dllwrap_version = \
            get_versions()
        self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" %
                         (self.gcc_version,
                          self.ld_version,
                          self.dllwrap_version) )
        # ld_version >= "2.10.90" and < "2.13" should also be able to use
        # gcc -mdll instead of dllwrap
        # Older dllwraps had own version numbers, newer ones use the
        # same as the rest of binutils ( also ld )
        # dllwrap 2.10.90 is buggy
        if self.ld_version >= "2.10.90":
            self.linker_dll = "gcc"
        else:
            self.linker_dll = "dllwrap"
        # ld_version >= "2.13" support -shared so use it instead of
        # -mdll -static
        if self.ld_version >= "2.13":
            shared_option = "-shared"
        else:
            shared_option = "-mdll -static"
        # Hard-code GCC because that's what this is all about.
        # XXX optimization, warnings etc. should be customizable.
        self.set_executables(compiler='gcc -mcygwin -O -Wall',
                             compiler_so='gcc -mcygwin -mdll -O -Wall',
                             compiler_cxx='g++ -mcygwin -O -Wall',
                             linker_exe='gcc -mcygwin',
                             linker_so=('%s -mcygwin %s' %
                                        (self.linker_dll, shared_option)))
        # cygwin and mingw32 need different sets of libraries
        if self.gcc_version == "2.91.57":
            # cygwin shouldn't need msvcrt, but without the dlls will crash
            # (gcc version 2.91.57) -- perhaps something about initialization
            self.dll_libraries=["msvcrt"]
            self.warn(
                "Consider upgrading to a newer version of gcc")
        else:
            # Include the appropriate MSVC runtime library if Python was built
            # with MSVC 7.0 or later.
            self.dll_libraries = get_msvcr()
    # __init__ ()
    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        """Compile one source file; .rc/.res resources go through windres."""
        if ext == '.rc' or ext == '.res':
            # gcc needs '.res' and '.rc' compiled to object files !!!
            try:
                self.spawn(["windres", "-i", src, "-o", obj])
            except DistutilsExecError, msg:
                raise CompileError, msg
        else: # for other files use the C-compiler
            try:
                self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
                           extra_postargs)
            except DistutilsExecError, msg:
                raise CompileError, msg
    def link (self,
              target_desc,
              objects,
              output_filename,
              output_dir=None,
              libraries=None,
              library_dirs=None,
              runtime_library_dirs=None,
              export_symbols=None,
              debug=0,
              extra_preargs=None,
              extra_postargs=None,
              build_temp=None,
              target_lang=None):
        """Link object files; export symbols via a generated .def file."""
        # use separate copies, so we can modify the lists
        extra_preargs = copy.copy(extra_preargs or [])
        libraries = copy.copy(libraries or [])
        objects = copy.copy(objects or [])
        # Additional libraries
        libraries.extend(self.dll_libraries)
        # handle export symbols by creating a def-file
        # with executables this only works with gcc/ld as linker
        if ((export_symbols is not None) and
            (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
            # (The linker doesn't do anything if output is up-to-date.
            # So it would probably better to check if we really need this,
            # but for this we had to insert some unchanged parts of
            # UnixCCompiler, and this is not what we want.)
            # we want to put some files in the same directory as the
            # object files are, build_temp doesn't help much
            # where are the object files
            temp_dir = os.path.dirname(objects[0])
            # name of dll to give the helper files the same base name
            (dll_name, dll_extension) = os.path.splitext(
                os.path.basename(output_filename))
            # generate the filenames for these files
            def_file = os.path.join(temp_dir, dll_name + ".def")
            lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")
            # Generate .def file
            contents = [
                "LIBRARY %s" % os.path.basename(output_filename),
                "EXPORTS"]
            for sym in export_symbols:
                contents.append(sym)
            self.execute(write_file, (def_file, contents),
                         "writing %s" % def_file)
            # next add options for def-file and to creating import libraries
            # dllwrap uses different options than gcc/ld
            if self.linker_dll == "dllwrap":
                extra_preargs.extend(["--output-lib", lib_file])
                # for dllwrap we have to use a special option
                extra_preargs.extend(["--def", def_file])
            # we use gcc/ld here and can be sure ld is >= 2.9.10
            else:
                # doesn't work: bfd_close build\...\libfoo.a: Invalid operation
                #extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file])
                # for gcc/ld the def-file is specified as any object files
                objects.append(def_file)
        #end: if ((export_symbols is not None) and
        #    (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
        # who wants symbols and a many times larger output file
        # should explicitly switch the debug mode on
        # otherwise we let dllwrap/ld strip the output file
        # (On my machine: 10KB < stripped_file < ??100KB
        #   unstripped_file = stripped_file + XXX KB
        #  ( XXX=254 for a typical python extension))
        if not debug:
            extra_preargs.append("-s")
        UnixCCompiler.link(self,
                           target_desc,
                           objects,
                           output_filename,
                           output_dir,
                           libraries,
                           library_dirs,
                           runtime_library_dirs,
                           None, # export_symbols, we do this in our def-file
                           debug,
                           extra_preargs,
                           extra_postargs,
                           build_temp,
                           target_lang)
    # link ()
    # -- Miscellaneous methods -----------------------------------------
    # overwrite the one from CCompiler to support rc and res-files
    def object_filenames (self,
                          source_filenames,
                          strip_dir=0,
                          output_dir=''):
        """Map source names to object names; .rc/.res keep their extension."""
        if output_dir is None: output_dir = ''
        obj_names = []
        for src_name in source_filenames:
            # use normcase to make sure '.rc' is really '.rc' and not '.RC'
            (base, ext) = os.path.splitext (os.path.normcase(src_name))
            if ext not in (self.src_extensions + ['.rc','.res']):
                raise UnknownFileError, \
                      "unknown file type '%s' (from '%s')" % \
                      (ext, src_name)
            if strip_dir:
                base = os.path.basename (base)
            if ext == '.res' or ext == '.rc':
                # these need to be compiled to object files
                obj_names.append (os.path.join (output_dir,
                                                base + ext + self.obj_extension))
            else:
                obj_names.append (os.path.join (output_dir,
                                                base + self.obj_extension))
        return obj_names
    # object_filenames ()
# class CygwinCCompiler
# the same as cygwin plus some additional parameters
class Mingw32CCompiler (CygwinCCompiler):
    """CygwinCCompiler variant driving the mingw32 (no-cygwin) GCC port."""
    compiler_type = 'mingw32'
    def __init__ (self,
                  verbose=0,
                  dry_run=0,
                  force=0):
        """Configure gcc command lines for mingw32 / 'gcc -mno-cygwin'."""
        CygwinCCompiler.__init__ (self, verbose, dry_run, force)
        # ld_version >= "2.13" support -shared so use it instead of
        # -mdll -static
        if self.ld_version >= "2.13":
            shared_option = "-shared"
        else:
            shared_option = "-mdll -static"
        # A real mingw32 doesn't need to specify a different entry point,
        # but cygwin 2.91.57 in no-cygwin-mode needs it.
        if self.gcc_version <= "2.91.57":
            entry_point = '--entry _DllMain@12'
        else:
            entry_point = ''
        # NOTE(review): presumably -mno-cygwin is skipped for gcc >= 4
        # because that switch was dropped from mainline gcc 4.x while
        # Cygwin's own gcc still accepts it -- confirm.
        if self.gcc_version < '4' or is_cygwingcc():
            no_cygwin = ' -mno-cygwin'
        else:
            no_cygwin = ''
        self.set_executables(compiler='gcc%s -O -Wall' % no_cygwin,
                             compiler_so='gcc%s -mdll -O -Wall' % no_cygwin,
                             compiler_cxx='g++%s -O -Wall' % no_cygwin,
                             linker_exe='gcc%s' % no_cygwin,
                             linker_so='%s%s %s %s'
                                       % (self.linker_dll, no_cygwin,
                                          shared_option, entry_point))
        # Maybe we should also append -mthreads, but then the finished
        # dlls need another dll (mingwm10.dll see Mingw32 docs)
        # (-mthreads: Support thread-safe exception handling on `Mingw32')
        # no additional libraries needed
        self.dll_libraries=[]
        # Include the appropriate MSVC runtime library if Python was built
        # with MSVC 7.0 or later.
        self.dll_libraries = get_msvcr()
    # __init__ ()
# class Mingw32CCompiler
# Because these compilers aren't configured in Python's pyconfig.h file by
# default, we should at least warn the user if he is using a unmodified
# version.
# Status codes returned by check_config_h(): compilation should proceed,
# should not, or pyconfig.h could not be inspected at all.
CONFIG_H_OK = "ok"
CONFIG_H_NOTOK = "not ok"
CONFIG_H_UNCERTAIN = "uncertain"
def check_config_h():
    """Check if the current Python installation (specifically, pyconfig.h)
    appears amenable to building extensions with GCC.  Returns a tuple
    (status, details), where 'status' is one of the following constants:

    CONFIG_H_OK
      all is well, go ahead and compile
    CONFIG_H_NOTOK
      doesn't look good
    CONFIG_H_UNCERTAIN
      not sure -- unable to read pyconfig.h

    'details' is a human-readable string explaining the situation.
    Note there are two ways to conclude "OK": either 'sys.version' contains
    the string "GCC" (implying that this Python was built with GCC), or the
    installed "pyconfig.h" contains the string "__GNUC__".
    """
    # XXX since this function also checks sys.version, it's not strictly a
    # "pyconfig.h" check -- should probably be renamed...
    from distutils import sysconfig
    import string
    # if sys.version contains GCC then python was compiled with
    # GCC, and the pyconfig.h file should be OK
    if string.find(sys.version,"GCC") >= 0:
        return (CONFIG_H_OK, "sys.version mentions 'GCC'")
    fn = sysconfig.get_config_h_filename()
    try:
        # It would probably better to read single lines to search.
        # But we do this only once, and it is fast enough
        f = open(fn)
        try:
            s = f.read()
        finally:
            f.close()
    except IOError, exc:
        # if we can't read this file, we cannot say it is wrong
        # the compiler will complain later about this file as missing
        return (CONFIG_H_UNCERTAIN,
                "couldn't read '%s': %s" % (fn, exc.strerror))
    else:
        # "pyconfig.h" contains an "#ifdef __GNUC__" or something similar
        if string.find(s,"__GNUC__") >= 0:
            return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn)
        else:
            return (CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)
def get_versions():
    """ Try to find out the versions of gcc, ld and dllwrap.
    If not possible it returns None for it.

    :returns: 3-tuple (gcc_version, ld_version, dllwrap_version) of
        LooseVersion instances or None for tools that are missing or
        whose output could not be parsed.
    """
    from distutils.version import LooseVersion
    from distutils.spawn import find_executable
    import re

    def _find_version(executable, args, pattern):
        # Run "<executable> <args>" and parse a dotted version number out
        # of its output; None when the tool is missing or unparsable.
        # (Factored out: the original repeated this logic three times.)
        exe = find_executable(executable)
        if not exe:
            return None
        out = os.popen(exe + ' ' + args, 'r')
        try:
            out_string = out.read()
        finally:
            out.close()
        result = re.search(pattern, out_string)
        if result:
            return LooseVersion(result.group(1))
        return None

    # Raw strings avoid invalid-escape warnings for \d on modern Python.
    gcc_version = _find_version('gcc', '-dumpversion',
                                r'(\d+\.\d+(\.\d+)*)')
    ld_version = _find_version('ld', '-v',
                               r'(\d+\.\d+(\.\d+)*)')
    # dllwrap prints the version after a leading space in its banner.
    dllwrap_version = _find_version('dllwrap', '--version',
                                    r' (\d+\.\d+(\.\d+)*)')
    return (gcc_version, ld_version, dllwrap_version)
def is_cygwingcc():
    '''Try to determine if the gcc that would be used is from cygwin.'''
    pipe = os.popen('gcc -dumpmachine', 'r')
    try:
        target = pipe.read()
    finally:
        pipe.close()
    # gcc reports its target triplet cpu-vendor-os; Cygwin's gcc uses
    # 'cygwin' as the os component.
    return target.strip().endswith('cygwin')
| mit |
srajag/nova | nova/api/openstack/compute/plugins/v3/cells.py | 3 | 11616 | # Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The cells extension."""
from oslo.config import cfg
from oslo import messaging
import six
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas.v3 import cells
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute
from nova import exception
from nova.i18n import _
from nova.openstack.common import strutils
from nova import rpc
CONF = cfg.CONF
# Pull in the cells options this API reads (local cell name/capabilities).
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')
ALIAS = "os-cells"
# Policy-check helper scoped to the v3 os-cells extension.
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
def _filter_keys(item, keys):
"""Filters all model attributes except for keys
item is a dict
"""
return dict((k, v) for k, v in item.iteritems() if k in keys)
def _fixup_cell_info(cell_info, keys):
    """If the transport_url is present in the cell, derive username,
    rpc_host, and rpc_port from it.
    """
    if 'transport_url' not in cell_info:
        return
    # Disassemble the transport URL
    raw_url = cell_info.pop('transport_url')
    try:
        parsed = rpc.get_transport_url(raw_url)
    except messaging.InvalidTransportURL:
        # Unparsable URL: just make sure every derived key exists.
        for key in keys:
            cell_info.setdefault(key, None)
        return
    if not parsed.hosts:
        return
    host = parsed.hosts[0]
    # Cell-info keys whose names differ from the TransportHost attributes.
    alias = {'rpc_host': 'hostname', 'rpc_port': 'port'}
    for key in keys:
        if key not in cell_info:
            cell_info[key] = getattr(host, alias.get(key, key))
def _scrub_cell(cell, detail=False):
    """Return a sanitized view of a cell record suitable for API output."""
    wanted = ['name', 'username', 'rpc_host', 'rpc_port']
    if detail:
        wanted = wanted + ['capabilities']
    # Keep transport_url only long enough for _fixup_cell_info to
    # decompose it into the rpc_* / username fields.
    scrubbed = _filter_keys(cell, wanted + ['transport_url'])
    _fixup_cell_info(scrubbed, wanted)
    if cell['is_parent']:
        scrubbed['type'] = 'parent'
    else:
        scrubbed['type'] = 'child'
    return scrubbed
class CellsController(object):
    """Controller for Cell resources."""
    def __init__(self):
        """Set up the compute API and cells RPC API clients."""
        self.compute_api = compute.API()
        self.cells_rpcapi = cells_rpcapi.CellsAPI()
    def _get_cells(self, ctxt, req, detail=False):
        """Return all cells."""
        # Ask the CellsManager for the most recent data
        items = self.cells_rpcapi.get_cell_info_for_neighbors(ctxt)
        # Honor the request's pagination (limit/offset) parameters.
        items = common.limited(items, req)
        items = [_scrub_cell(item, detail=detail) for item in items]
        return dict(cells=items)
    @extensions.expected_errors(501)
    @common.check_cells_enabled
    def index(self, req):
        """Return all cells in brief."""
        ctxt = req.environ['nova.context']
        authorize(ctxt)
        return self._get_cells(ctxt, req)
    @extensions.expected_errors(501)
    @common.check_cells_enabled
    def detail(self, req):
        """Return all cells in detail."""
        ctxt = req.environ['nova.context']
        authorize(ctxt)
        return self._get_cells(ctxt, req, detail=True)
    @extensions.expected_errors(501)
    @common.check_cells_enabled
    def info(self, req):
        """Return name and capabilities for this cell."""
        context = req.environ['nova.context']
        authorize(context)
        cell_capabs = {}
        my_caps = CONF.cells.capabilities
        # NOTE(review): assumes each configured capability is exactly
        # 'key=value'; a value containing '=' would raise here -- confirm
        # upstream config validation.
        for cap in my_caps:
            key, value = cap.split('=')
            cell_capabs[key] = value
        cell = {'name': CONF.cells.name,
                'type': 'self',
                'rpc_host': None,
                'rpc_port': 0,
                'username': None,
                'capabilities': cell_capabs}
        return dict(cell=cell)
    @extensions.expected_errors((404, 501))
    @common.check_cells_enabled
    def capacities(self, req, id=None):
        """Return capacities for a given cell or all cells."""
        # TODO(kaushikc): return capacities as a part of cell info and
        # cells detail calls in v3, along with capabilities
        context = req.environ['nova.context']
        authorize(context)
        try:
            capacities = self.cells_rpcapi.get_capacities(context,
                                                          cell_name=id)
        except exception.CellNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        return dict(cell={"capacities": capacities})
    @extensions.expected_errors((404, 501))
    @common.check_cells_enabled
    def show(self, req, id):
        """Return data about the given cell name. 'id' is a cell name."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            cell = self.cells_rpcapi.cell_get(context, id)
        except exception.CellNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        return dict(cell=_scrub_cell(cell))
    @extensions.expected_errors((403, 404, 501))
    @common.check_cells_enabled
    @wsgi.response(204)
    def delete(self, req, id):
        """Delete a child or parent cell entry. 'id' is a cell name."""
        context = req.environ['nova.context']
        # Both the generic extension policy and the action-specific
        # policy must pass.
        authorize(context)
        authorize(context, action="delete")
        try:
            num_deleted = self.cells_rpcapi.cell_delete(context, id)
        except exception.CellsUpdateUnsupported as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        if num_deleted == 0:
            raise exc.HTTPNotFound(
                explanation=_("Cell %s doesn't exist.") % id)
    def _normalize_cell(self, cell, existing=None):
        """Normalize input cell data.  Normalizations include:

        * Converting cell['type'] to is_parent boolean.
        * Merging existing transport URL with transport information.
        """
        # Start with the cell type conversion
        if 'type' in cell:
            cell['is_parent'] = cell['type'] == 'parent'
            del cell['type']
        # Avoid cell type being overwritten to 'child'
        elif existing:
            cell['is_parent'] = existing['is_parent']
        else:
            cell['is_parent'] = False
        # Now we disassemble the existing transport URL...
        transport_url = existing.get('transport_url') if existing else None
        transport_url = rpc.get_transport_url(transport_url)
        if 'rpc_virtual_host' in cell:
            transport_url.virtual_host = cell.pop('rpc_virtual_host')
        if not transport_url.hosts:
            transport_url.hosts.append(messaging.TransportHost())
        transport_host = transport_url.hosts[0]
        # Copy over the input fields
        transport_field_map = {
            'username': 'username',
            'password': 'password',
            'hostname': 'rpc_host',
            'port': 'rpc_port',
        }
        for key, input_field in transport_field_map.items():
            # Only override the value if we're given an override
            if input_field in cell:
                setattr(transport_host, key, cell.pop(input_field))
        # Now set the transport URL
        cell['transport_url'] = str(transport_url)
    @extensions.expected_errors((400, 403, 501))
    @common.check_cells_enabled
    @wsgi.response(201)
    @validation.schema(cells.create)
    def create(self, req, body):
        """Create a child cell entry."""
        context = req.environ['nova.context']
        authorize(context)
        authorize(context, action="create")
        cell = body['cell']
        self._normalize_cell(cell)
        try:
            cell = self.cells_rpcapi.cell_create(context, cell)
        except exception.CellsUpdateUnsupported as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        return dict(cell=_scrub_cell(cell))
    @extensions.expected_errors((400, 403, 404, 501))
    @common.check_cells_enabled
    @validation.schema(cells.update)
    def update(self, req, id, body):
        """Update a child cell entry.  'id' is the cell name to update."""
        context = req.environ['nova.context']
        authorize(context)
        authorize(context, action="update")
        cell = body['cell']
        cell.pop('id', None)
        try:
            # NOTE(Vek): There is a race condition here if multiple
            #            callers are trying to update the cell
            #            information simultaneously.  Since this
            #            operation is administrative in nature, and
            #            will be going away in the future, I don't see
            #            it as much of a problem...
            existing = self.cells_rpcapi.cell_get(context, id)
        except exception.CellNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        self._normalize_cell(cell, existing)
        try:
            cell = self.cells_rpcapi.cell_update(context, id, cell)
        except exception.CellNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except exception.CellsUpdateUnsupported as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        return dict(cell=_scrub_cell(cell))
    @extensions.expected_errors((400, 501))
    @common.check_cells_enabled
    @wsgi.response(204)
    @validation.schema(cells.sync_instances)
    def sync_instances(self, req, body):
        """Tell all cells to sync instance info."""
        context = req.environ['nova.context']
        authorize(context)
        authorize(context, action="sync_instances")
        project_id = body.pop('project_id', None)
        deleted = body.pop('deleted', False)
        updated_since = body.pop('updated_since', None)
        # 'deleted' may arrive as a string flag; normalize to bool.
        if isinstance(deleted, six.string_types):
            deleted = strutils.bool_from_string(deleted, strict=True)
        self.cells_rpcapi.sync_instances(context, project_id=project_id,
                updated_since=updated_since, deleted=deleted)
class Cells(extensions.V3APIExtensionBase):
    """Enables cells-related functionality such as adding neighbor cells,
    listing neighbor cells, and getting the capabilities of the local cell.
    """
    name = "Cells"
    alias = ALIAS
    version = 1
    def get_resources(self):
        """Expose the os-cells resource with its extra actions."""
        collection_actions = {'detail': 'GET',
                              'info': 'GET',
                              'sync_instances': 'POST',
                              'capacities': 'GET'}
        member_actions = {'capacities': 'GET'}
        resource = extensions.ResourceExtension(
            ALIAS, CellsController(),
            collection_actions=collection_actions,
            member_actions=member_actions)
        return [resource]
    def get_controller_extensions(self):
        """This extension contributes no controller extensions."""
        return []
| apache-2.0 |
bytedance/fedlearner | web_console_v2/api/test/fedlearner_webconsole/utils/file_manager_test.py | 1 | 9062 | # Copyright 2021 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import os
import shutil
import tempfile
import unittest
from collections import namedtuple
from pathlib import Path
from tensorflow.io import gfile
from fedlearner_webconsole.utils.file_manager import GFileFileManager, FileManager, File
FakeFileStatistics = namedtuple('FakeFileStatistics', ['length', 'mtime_nsec'])
class GFileFileManagerTest(unittest.TestCase):
_F1_SIZE = 3
_F2_SIZE = 4
_S1_SIZE = 55
_F1_MTIME = 1613982390
_F2_MTIME = 1613982391
_S1_MTIME = 1613982392
def _get_file_stat(self, orig_os_stat, path):
gfile_stat = FakeFileStatistics(2, 1613982390 * 1e9)
if path == self._get_temp_path('f1.txt') or \
path == self._get_temp_path('subdir/f1.txt'):
gfile_stat = FakeFileStatistics(self._F1_SIZE,
self._F1_MTIME * 1e9)
return gfile_stat
elif path == self._get_temp_path('f2.txt') or \
path == self._get_temp_path('f3.txt'):
gfile_stat = FakeFileStatistics(self._F2_SIZE,
self._F2_MTIME * 1e9)
return gfile_stat
elif path == self._get_temp_path('subdir/s1.txt'):
gfile_stat = FakeFileStatistics(self._S1_SIZE,
self._S1_MTIME * 1e9)
return gfile_stat
else:
return orig_os_stat(path)
def setUp(self):
# Create a temporary directory
self._test_dir = tempfile.mkdtemp()
subdir = Path(self._test_dir).joinpath('subdir')
subdir.mkdir(exist_ok=True)
Path(self._test_dir).joinpath('f1.txt').write_text('xxx')
Path(self._test_dir).joinpath('f2.txt').write_text('xxx')
subdir.joinpath('s1.txt').write_text('xxx')
# Mocks os.stat
self._orig_os_stat = os.stat
def fake_stat(path, *arg, **kwargs):
return self._get_file_stat(self._orig_os_stat, path)
gfile.stat = fake_stat
self._fm = GFileFileManager()
def tearDown(self):
os.stat = self._orig_os_stat
# Remove the directory after the test
shutil.rmtree(self._test_dir)
def _get_temp_path(self, file_path: str = None) -> str:
return str(Path(self._test_dir, file_path or '').absolute())
def test_can_handle(self):
self.assertTrue(self._fm.can_handle('/data/abc'))
self.assertFalse(self._fm.can_handle('data'))
def test_ls(self):
# List file
self.assertEqual(self._fm.ls(self._get_temp_path('f1.txt')), [
File(path=self._get_temp_path('f1.txt'),
size=self._F1_SIZE,
mtime=self._F1_MTIME)
])
# List folder
self.assertEqual(
sorted(self._fm.ls(self._get_temp_path()),
key=lambda file: file.path),
sorted([
File(path=self._get_temp_path('f1.txt'),
size=self._F1_SIZE,
mtime=self._F1_MTIME),
File(path=self._get_temp_path('f2.txt'),
size=self._F2_SIZE,
mtime=self._F2_MTIME)
],
key=lambda file: file.path))
# List folder recursively
self.assertEqual(
sorted(self._fm.ls(self._get_temp_path(), recursive=True),
key=lambda file: file.path),
sorted([
File(path=self._get_temp_path('f1.txt'),
size=self._F1_SIZE,
mtime=self._F1_MTIME),
File(path=self._get_temp_path('f2.txt'),
size=self._F2_SIZE,
mtime=self._F2_MTIME),
File(path=self._get_temp_path('subdir/s1.txt'),
size=self._S1_SIZE,
mtime=self._S1_MTIME),
],
key=lambda file: file.path))
    def test_move(self):
        """move relocates a file into a directory and renames when the
        destination is a file path; the source must no longer exist."""
        # Moves to another folder
        self._fm.move(self._get_temp_path('f1.txt'),
                      self._get_temp_path('subdir/'))
        self.assertEqual(
            sorted(self._fm.ls(self._get_temp_path('subdir')),
                   key=lambda file: file.path),
            sorted([
                File(path=self._get_temp_path('subdir/s1.txt'),
                     size=self._S1_SIZE,
                     mtime=self._S1_MTIME),
                File(path=self._get_temp_path('subdir/f1.txt'),
                     size=self._F1_SIZE,
                     mtime=self._F1_MTIME),
            ],
                   key=lambda file: file.path))
        # Renames
        self._fm.move(self._get_temp_path('f2.txt'),
                      self._get_temp_path('f3.txt'))
        # Listing the old name must fail once the file was renamed away.
        with self.assertRaises(ValueError):
            self._fm.ls(self._get_temp_path('f2.txt'))
        self.assertEqual(self._fm.ls(self._get_temp_path('f3.txt')), [
            File(path=self._get_temp_path('f3.txt'),
                 size=self._F2_SIZE,
                 mtime=self._F2_MTIME)
        ])
def test_remove(self):
self._fm.remove(self._get_temp_path('f1.txt'))
self._fm.remove(self._get_temp_path('subdir'))
self.assertEqual(self._fm.ls(self._get_temp_path(), recursive=True), [
File(path=self._get_temp_path('f2.txt'),
size=self._F2_SIZE,
mtime=self._F2_MTIME)
])
    def test_copy(self):
        """copy into a directory duplicates the file; the source remains."""
        self._fm.copy(self._get_temp_path('f1.txt'),
                      self._get_temp_path('subdir'))
        # Source file is untouched.
        self.assertEqual(self._fm.ls(self._get_temp_path('f1.txt')), [
            File(path=self._get_temp_path('f1.txt'),
                 size=self._F1_SIZE,
                 mtime=self._F1_MTIME)
        ])
        # The copy appears under the target directory with the same stats.
        self.assertEqual(self._fm.ls(self._get_temp_path('subdir/f1.txt')), [
            File(path=self._get_temp_path('subdir/f1.txt'),
                 size=self._F1_SIZE,
                 mtime=self._F1_MTIME)
        ])
def test_mkdir(self):
self._fm.mkdir(os.path.join(self._get_temp_path(), 'subdir2'))
self.assertTrue(os.path.isdir(self._get_temp_path('subdir2')))
def test_read(self):
content = self._fm.read(self._get_temp_path('f1.txt'))
self.assertEqual('xxx', content)
class FileManagerTest(unittest.TestCase):
    """Tests FileManager's dispatch to a customized (fake) file manager
    registered via the CUSTOMIZED_FILE_MANAGER environment variable."""
    @classmethod
    def setUpClass(cls):
        # Register the fake manager before any FileManager is constructed.
        fake_fm = 'testing.fake_file_manager:FakeFileManager'
        os.environ['CUSTOMIZED_FILE_MANAGER'] = fake_fm
    @classmethod
    def tearDownClass(cls):
        del os.environ['CUSTOMIZED_FILE_MANAGER']
    def setUp(self):
        self._fm = FileManager()
    def test_can_handle(self):
        self.assertTrue(self._fm.can_handle('fake://123'))
        # Falls back to default manager
        self.assertTrue(self._fm.can_handle('/data/123'))
        self.assertFalse(self._fm.can_handle('unsupported:///123'))
    def test_ls(self):
        # The fake manager answers with a fixed single-entry listing.
        self.assertEqual(self._fm.ls('fake://data'), [{
            'path': 'fake://data/f1.txt',
            'size': 0
        }])
    def test_move(self):
        self.assertTrue(self._fm.move('fake://move/123', 'fake://move/234'))
        self.assertFalse(
            self._fm.move('fake://do_not_move/123', 'fake://move/234'))
        # No file manager can handle this
        self.assertRaises(RuntimeError,
                          lambda: self._fm.move('hdfs://123', 'fake://abc'))
    def test_remove(self):
        self.assertTrue(self._fm.remove('fake://remove/123'))
        self.assertFalse(self._fm.remove('fake://do_not_remove/123'))
        # No file manager can handle this
        self.assertRaises(RuntimeError,
                          lambda: self._fm.remove('unsupported://123'))
    def test_copy(self):
        self.assertTrue(self._fm.copy('fake://copy/123', 'fake://copy/234'))
        self.assertFalse(
            self._fm.copy('fake://do_not_copy/123', 'fake://copy/234'))
        # No file manager can handle this
        self.assertRaises(RuntimeError,
                          lambda: self._fm.copy('hdfs://123', 'fake://abc'))
    def test_mkdir(self):
        self.assertTrue(self._fm.mkdir('fake://mkdir/123'))
        self.assertFalse(self._fm.mkdir('fake://do_not_mkdir/123'))
        # No file manager can handle this
        self.assertRaises(RuntimeError,
                          lambda: self._fm.mkdir('unsupported:///123'))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
HLFH/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mpora.py | 22 | 1990 | from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import int_or_none
class MporaIE(InfoExtractor):
    """youtube-dl extractor for mpora.com / mpora.de video pages.

    Scrapes the FM.Player JSON config embedded in the page and turns its
    encodings table into youtube-dl formats.
    """
    _VALID_URL = r'^https?://(www\.)?mpora\.(?:com|de)/videos/(?P<id>[^?#/]+)'
    IE_NAME = 'MPORA'
    _TEST = {
        'url': 'http://mpora.de/videos/AAdo8okx4wiz/embed?locale=de',
        'file': 'AAdo8okx4wiz.mp4',
        'md5': 'a7a228473eedd3be741397cf452932eb',
        'info_dict': {
            'title': 'Katy Curd - Winter in the Forest',
            'duration': 416,
            'uploader': 'Peter Newman Media',
        },
    }
    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('id')
        webpage = self._download_webpage(url, video_id)
        # The player is initialised with an inline JSON blob; capture it.
        data_json = self._search_regex(
            r"new FM\.Player\('[^']+',\s*(\{.*?)\).player;", webpage, 'json')
        data = json.loads(data_json)
        uploader = data['info_overlay'].get('username')
        # Duration is reported in milliseconds; convert to whole seconds.
        duration = data['video']['duration'] // 1000
        thumbnail = data['video']['encodings']['sd']['poster']
        title = data['info_overlay']['title']
        formats = []
        for encoding_id, edata in data['video']['encodings'].items():
            for src in edata['sources']:
                # Width is encoded in the file name suffix (e.g. _1280.mp4);
                # missing suffixes yield None via int_or_none.
                width_str = self._search_regex(
                    r'_([0-9]+)\.[a-zA-Z0-9]+$', src['src'],
                    False, default=None)
                vcodec = src['type'].partition('/')[2]
                formats.append({
                    'format_id': encoding_id + '-' + vcodec,
                    'url': src['src'],
                    'vcodec': vcodec,
                    'width': int_or_none(width_str),
                })
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'uploader': uploader,
            'duration': duration,
            'thumbnail': thumbnail,
        }
| gpl-3.0 |
Logicalmars/appengine-keeptrack | pytz/zoneinfo/America/Grand_Turk.py | 9 | 5297 | '''tzinfo timezone information for America/Grand_Turk.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Grand_Turk(DstTzInfo):
    '''America/Grand_Turk timezone definition. See datetime.tzinfo for details'''
    zone = 'America/Grand_Turk'
    # UTC instants at which the zone's offset/abbreviation changes.
    # Auto-generated from the Olson/IANA tz database — do not edit by hand;
    # each entry pairs positionally with the same index in _transition_info.
    _utc_transition_times = [
        d(1,1,1,0,0,0),
        d(1912,2,1,5,7,12),
        d(1979,4,29,5,0,0),
        d(1979,10,28,4,0,0),
        d(1980,4,27,5,0,0),
        d(1980,10,26,4,0,0),
        d(1981,4,26,5,0,0),
        d(1981,10,25,4,0,0),
        d(1982,4,25,5,0,0),
        d(1982,10,31,4,0,0),
        d(1983,4,24,5,0,0),
        d(1983,10,30,4,0,0),
        d(1984,4,29,5,0,0),
        d(1984,10,28,4,0,0),
        d(1985,4,28,5,0,0),
        d(1985,10,27,4,0,0),
        d(1986,4,27,5,0,0),
        d(1986,10,26,4,0,0),
        d(1987,4,5,5,0,0),
        d(1987,10,25,4,0,0),
        d(1988,4,3,5,0,0),
        d(1988,10,30,4,0,0),
        d(1989,4,2,5,0,0),
        d(1989,10,29,4,0,0),
        d(1990,4,1,5,0,0),
        d(1990,10,28,4,0,0),
        d(1991,4,7,5,0,0),
        d(1991,10,27,4,0,0),
        d(1992,4,5,5,0,0),
        d(1992,10,25,4,0,0),
        d(1993,4,4,5,0,0),
        d(1993,10,31,4,0,0),
        d(1994,4,3,5,0,0),
        d(1994,10,30,4,0,0),
        d(1995,4,2,5,0,0),
        d(1995,10,29,4,0,0),
        d(1996,4,7,5,0,0),
        d(1996,10,27,4,0,0),
        d(1997,4,6,5,0,0),
        d(1997,10,26,4,0,0),
        d(1998,4,5,5,0,0),
        d(1998,10,25,4,0,0),
        d(1999,4,4,5,0,0),
        d(1999,10,31,4,0,0),
        d(2000,4,2,5,0,0),
        d(2000,10,29,4,0,0),
        d(2001,4,1,5,0,0),
        d(2001,10,28,4,0,0),
        d(2002,4,7,5,0,0),
        d(2002,10,27,4,0,0),
        d(2003,4,6,5,0,0),
        d(2003,10,26,4,0,0),
        d(2004,4,4,5,0,0),
        d(2004,10,31,4,0,0),
        d(2005,4,3,5,0,0),
        d(2005,10,30,4,0,0),
        d(2006,4,2,5,0,0),
        d(2006,10,29,4,0,0),
        d(2007,4,1,5,0,0),
        d(2007,10,28,4,0,0),
        d(2008,4,6,5,0,0),
        d(2008,10,26,4,0,0),
        d(2009,4,5,5,0,0),
        d(2009,10,25,4,0,0),
        d(2010,4,4,5,0,0),
        d(2010,10,31,4,0,0),
        d(2011,4,3,5,0,0),
        d(2011,10,30,4,0,0),
        d(2012,4,1,5,0,0),
        d(2012,10,28,4,0,0),
        d(2013,4,7,5,0,0),
        d(2013,10,27,4,0,0),
        d(2014,4,6,5,0,0),
        d(2014,10,26,4,0,0),
        d(2015,4,5,5,0,0),
        d(2015,10,25,4,0,0),
        d(2016,4,3,5,0,0),
        d(2016,10,30,4,0,0),
        d(2017,4,2,5,0,0),
        d(2017,10,29,4,0,0),
        d(2018,4,1,5,0,0),
        d(2018,10,28,4,0,0),
        d(2019,4,7,5,0,0),
        d(2019,10,27,4,0,0),
        d(2020,4,5,5,0,0),
        d(2020,10,25,4,0,0),
        d(2021,4,4,5,0,0),
        d(2021,10,31,4,0,0),
        d(2022,4,3,5,0,0),
        d(2022,10,30,4,0,0),
        d(2023,4,2,5,0,0),
        d(2023,10,29,4,0,0),
        d(2024,4,7,5,0,0),
        d(2024,10,27,4,0,0),
        d(2025,4,6,5,0,0),
        d(2025,10,26,4,0,0),
        d(2026,4,5,5,0,0),
        d(2026,10,25,4,0,0),
        d(2027,4,4,5,0,0),
        d(2027,10,31,4,0,0),
        d(2028,4,2,5,0,0),
        d(2028,10,29,4,0,0),
        d(2029,4,1,5,0,0),
        d(2029,10,28,4,0,0),
        d(2030,4,7,5,0,0),
        d(2030,10,27,4,0,0),
        d(2031,4,6,5,0,0),
        d(2031,10,26,4,0,0),
        d(2032,4,4,5,0,0),
        d(2032,10,31,4,0,0),
        d(2033,4,3,5,0,0),
        d(2033,10,30,4,0,0),
        d(2034,4,2,5,0,0),
        d(2034,10,29,4,0,0),
        d(2035,4,1,5,0,0),
        d(2035,10,28,4,0,0),
        d(2036,4,6,5,0,0),
        d(2036,10,26,4,0,0),
        d(2037,4,5,5,0,0),
        d(2037,10,25,4,0,0),
        ]
    # (utcoffset seconds, dst seconds, abbreviation) for each transition:
    # KMT (historic Kingston Mean Time), then alternating EST/EDT.
    _transition_info = [
        i(-18420,0,'KMT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        ]
Grand_Turk = Grand_Turk()
| bsd-3-clause |
dya2/python-for-android | python-modules/twisted/twisted/python/dxprofile.py | 61 | 1528 | # Copyright (c) 2001-2007 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
DEPRECATED since Twisted 8.0.
Utility functions for reporting bytecode frequencies to Skip Montanaro's
stat collector.
This module requires a version of Python build with DYNAMIC_EXCUTION_PROFILE,
and optionally DXPAIRS, defined to be useful.
"""
import sys, types, xmlrpclib, warnings
warnings.warn("twisted.python.dxprofile is deprecated since Twisted 8.0.",
category=DeprecationWarning)
def rle(iterable):
    """
    Run length encode a list.

    Consecutive equal elements collapse into ``[element, count]`` pairs.
    List/tuple elements are themselves run-length encoded recursively
    (needed for nested DXPAIRS tables). An empty iterable yields ``[]``.

    Fix: the original used the Python-2-only ``iterable.next()`` and
    ``types.ListType``/``types.TupleType``; ``next()`` and
    ``isinstance(x, (list, tuple))`` behave identically and also work on
    Python 3.
    """
    iterator = iter(iterable)
    result = []
    try:
        # Prime the loop with the first element.
        previous = next(iterator)
    except StopIteration:
        return []
    runlen = 1
    for element in iterator:
        if element == previous:
            runlen = runlen + 1
            continue
        # Run ended: recursively encode sequence elements, emit the pair.
        if isinstance(previous, (list, tuple)):
            previous = rle(previous)
        result.append([previous, runlen])
        previous = element
        runlen = 1
    # Flush the final run.
    if isinstance(previous, (list, tuple)):
        previous = rle(previous)
    result.append([previous, runlen])
    return result
def report(email, appname):
    """
    Send an RLE encoded version of sys.getdxp() off to our Top Men (tm)
    for analysis.

    No-op unless the interpreter was built with DYNAMIC_EXECUTION_PROFILE
    (i.e. ``sys.getdxp`` exists) and *appname* is non-empty.
    """
    # NOTE(review): posts to a hard-coded third-party XML-RPC endpoint;
    # network errors propagate to the caller.
    if hasattr(sys, 'getdxp') and appname:
        dxp = xmlrpclib.ServerProxy("http://manatee.mojam.com:7304")
        dxp.add_dx_info(appname, email, sys.version_info[:3], rle(sys.getdxp()))
| apache-2.0 |
maniteja123/sympy | sympy/physics/vector/__init__.py | 116 | 1123 | __all__ = []
# The following pattern is used below for importing sub-modules:
#
# 1. "from foo import *". This imports all the names from foo.__all__ into
# this module. But, this does not put those names into the __all__ of
# this module. This enables "from sympy.physics.vector import ReferenceFrame" to
# work.
# 2. "import foo; __all__.extend(foo.__all__)". This adds all the names in
# foo.__all__ to the __all__ of this module. The names in __all__
# determine which names are imported when
# "from sympy.physics.vector import *" is done.
from . import frame
from .frame import *
__all__.extend(frame.__all__)
from . import dyadic
from .dyadic import *
__all__.extend(dyadic.__all__)
from . import vector
from .vector import *
__all__.extend(vector.__all__)
from . import point
from .point import *
__all__.extend(point.__all__)
from . import functions
from .functions import *
__all__.extend(functions.__all__)
from . import printing
from .printing import *
__all__.extend(printing.__all__)
from . import fieldfunctions
from .fieldfunctions import *
__all__.extend(fieldfunctions.__all__)
| bsd-3-clause |
Phixyn/ZoeyBot | modules/utils.py | 1 | 1119 | """
utils.py - Utilities module
ZoeyBot - Python IRC Bot
Copyright 2012-2014 (c) Phixyn
This file is part of ZoeyBot.
ZoeyBot is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ZoeyBot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ZoeyBot. If not, see <http://www.gnu.org/licenses/>.
"""
import os, subprocess
from datetime import datetime as dt
def timestamp():
    """Return the current local time formatted as ``(HH:MM:SS)``."""
    return dt.now().strftime("(%H:%M:%S)")
def clear_screen():
    """Clear the terminal, choosing the mechanism for the current platform."""
    platform = os.name
    if platform == 'nt':
        # Windows: 'cls' is a cmd.exe built-in, so the shell is required.
        subprocess.call('cls', shell=True)
        return
    if platform == 'posix':
        subprocess.call('clear')
        return
    # Unknown platform: fall back to the ANSI "erase display" escape code.
    print(chr(27) + "[2J")
| gpl-3.0 |
tangyiyong/odoo | addons/website/controllers/main.py | 43 | 20211 | # -*- coding: utf-8 -*-
import cStringIO
import datetime
from itertools import islice
import json
import xml.etree.ElementTree as ET
import logging
import re
import werkzeug.utils
import urllib2
import werkzeug.wrappers
from PIL import Image
import openerp
from openerp.addons.web.controllers.main import WebClient
from openerp.addons.web import http
from openerp.http import request, STATIC_CACHE
from openerp.tools import image_save_for_web
logger = logging.getLogger(__name__)
# Completely arbitrary limits
MAX_IMAGE_WIDTH, MAX_IMAGE_HEIGHT = IMAGE_LIMITS = (1024, 768)
LOC_PER_SITEMAP = 45000
SITEMAP_CACHE_TIME = datetime.timedelta(hours=12)
class Website(openerp.addons.web.controllers.main.Home):
    """Frontend HTTP controller for the odoo website module: page
    rendering, sitemap/robots, inline-editing helpers, attachments,
    image serving and website server actions."""
    #------------------------------------------------------
    # View
    #------------------------------------------------------
    @http.route('/', type='http', auth="public", website=True)
    def index(self, **kw):
        # Redirect to the first main-menu entry when it points somewhere
        # other than the homepage; otherwise render the homepage template.
        page = 'homepage'
        try:
            main_menu = request.registry['ir.model.data'].get_object(request.cr, request.uid, 'website', 'main_menu')
        except Exception:
            pass
        else:
            first_menu = main_menu.child_id and main_menu.child_id[0]
            if first_menu:
                if not (first_menu.url.startswith(('/page/', '/?', '/#')) or (first_menu.url=='/')):
                    return request.redirect(first_menu.url)
                if first_menu.url.startswith('/page/'):
                    return request.registry['ir.http'].reroute(first_menu.url)
        return self.page(page)

    @http.route(website=True, auth="public")
    def web_login(self, *args, **kw):
        # TODO: can't we just put auth=public, ... in web client ?
        return super(Website, self).web_login(*args, **kw)

    @http.route('/website/lang/<lang>', type='http', auth="public", website=True, multilang=False)
    def change_lang(self, lang, r='/', **kwargs):
        """Switch the visitor's language and remember it in a cookie."""
        if lang == 'default':
            lang = request.website.default_lang_code
            r = '/%s%s' % (lang, r or '/')
        redirect = werkzeug.utils.redirect(r or ('/%s' % lang), 303)
        redirect.set_cookie('website_lang', lang)
        return redirect

    @http.route('/page/<page:page>', type='http', auth="public", website=True)
    def page(self, page, **opt):
        """Render a CMS page; 404 template for editors, real 404 otherwise."""
        values = {
            'path': page,
        }
        # /page/website.XXX --> /page/XXX
        if page.startswith('website.'):
            return request.redirect('/page/' + page[8:], code=301)
        elif '.' not in page:
            page = 'website.%s' % page
        try:
            request.website.get_template(page)
        except ValueError, e:
            # page not found
            if request.website.is_publisher():
                # Publishers get the "create this page" 404 helper.
                page = 'website.page_404'
            else:
                return request.registry['ir.http']._handle_exception(e, 404)
        return request.render(page, values)

    @http.route(['/robots.txt'], type='http', auth="public")
    def robots(self):
        return request.render('website.robots', {'url_root': request.httprequest.url_root}, mimetype='text/plain')

    @http.route('/sitemap.xml', type='http', auth="public", website=True)
    def sitemap_xml_index(self):
        """Serve the sitemap, regenerating (and caching it as
        ir.attachment rows) when older than SITEMAP_CACHE_TIME; large
        sites are split into /sitemap-N.xml chunks plus an index."""
        cr, uid, context = request.cr, openerp.SUPERUSER_ID, request.context
        ira = request.registry['ir.attachment']
        iuv = request.registry['ir.ui.view']
        mimetype ='application/xml;charset=utf-8'
        content = None

        def create_sitemap(url, content):
            # Persist one generated sitemap file as a binary attachment.
            ira.create(cr, uid, dict(
                datas=content.encode('base64'),
                mimetype=mimetype,
                type='binary',
                name=url,
                url=url,
            ), context=context)

        sitemap = ira.search_read(cr, uid, [('url', '=' , '/sitemap.xml'), ('type', '=', 'binary')], ('datas', 'create_date'), context=context)
        if sitemap:
            # Check if stored version is still valid
            server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT
            create_date = datetime.datetime.strptime(sitemap[0]['create_date'], server_format)
            delta = datetime.datetime.now() - create_date
            if delta < SITEMAP_CACHE_TIME:
                content = sitemap[0]['datas'].decode('base64')

        if not content:
            # Remove all sitemaps in ir.attachments as we're going to regenerated them
            sitemap_ids = ira.search(cr, uid, [('url', '=like' , '/sitemap%.xml'), ('type', '=', 'binary')], context=context)
            if sitemap_ids:
                ira.unlink(cr, uid, sitemap_ids, context=context)

            pages = 0
            first_page = None
            # Enumerate pages as the website's public user to avoid
            # leaking restricted URLs into the sitemap.
            locs = request.website.sudo(user=request.website.user_id.id).enumerate_pages()
            while True:
                start = pages * LOC_PER_SITEMAP
                values = {
                    'locs': islice(locs, start, start + LOC_PER_SITEMAP),
                    'url_root': request.httprequest.url_root[:-1],
                }
                urls = iuv.render(cr, uid, 'website.sitemap_locs', values, context=context)
                if urls.strip():
                    page = iuv.render(cr, uid, 'website.sitemap_xml', dict(content=urls), context=context)
                    if not first_page:
                        first_page = page
                    pages += 1
                    create_sitemap('/sitemap-%d.xml' % pages, page)
                else:
                    break
            if not pages:
                return request.not_found()
            elif pages == 1:
                content = first_page
            else:
                # Sitemaps must be split in several smaller files with a sitemap index
                content = iuv.render(cr, uid, 'website.sitemap_index_xml', dict(
                    pages=range(1, pages + 1),
                    url_root=request.httprequest.url_root,
                ), context=context)
                create_sitemap('/sitemap.xml', content)

        return request.make_response(content, [('Content-Type', mimetype)])

    @http.route('/website/info', type='http', auth="public", website=True)
    def website_info(self):
        """Render the "website info" page listing installed apps/modules."""
        try:
            request.website.get_template('website.info').name
        except Exception, e:
            return request.registry['ir.http']._handle_exception(e, 404)
        irm = request.env()['ir.module.module'].sudo()
        apps = irm.search([('state','=','installed'),('application','=',True)])
        modules = irm.search([('state','=','installed'),('application','=',False)])
        values = {
            'apps': apps,
            'modules': modules,
            'version': openerp.service.common.exp_version()
        }
        return request.render('website.info', values)

    #------------------------------------------------------
    # Edit
    #------------------------------------------------------
    @http.route('/website/add/<path:path>', type='http', auth="user", website=True)
    def pagenew(self, path, noredirect=False, add_menu=None):
        """Create a new CMS page (and optionally a main-menu entry)."""
        xml_id = request.registry['website'].new_page(request.cr, request.uid, path, context=request.context)
        if add_menu:
            model, id = request.registry["ir.model.data"].get_object_reference(request.cr, request.uid, 'website', 'main_menu')
            request.registry['website.menu'].create(request.cr, request.uid, {
                    'name': path,
                    'url': "/page/" + xml_id[8:],
                    'parent_id': id,
                }, context=request.context)
        # Reverse action in order to allow shortcut for /page/<website_xml_id>
        url = "/page/" + re.sub(r"^website\.", '', xml_id)
        if noredirect:
            return werkzeug.wrappers.Response(url, mimetype='text/plain')
        return werkzeug.utils.redirect(url)

    @http.route('/website/theme_change', type='http', auth="user", website=True)
    def theme_change(self, theme_id=False, **kwargs):
        """Deactivate every theme view, then activate the selected one."""
        imd = request.registry['ir.model.data']
        Views = request.registry['ir.ui.view']

        _, theme_template_id = imd.get_object_reference(
            request.cr, request.uid, 'website', 'theme')
        views = Views.search(request.cr, request.uid, [
            ('inherit_id', '=', theme_template_id),
        ], context=request.context)
        Views.write(request.cr, request.uid, views, {
            'active': False,
        }, context=dict(request.context or {}, active_test=True))

        if theme_id:
            module, xml_id = theme_id.split('.')
            _, view_id = imd.get_object_reference(
                request.cr, request.uid, module, xml_id)
            Views.write(request.cr, request.uid, [view_id], {
                'active': True
            }, context=dict(request.context or {}, active_test=True))

        return request.render('website.themes', {'theme_changed': True})

    @http.route(['/website/snippets'], type='json', auth="public", website=True)
    def snippets(self):
        return request.website._render('website.snippets')

    @http.route('/website/reset_templates', type='http', auth='user', methods=['POST'], website=True)
    def reset_template(self, templates, redirect='/'):
        """Mark the given view templates updatable and upgrade their
        modules so the original template content is restored."""
        templates = request.httprequest.form.getlist('templates')
        modules_to_update = []
        for temp_id in templates:
            view = request.registry['ir.ui.view'].browse(request.cr, request.uid, int(temp_id), context=request.context)
            if view.page:
                continue
            view.model_data_id.write({
                'noupdate': False
            })
            if view.model_data_id.module not in modules_to_update:
                modules_to_update.append(view.model_data_id.module)
        if modules_to_update:
            module_obj = request.registry['ir.module.module']
            module_ids = module_obj.search(request.cr, request.uid, [('name', 'in', modules_to_update)], context=request.context)
            if module_ids:
                module_obj.button_immediate_upgrade(request.cr, request.uid, module_ids, context=request.context)
        return request.redirect(redirect)

    @http.route('/website/customize_template_get', type='json', auth='user', website=True)
    def customize_template_get(self, xml_id, full=False):
        return request.registry["ir.ui.view"].customize_template_get(
            request.cr, request.uid, xml_id, full=full, context=request.context)

    @http.route('/website/get_view_translations', type='json', auth='public', website=True)
    def get_view_translations(self, xml_id, lang=None):
        lang = lang or request.context.get('lang')
        return request.registry["ir.ui.view"].get_view_translations(
            request.cr, request.uid, xml_id, lang=lang, context=request.context)

    @http.route('/website/set_translations', type='json', auth='public', website=True)
    def set_translations(self, data, lang):
        """Store inline-edited translations: update the matching
        ir.translation row when one exists, otherwise create it."""
        irt = request.registry.get('ir.translation')
        for view_id, trans in data.items():
            view_id = int(view_id)
            for t in trans:
                initial_content = t['initial_content'].strip()
                new_content = t['new_content'].strip()
                tid = t['translation_id']
                if not tid:
                    # No id supplied by the editor: look up an existing
                    # translation for this view/lang/source text.
                    old_trans = irt.search_read(
                        request.cr, request.uid,
                        [
                            ('type', '=', 'view'),
                            ('res_id', '=', view_id),
                            ('lang', '=', lang),
                            ('src', '=', initial_content),
                        ])
                    if old_trans:
                        tid = old_trans[0]['id']
                if tid:
                    vals = {'value': new_content}
                    irt.write(request.cr, request.uid, [tid], vals)
                else:
                    new_trans = {
                        'name': 'website',
                        'res_id': view_id,
                        'lang': lang,
                        'type': 'view',
                        'source': initial_content,
                        'value': new_content,
                    }
                    if t.get('gengo_translation'):
                        new_trans['gengo_translation'] = t.get('gengo_translation')
                        new_trans['gengo_comment'] = t.get('gengo_comment')
                    irt.create(request.cr, request.uid, new_trans)
        return True

    @http.route('/website/translations', type='json', auth="public", website=True)
    def get_website_translations(self, lang):
        # Collect translations from every installed website* module.
        module_obj = request.registry['ir.module.module']
        module_ids = module_obj.search(request.cr, request.uid, [('name', 'ilike', 'website'), ('state', '=', 'installed')], context=request.context)
        modules = [x['name'] for x in module_obj.read(request.cr, request.uid, module_ids, ['name'], context=request.context)]
        return WebClient().translations(mods=modules, lang=lang)

    @http.route('/website/attach', type='http', auth='user', methods=['POST'], website=True)
    def attach(self, func, upload=None, url=None, disable_optimization=None):
        """Create an ir.attachment from an uploaded image or a URL and
        report the result back to the editor via a JS callback."""
        Attachments = request.registry['ir.attachment']

        website_url = message = None
        if not upload:
            # URL mode: store a link-type attachment.
            website_url = url
            name = url.split("/").pop()
            attachment_id = Attachments.create(request.cr, request.uid, {
                'name': name,
                'type': 'url',
                'url': url,
                'res_model': 'ir.ui.view',
            }, request.context)
        else:
            try:
                image_data = upload.read()
                image = Image.open(cStringIO.StringIO(image_data))
                w, h = image.size
                if w*h > 42e6: # Nokia Lumia 1020 photo resolution
                    raise ValueError(
                        u"Image size excessive, uploaded images must be smaller "
                        u"than 42 million pixel")

                if not disable_optimization and image.format in ('PNG', 'JPEG'):
                    image_data = image_save_for_web(image)

                attachment_id = Attachments.create(request.cr, request.uid, {
                    'name': upload.filename,
                    'datas': image_data.encode('base64'),
                    'datas_fname': upload.filename,
                    'res_model': 'ir.ui.view',
                }, request.context)

                [attachment] = Attachments.read(
                    request.cr, request.uid, [attachment_id], ['website_url'],
                    context=request.context)
                website_url = attachment['website_url']
            except Exception, e:
                logger.exception("Failed to upload image to attachment")
                message = unicode(e)

        # Report success (url) or failure (message) to the opener window.
        return """<script type='text/javascript'>
            window.parent['%s'](%s, %s);
        </script>""" % (func, json.dumps(website_url), json.dumps(message))

    @http.route(['/website/publish'], type='json', auth="public", website=True)
    def publish(self, id, object):
        """Toggle the website_published flag of any record and return it."""
        _id = int(id)
        _object = request.registry[object]
        obj = _object.browse(request.cr, request.uid, _id)

        values = {}
        if 'website_published' in _object._fields:
            values['website_published'] = not obj.website_published
        _object.write(request.cr, request.uid, [_id],
                      values, context=request.context)

        obj = _object.browse(request.cr, request.uid, _id)
        return bool(obj.website_published)

    @http.route(['/website/seo_suggest/<keywords>'], type='http', auth="public", website=True)
    def seo_suggest(self, keywords):
        """Proxy Google's autocomplete service for SEO keyword ideas."""
        url = "http://google.com/complete/search"
        try:
            req = urllib2.Request("%s?%s" % (url, werkzeug.url_encode({
                'ie': 'utf8', 'oe': 'utf8', 'output': 'toolbar', 'q': keywords})))
            # NOTE(review): this local 'request' shadows the imported
            # openerp.http request for the rest of the method — confirm
            # intentional before reusing request.* below.
            request = urllib2.urlopen(req)
        except (urllib2.HTTPError, urllib2.URLError):
            return []
        xmlroot = ET.fromstring(request.read())
        return json.dumps([sugg[0].attrib['data'] for sugg in xmlroot if len(sugg) and sugg[0].attrib['data']])

    #------------------------------------------------------
    # Helpers
    #------------------------------------------------------
    @http.route(['/website/kanban'], type='http', auth="public", methods=['POST'], website=True)
    def kanban(self, **post):
        return request.website.kanban_col(**post)

    def placeholder(self, response):
        # Fallback image served when the requested record/field is missing.
        return request.registry['website']._image_placeholder(response)

    @http.route([
        '/website/image',
        '/website/image/<model>/<id>/<field>',
        '/website/image/<model>/<id>/<field>/<int:max_width>x<int:max_height>'
        ], auth="public", website=True, multilang=False)
    def website_image(self, model, id, field, max_width=None, max_height=None):
        """ Fetches the requested field and ensures it does not go above
        (max_width, max_height), resizing it if necessary.

        If the record is not found or does not have the requested field,
        returns a placeholder image via :meth:`~.placeholder`.

        Sets and checks conditional response parameters:

        * :mailheader:`ETag` is always set (and checked)
        * :mailheader:`Last-Modified` is set iff the record has a
          concurrency field (``__last_update``)

        The requested field is assumed to be base64-encoded image data in
        all cases.
        """
        try:
            # id may carry a cache-busting sha suffix: "<id>_<sha>".
            idsha = id.split('_')
            id = idsha[0]
            response = werkzeug.wrappers.Response()
            return request.registry['website']._image(
                request.cr, request.uid, model, id, field, response, max_width, max_height,
                cache=STATIC_CACHE if len(idsha) > 1 else None)
        except Exception:
            logger.exception("Cannot render image field %r of record %s[%s] at size(%s,%s)",
                             field, model, id, max_width, max_height)
            response = werkzeug.wrappers.Response()
            return self.placeholder(response)

    #------------------------------------------------------
    # Server actions
    #------------------------------------------------------
    @http.route([
        '/website/action/<path_or_xml_id_or_id>',
        '/website/action/<path_or_xml_id_or_id>/<path:path>',
        ], type='http', auth="public", website=True)
    def actions_server(self, path_or_xml_id_or_id, **post):
        """Resolve a published ir.actions.server by xml_id, website path
        or database id, run it, and return its Response (or redirect /)."""
        cr, uid, context = request.cr, request.uid, request.context
        res, action_id, action = None, None, None
        ServerActions = request.registry['ir.actions.server']

        # find the action_id: either an xml_id, the path, or an ID
        if isinstance(path_or_xml_id_or_id, basestring) and '.' in path_or_xml_id_or_id:
            action_id = request.registry['ir.model.data'].xmlid_to_res_id(request.cr, request.uid, path_or_xml_id_or_id, raise_if_not_found=False)
        if not action_id:
            action_ids = ServerActions.search(cr, uid, [('website_path', '=', path_or_xml_id_or_id), ('website_published', '=', True)], context=context)
            action_id = action_ids and action_ids[0] or None
        if not action_id:
            try:
                action_id = int(path_or_xml_id_or_id)
            except ValueError:
                pass

        # check it effectively exists
        if action_id:
            action_ids = ServerActions.exists(cr, uid, [action_id], context=context)
            action_id = action_ids and action_ids[0] or None

        # run it, return only if we got a Response object
        if action_id:
            action = ServerActions.browse(cr, uid, action_id, context=context)
            if action.state == 'code' and action.website_published:
                action_res = ServerActions.run(cr, uid, [action_id], context=context)
                if isinstance(action_res, werkzeug.wrappers.Response):
                    res = action_res
        if res:
            return res
        return request.redirect('/')
| agpl-3.0 |
doraemonext/DEOnlineJudge | lib/tools/validator.py | 1 | 1501 | # -*- coding: utf-8 -*-
import re
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
class MinValue(object):
    """Minimum-length validator.

    Raises ValidationError when the value is shorter than ``length``
    characters; ``name`` is the human-readable field label used in the
    error message.
    """
    def __init__(self, name, length):
        self.name = name
        self.length = length

    def __call__(self, value, *args, **kwargs):
        if len(value) >= self.length:
            return
        raise ValidationError(u'%s最小长度为%d个字符' % (self.name, self.length))
class MaxValue(object):
    """Maximum-length validator.

    Raises ValidationError when the value exceeds ``length`` characters;
    ``name`` is the human-readable field label used in the error message.
    """
    def __init__(self, name, length):
        self.name = name
        self.length = length

    def __call__(self, value, *args, **kwargs):
        if len(value) <= self.length:
            return
        raise ValidationError(u'%s最大长度为%d个字符' % (self.name, self.length))
class SafeValue(object):
    """Safe-character validator.

    Only CJK ideographs, ASCII letters, digits, underscore and hyphen
    are allowed; anything else raises ValidationError.
    """
    # One-or-more allowed characters, anchored over the whole value.
    _ALLOWED = u'^[_a-zA-Z0-9\u4e00-\u9fa5\-]+$'

    def __init__(self, name):
        self.name = name

    def __call__(self, value, *args, **kwargs):
        if re.search(self._ALLOWED, value) is None:
            raise ValidationError(u'%s包含非法字符' % self.name)
class EmailValue(object):
    """E-mail address validator.

    Delegates to Django's ``validate_email`` and re-raises its failure
    as a ValidationError carrying this field's label.
    """
    def __init__(self, name):
        self.name = name

    def __call__(self, value, *args, **kwargs):
        try:
            validate_email(value)
        except ValidationError:
            # Replace Django's generic message with one naming the field.
            raise ValidationError(u'%s不合法' % self.name)
| mit |
vhaupert/mitmproxy | mitmproxy/proxy/config.py | 1 | 3244 | import os
import re
import typing
from OpenSSL import crypto
from mitmproxy import certs
from mitmproxy import exceptions
from mitmproxy import options as moptions
from mitmproxy.net import server_spec
class HostMatcher:
    """Matches ``(host, port)`` addresses against a regex list.

    ``handle`` selects the polarity: for ``"ignore"``/``"tcp"`` an
    address matches when ANY pattern hits the ``host:port`` string; for
    ``"allow"`` it matches when NO pattern hits. A falsy address never
    matches, and the matcher is truthy iff it has patterns.
    """
    def __init__(self, handle, patterns=tuple()):
        self.handle = handle
        self.patterns = list(patterns)
        self.regexes = [re.compile(pattern, re.IGNORECASE)
                        for pattern in self.patterns]

    def __call__(self, address):
        if not address:
            return False
        host = "%s:%s" % address
        hit = any(rex.search(host) for rex in self.regexes)
        if self.handle in ["ignore", "tcp"]:
            return hit
        # self.handle == "allow": pass exactly what no pattern matched.
        return not hit

    def __bool__(self):
        return bool(self.patterns)
class ProxyConfig:
    """Derived proxy state (certificate store, host filters, upstream spec)
    computed from mitmproxy options and recomputed on every option change."""

    def __init__(self, options: moptions.Options) -> None:
        self.options = options
        self.certstore: certs.CertStore
        self.check_filter: typing.Optional[HostMatcher] = None
        self.check_tcp: typing.Optional[HostMatcher] = None
        self.upstream_server: typing.Optional[server_spec.ServerSpec] = None
        # Run once with every option marked as updated, then subscribe to
        # future option changes.
        self.configure(options, set(options.keys()))
        options.changed.connect(self.configure)

    def configure(self, options: moptions.Options, updated: typing.Any) -> None:
        """Recompute derived state from *options*.

        Raises ``OptionsError`` for conflicting host filters, a missing
        confdir parent directory, or missing/malformed certificate files.
        """
        if options.allow_hosts and options.ignore_hosts:
            raise exceptions.OptionsError("--ignore-hosts and --allow-hosts are mutually "
                                          "exclusive; please choose one.")
        if options.ignore_hosts:
            self.check_filter = HostMatcher("ignore", options.ignore_hosts)
        elif options.allow_hosts:
            self.check_filter = HostMatcher("allow", options.allow_hosts)
        else:
            # No patterns: the matcher is falsy, so callers can treat this
            # as "no filtering configured".
            self.check_filter = HostMatcher(False)
        if "tcp_hosts" in updated:
            self.check_tcp = HostMatcher("tcp", options.tcp_hosts)
        certstore_path = os.path.expanduser(options.confdir)
        if not os.path.exists(os.path.dirname(certstore_path)):
            raise exceptions.OptionsError(
                "Certificate Authority parent directory does not exist: %s" %
                os.path.dirname(certstore_path)
            )
        key_size = options.key_size
        self.certstore = certs.CertStore.from_store(
            certstore_path,
            moptions.CONF_BASENAME,
            key_size
        )
        for c in options.certs:
            # Each entry is either "spec=path" or a bare path (apply to "*").
            parts = c.split("=", 1)
            if len(parts) == 1:
                parts = ["*", parts[0]]
            cert = os.path.expanduser(parts[1])
            if not os.path.exists(cert):
                raise exceptions.OptionsError(
                    "Certificate file does not exist: %s" % cert
                )
            try:
                self.certstore.add_cert_file(parts[0], cert)
            except crypto.Error:
                raise exceptions.OptionsError(
                    "Invalid certificate format: %s" % cert
                )
        m = options.mode
        if m.startswith("upstream:") or m.startswith("reverse:"):
            _, spec = server_spec.parse_with_mode(options.mode)
            self.upstream_server = spec
| mit |
hrayr-artunyan/shuup | shuup/reports/admin_module/views.py | 2 | 2404 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import six
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.views.generic import FormView
from shuup.reports.report import get_report_classes
from shuup.reports.writer import get_writer_instance
class ReportView(FormView):
    """Admin view that renders a report-type chooser plus the selected
    report's own parameter form, and dispatches rendering to a writer."""
    template_name = "shuup/reports/report.jinja"
    form_class = None  # resolved dynamically from the selected report type
    add_form_errors_as_messages = True

    def get_form(self, form_class=None):
        """Build either the concrete report form (when ?report= is given)
        or the default type-choice form."""
        self.report_classes = get_report_classes()
        selected_report = self.request.GET.get("report")
        if selected_report:
            return self._get_concrete_form(selected_report)
        return self._get_type_choice_form()

    def _get_concrete_form(self, selected_report):
        # Look up the registered report type and use its own form class.
        form_info = self.report_classes[selected_report]
        self.form_class = form_info.form_class
        return self._get_form(form_info)

    def _get_type_choice_form(self):
        selected_report = self.request.GET.get("report")
        form_info = self.report_classes[selected_report] if selected_report else None
        if not form_info:
            # Fall back to the first registered report type.
            form_info = six.next(six.itervalues(get_report_classes()))
        self.form_class = form_info.form_class
        return self._get_form(form_info)

    def _get_choices(self):
        # (identifier, title) pairs for the "report" type selector.
        return [(k, v.title) for k, v in six.iteritems(get_report_classes())]

    def _get_form(self, selected):
        """Instantiate ``self.form_class`` and append a "report" selector
        field initialized to the currently selected report type."""
        form = self.form_class(**self.get_form_kwargs())
        report_field = forms.ChoiceField(
            choices=self._get_choices(),
            label=_("Type"),
            required=True,
            initial=selected.identifier,
        )
        form.fields["report"] = report_field
        return form

    def form_valid(self, form):
        """Render the report inline for browser-friendly writers (unless a
        download is forced), otherwise return a downloadable response."""
        writer = get_writer_instance(form.cleaned_data["writer"])
        report = form.get_report_instance()
        if not self.request.POST.get("force_download") and writer.writer_type in ("html", "pprint", "json"):
            output = writer.render_report(report, inline=True)
            return self.render_to_response(self.get_context_data(form=form, result=output, current_report=report))
        return writer.get_response(report=report)
| agpl-3.0 |
wagtail/wagtail | wagtail/core/hooks.py | 5 | 2848 | from contextlib import ContextDecorator
from operator import itemgetter
from wagtail.utils.apps import get_app_submodules
_hooks = {}
def register(hook_name, fn=None, order=0):
    """Register *fn* under ``hook_name``.

    Usable as a plain call::

        register('hook_name', my_hook)

    or as a decorator::

        @register('hook_name')
        def my_hook(...):
            pass
    """
    if fn is None:
        # Called with only the name: act as a decorator factory that
        # registers the wrapped function and returns it unchanged.
        def wrap(func):
            register(hook_name, func, order=order)
            return func
        return wrap
    _hooks.setdefault(hook_name, []).append((fn, order))
class TemporaryHook(ContextDecorator):
    """Context manager/decorator that registers (hook_name, fn) pairs on
    entry and removes exactly the same (fn, order) entries on exit."""

    def __init__(self, hooks, order):
        self.hooks = hooks
        self.order = order

    def __enter__(self):
        for name, func in self.hooks:
            _hooks.setdefault(name, []).append((func, self.order))

    def __exit__(self, exc_type, exc_value, traceback):
        for name, func in self.hooks:
            _hooks[name].remove((func, self.order))
def register_temporarily(hook_name_or_hooks, fn=None, *, order=0):
    """
    Register hook for ``hook_name`` temporarily. This is useful for testing hooks.

    Can be used as a decorator::

        def my_hook(...):
            pass

        class TestMyHook(TestCase):
            @hooks.register_temporarily('hook_name', my_hook)
            def test_my_hook(self):
                pass

    or as a context manager::

        def my_hook(...):
            pass

        with hooks.register_temporarily('hook_name', my_hook):
            # Hook is registered here
        # Hook is unregistered here

    To register multiple hooks at the same time, pass in a list of 2-tuples:

        def my_hook(...):
            pass

        def my_other_hook(...):
            pass

        with hooks.register_temporarily([
            ('hook_name', my_hook),
            ('hook_name', my_other_hook),
        ]):
            # Hooks are registered here
    """
    # Normalize to a list of (hook_name, fn) pairs; a single name + fn
    # becomes a one-element list, a list is passed through unchanged.
    if not isinstance(hook_name_or_hooks, list) and fn is not None:
        hooks = [(hook_name_or_hooks, fn)]
    else:
        hooks = hook_name_or_hooks
    return TemporaryHook(hooks, order)
_searched_for_hooks = False
def search_for_hooks():
    """Import every installed app's ``wagtail_hooks`` submodule exactly once,
    so that their module-level ``register`` calls populate ``_hooks``."""
    global _searched_for_hooks
    if not _searched_for_hooks:
        # list() forces the generator returned by get_app_submodules to run.
        list(get_app_submodules('wagtail_hooks'))
        _searched_for_hooks = True
def get_hooks(hook_name):
    """Return the functions registered for *hook_name*, sorted by order."""
    search_for_hooks()
    registered = sorted(_hooks.get(hook_name, []), key=itemgetter(1))
    return [fn for fn, _ in registered]
| bsd-3-clause |
ppries/tensorflow | tensorflow/python/kernel_tests/edit_distance_op_test.py | 21 | 7875 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.edit_distance_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def ConstantOf(x):
  """Wrap x in a tf.constant, coercing non-string arrays to int64."""
  arr = np.asarray(x)
  if arr.dtype.char not in "SU":
    # Not a bytes/unicode dtype: force int64 (used for indices/shapes).
    arr = np.asarray(arr, dtype=np.int64)
  return tf.constant(arr)
class EditDistanceTest(tf.test.TestCase):
  """Functional tests for tf.edit_distance on sparse hypothesis/truth pairs."""

  def _testEditDistanceST(
      self, hypothesis_st, truth_st, normalize, expected_output,
      expected_shape, expected_err_re=None):
    """Evaluates tf.edit_distance on pre-built sparse inputs; checks the
    inferred static shape and values, or that the expected error is raised."""
    edit_distance = tf.edit_distance(
        hypothesis=hypothesis_st, truth=truth_st, normalize=normalize)
    if expected_err_re is None:
      self.assertEqual(edit_distance.get_shape(), expected_shape)
      output = edit_distance.eval()
      self.assertAllClose(output, expected_output)
    else:
      with self.assertRaisesOpError(expected_err_re):
        edit_distance.eval()

  def _testEditDistance(self, hypothesis, truth, normalize,
                        expected_output, expected_err_re=None):
    """Runs the check twice: once with SparseTensorValue inputs and once with
    SparseTensor inputs. hypothesis/truth are (indices, values, shape) tuples."""
    # Shape inference figures out the shape from the shape variables
    # Explicit tuple() needed since zip returns an iterator in Python 3.
    expected_shape = [
        max(h, t) for h, t in tuple(zip(hypothesis[2], truth[2]))[:-1]]
    # SparseTensorValue inputs.
    with tf.Graph().as_default() as g, self.test_session(g):
      # hypothesis and truth are (index, value, shape) tuples
      self._testEditDistanceST(
          hypothesis_st=tf.SparseTensorValue(
              *[ConstantOf(x) for x in hypothesis]),
          truth_st=tf.SparseTensorValue(*[ConstantOf(x) for x in truth]),
          normalize=normalize,
          expected_output=expected_output,
          expected_shape=expected_shape,
          expected_err_re=expected_err_re)
    # SparseTensor inputs.
    with tf.Graph().as_default() as g, self.test_session(g):
      # hypothesis and truth are (index, value, shape) tuples
      self._testEditDistanceST(
          hypothesis_st=tf.SparseTensor(*[ConstantOf(x) for x in hypothesis]),
          truth_st=tf.SparseTensor(*[ConstantOf(x) for x in truth]),
          normalize=normalize,
          expected_output=expected_output,
          expected_shape=expected_shape,
          expected_err_re=expected_err_re)

  def testEditDistanceNormalized(self):
    # Distances are divided by the truth sequence lengths.
    hypothesis_indices = [[0, 0], [0, 1],
                          [1, 0], [1, 1]]
    hypothesis_values = [0, 1,
                         1, -1]
    hypothesis_shape = [2, 2]
    truth_indices = [[0, 0],
                     [1, 0], [1, 1]]
    truth_values = [0,
                    1, 1]
    truth_shape = [2, 2]
    expected_output = [1.0, 0.5]
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=True,
        expected_output=expected_output)

  def testEditDistanceUnnormalized(self):
    # Raw edit distances, no division by truth length.
    hypothesis_indices = [[0, 0],
                          [1, 0], [1, 1]]
    hypothesis_values = [10,
                         10, 11]
    hypothesis_shape = [2, 2]
    truth_indices = [[0, 0], [0, 1],
                     [1, 0], [1, 1]]
    truth_values = [1, 2,
                    1, -1]
    truth_shape = [2, 3]
    expected_output = [2.0, 2.0]
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=False,
        expected_output=expected_output)

  def testEditDistanceProperDistance(self):
    # In this case, the values are individual characters stored in the
    # SparseTensor (type DT_STRING)
    hypothesis_indices = ([[0, i] for i, _ in enumerate("algorithm")] +
                          [[1, i] for i, _ in enumerate("altruistic")])
    hypothesis_values = [x for x in "algorithm"] + [x for x in "altruistic"]
    hypothesis_shape = [2, 11]
    truth_indices = ([[0, i] for i, _ in enumerate("altruistic")] +
                     [[1, i] for i, _ in enumerate("algorithm")])
    truth_values = [x for x in "altruistic"] + [x for x in "algorithm"]
    truth_shape = [2, 11]
    expected_unnormalized = [6.0, 6.0]
    expected_normalized = [6.0/len("altruistic"),
                           6.0/len("algorithm")]
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=False,
        expected_output=expected_unnormalized)
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=True,
        expected_output=expected_normalized)

  def testEditDistance3D(self):
    # Batched (rank-3) input: output has one distance per (batch, sequence).
    hypothesis_indices = [[0, 0, 0],
                          [1, 0, 0]]
    hypothesis_values = [0, 1]
    hypothesis_shape = [2, 1, 1]
    truth_indices = [[0, 1, 0],
                     [1, 0, 0],
                     [1, 1, 0]]
    truth_values = [0, 1, 1]
    truth_shape = [2, 2, 1]
    expected_output = [[np.inf, 1.0],  # (0,0): no truth, (0,1): no hypothesis
                       [0.0, 1.0]]  # (1,0): match, (1,1): no hypothesis
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=True,
        expected_output=expected_output)

  def testEditDistanceZeroLengthHypothesis(self):
    # Empty hypothesis vs. one-token truth: distance is 1 insertion.
    hypothesis_indices = np.empty((0, 2), dtype=np.int64)
    hypothesis_values = []
    hypothesis_shape = [1, 0]
    truth_indices = [[0, 0]]
    truth_values = [0]
    truth_shape = [1, 1]
    expected_output = [1.0]
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=True,
        expected_output=expected_output)

  def testEditDistanceZeroLengthTruth(self):
    # Non-empty hypothesis vs. empty truth: normalization divides by zero.
    hypothesis_indices = [[0, 0]]
    hypothesis_values = [0]
    hypothesis_shape = [1, 1]
    truth_indices = np.empty((0, 2), dtype=np.int64)
    truth_values = []
    truth_shape = [1, 0]
    expected_output = [np.inf]  # Normalized, loss is 1/0 = inf
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=True,
        expected_output=expected_output)

  def testEditDistanceZeroLengthHypothesisAndTruth(self):
    # Both sides empty: exact match by definition.
    hypothesis_indices = np.empty((0, 2), dtype=np.int64)
    hypothesis_values = []
    hypothesis_shape = [1, 0]
    truth_indices = np.empty((0, 2), dtype=np.int64)
    truth_values = []
    truth_shape = [1, 0]
    expected_output = [0]  # Normalized is 0 because of exact match
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=True,
        expected_output=expected_output)
# Run the test suite when executed directly.
if __name__ == "__main__":
  tf.test.main()
| apache-2.0 |
fjbatresv/odoo | addons/mail/mail_message.py | 141 | 47462 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import logging
from openerp import tools
from email.header import decode_header
from email.utils import formataddr
from openerp import SUPERUSER_ID, api
from openerp.osv import osv, orm, fields
from openerp.tools import html_email_clean
from openerp.tools.translate import _
from HTMLParser import HTMLParser
_logger = logging.getLogger(__name__)
""" Some tools for parsing / creating email fields """
def decode(text):
    """Return the unicode() string conversion of the given encoded SMTP header text.

    Returns None for falsy input (None or empty string).
    """
    if text:
        # Carriage returns confuse decode_header; strip them first.
        text = decode_header(text.replace('\r', ''))
        # The joining space will not be needed as of Python 3.3
        # See https://hg.python.org/cpython/rev/8c03fe231877
        return ' '.join([tools.ustr(x[0], x[1]) for x in text])
class MLStripper(HTMLParser):
    """HTML parser that accumulates only text nodes (used by strip_tags)."""
    def __init__(self):
        # NOTE(review): HTMLParser.__init__ is deliberately not called;
        # reset() performs parser initialisation on Python 2 — confirm
        # before porting to Python 3.
        self.reset()
        self.fed = []
    def handle_data(self, d):
        # Collect raw character data found between tags.
        self.fed.append(d)
    def get_data(self):
        # Concatenation of all text nodes seen so far.
        return ''.join(self.fed)
def strip_tags(html):
    """Return *html* with all markup removed, keeping only text content."""
    s = MLStripper()
    s.feed(html)
    return s.get_data()
class mail_message(osv.Model):
    """ Messages model: system notification (replacing res.log notifications),
        comments (OpenChatter discussion) and incoming emails. """
    _name = 'mail.message'
    _description = 'Message'
    _inherit = ['ir.needaction_mixin']
    _order = 'id desc'
    _rec_name = 'record_name'
    # Tuning constants for the Chatter message-fetching API:
    _message_read_limit = 30  # default number of messages fetched per read
    _message_read_fields = ['id', 'parent_id', 'model', 'res_id', 'body', 'subject', 'date', 'to_read', 'email_from',
                            'type', 'vote_user_ids', 'attachment_ids', 'author_id', 'partner_ids', 'record_name']
    _message_record_name_length = 18  # truncation length for record_name display
    _message_read_more_limit = 1024
    def default_get(self, cr, uid, fields, context=None):
        """Drop an invalid ``default_type`` from the context before computing
        defaults, so a leaked value cannot break the selection field."""
        # protection for `default_type` values leaking from menu action context (e.g. for invoices)
        if context and context.get('default_type') and context.get('default_type') not in [
                val[0] for val in self._columns['type'].selection]:
            context = dict(context, default_type=None)
        return super(mail_message, self).default_get(cr, uid, fields, context=context)
    def _get_to_read(self, cr, uid, ids, name, arg, context=None):
        """ Compute if the message is unread by the current user.

        :return: dict {message_id: bool} — True when an unread notification
            exists for the current user's partner.
        """
        res = dict((id, False) for id in ids)
        # Browse as SUPERUSER to resolve the partner even without read access.
        partner_id = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
        notif_obj = self.pool.get('mail.notification')
        notif_ids = notif_obj.search(cr, uid, [
            ('partner_id', 'in', [partner_id]),
            ('message_id', 'in', ids),
            ('is_read', '=', False),
        ], context=context)
        for notif in notif_obj.browse(cr, uid, notif_ids, context=context):
            res[notif.message_id.id] = True
        return res
    def _search_to_read(self, cr, uid, obj, name, domain, context=None):
        """ Search for messages to read by the current user. Condition is
            inversed because we search unread message on a is_read column. """
        return ['&', ('notification_ids.partner_id.user_ids', 'in', [uid]), ('notification_ids.is_read', '=', not domain[0][2])]
    def _get_starred(self, cr, uid, ids, name, arg, context=None):
        """ Compute if the message is starred by the current user.

        :return: dict {message_id: bool} — True when a starred notification
            exists for the current user's partner.
        """
        res = dict((id, False) for id in ids)
        # Browse as SUPERUSER to resolve the partner even without read access.
        partner_id = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
        notif_obj = self.pool.get('mail.notification')
        notif_ids = notif_obj.search(cr, uid, [
            ('partner_id', 'in', [partner_id]),
            ('message_id', 'in', ids),
            ('starred', '=', True),
        ], context=context)
        for notif in notif_obj.browse(cr, uid, notif_ids, context=context):
            res[notif.message_id.id] = True
        return res
    def _search_starred(self, cr, uid, obj, name, domain, context=None):
        """ Search for starred messages by the current user."""
        return ['&', ('notification_ids.partner_id.user_ids', 'in', [uid]), ('notification_ids.starred', '=', domain[0][2])]
_columns = {
'type': fields.selection([
('email', 'Email'),
('comment', 'Comment'),
('notification', 'System notification'),
], 'Type', size=12,
help="Message type: email for email message, notification for system "\
"message, comment for other messages such as user replies"),
'email_from': fields.char('From',
help="Email address of the sender. This field is set when no matching partner is found for incoming emails."),
'reply_to': fields.char('Reply-To',
help='Reply email address. Setting the reply_to bypasses the automatic thread creation.'),
'no_auto_thread': fields.boolean('No threading for answers',
help='Answers do not go in the original document\' discussion thread. This has an impact on the generated message-id.'),
'author_id': fields.many2one('res.partner', 'Author', select=1,
ondelete='set null',
help="Author of the message. If not set, email_from may hold an email address that did not match any partner."),
'author_avatar': fields.related('author_id', 'image_small', type="binary", string="Author's Avatar"),
'partner_ids': fields.many2many('res.partner', string='Recipients'),
'notified_partner_ids': fields.many2many('res.partner', 'mail_notification',
'message_id', 'partner_id', 'Notified partners',
help='Partners that have a notification pushing this message in their mailboxes'),
'attachment_ids': fields.many2many('ir.attachment', 'message_attachment_rel',
'message_id', 'attachment_id', 'Attachments'),
'parent_id': fields.many2one('mail.message', 'Parent Message', select=True,
ondelete='set null', help="Initial thread message."),
'child_ids': fields.one2many('mail.message', 'parent_id', 'Child Messages'),
'model': fields.char('Related Document Model', size=128, select=1),
'res_id': fields.integer('Related Document ID', select=1),
'record_name': fields.char('Message Record Name', help="Name get of the related document."),
'notification_ids': fields.one2many('mail.notification', 'message_id',
string='Notifications', auto_join=True,
help='Technical field holding the message notifications. Use notified_partner_ids to access notified partners.'),
'subject': fields.char('Subject'),
'date': fields.datetime('Date'),
'message_id': fields.char('Message-Id', help='Message unique identifier', select=1, readonly=1, copy=False),
'body': fields.html('Contents', help='Automatically sanitized HTML contents'),
'to_read': fields.function(_get_to_read, fnct_search=_search_to_read,
type='boolean', string='To read',
help='Current user has an unread notification linked to this message'),
'starred': fields.function(_get_starred, fnct_search=_search_starred,
type='boolean', string='Starred',
help='Current user has a starred notification linked to this message'),
'subtype_id': fields.many2one('mail.message.subtype', 'Subtype',
ondelete='set null', select=1,),
'vote_user_ids': fields.many2many('res.users', 'mail_vote',
'message_id', 'user_id', string='Votes',
help='Users that voted for this message'),
'mail_server_id': fields.many2one('ir.mail_server', 'Outgoing mail server', readonly=1),
}
    def _needaction_domain_get(self, cr, uid, context=None):
        # Needaction counter: messages still unread by the current user.
        return [('to_read', '=', True)]
    def _get_default_from(self, cr, uid, context=None):
        """Default ``email_from``: the user's alias address when configured,
        otherwise the user's own email; raises when neither is set."""
        this = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context)
        if this.alias_name and this.alias_domain:
            return formataddr((this.name, '%s@%s' % (this.alias_name, this.alias_domain)))
        elif this.email:
            return formataddr((this.name, this.email))
        raise osv.except_osv(_('Invalid Action!'), _("Unable to send email, please configure the sender's email address or alias."))
    def _get_default_author(self, cr, uid, context=None):
        # Author defaults to the current user's partner (read as SUPERUSER).
        return self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
_defaults = {
'type': 'email',
'date': fields.datetime.now,
'author_id': lambda self, cr, uid, ctx=None: self._get_default_author(cr, uid, ctx),
'body': '',
'email_from': lambda self, cr, uid, ctx=None: self._get_default_from(cr, uid, ctx),
}
#------------------------------------------------------
# Vote/Like
#------------------------------------------------------
    def vote_toggle(self, cr, uid, ids, context=None):
        ''' Toggles vote. Performed using read to avoid access rights issues.
            Done as SUPERUSER_ID because uid may vote for a message he cannot modify.

            NOTE(review): when several ids are passed, the returned value
            reflects only the last message processed — confirm callers pass
            a single id.
        '''
        for message in self.read(cr, uid, ids, ['vote_user_ids'], context=context):
            new_has_voted = not (uid in message.get('vote_user_ids'))
            # (4, id) links / (3, id) unlinks uid on the many2many.
            if new_has_voted:
                self.write(cr, SUPERUSER_ID, message.get('id'), {'vote_user_ids': [(4, uid)]}, context=context)
            else:
                self.write(cr, SUPERUSER_ID, message.get('id'), {'vote_user_ids': [(3, uid)]}, context=context)
        return new_has_voted or False
#------------------------------------------------------
# download an attachment
#------------------------------------------------------
    def download_attachment(self, cr, uid, id_message, attachment_id, context=None):
        """ Return the content of linked attachments.

        :return: dict {'base64': data, 'filename': name} when the attachment
            belongs to the message and has data, otherwise False.
        """
        # this will fail if you cannot read the message
        message_values = self.read(cr, uid, [id_message], ['attachment_ids'], context=context)[0]
        if attachment_id in message_values['attachment_ids']:
            # Read as SUPERUSER: access to the message implies access here.
            attachment = self.pool.get('ir.attachment').browse(cr, SUPERUSER_ID, attachment_id, context=context)
            if attachment.datas and attachment.datas_fname:
                return {
                    'base64': attachment.datas,
                    'filename': attachment.datas_fname,
                }
        return False
#------------------------------------------------------
# Notification API
#------------------------------------------------------
    @api.cr_uid_ids_context
    def set_message_read(self, cr, uid, msg_ids, read, create_missing=True, context=None):
        """ Set messages as (un)read. Technically, the notifications related
            to uid are set to (un)read. If for some msg_ids there are missing
            notifications (i.e. due to load more or thread parent fetching),
            they are created.

            :param bool read: set notification as (un)read
            :param bool create_missing: create notifications for missing entries
                (i.e. when acting on displayed messages not notified)

            :return number of message mark as read
        """
        notification_obj = self.pool.get('mail.notification')
        # Resolve the partner as SUPERUSER (uid may lack read access).
        user_pid = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
        domain = [('partner_id', '=', user_pid), ('message_id', 'in', msg_ids)]
        if not create_missing:
            # Only touch notifications whose state actually changes.
            domain += [('is_read', '=', not read)]
        notif_ids = notification_obj.search(cr, uid, domain, context=context)
        # all message have notifications: already set them as (un)read
        if len(notif_ids) == len(msg_ids) or not create_missing:
            notification_obj.write(cr, uid, notif_ids, {'is_read': read}, context=context)
            return len(notif_ids)
        # some messages do not have notifications: find which one, create notification, update read status
        notified_msg_ids = [notification.message_id.id for notification in notification_obj.browse(cr, uid, notif_ids, context=context)]
        to_create_msg_ids = list(set(msg_ids) - set(notified_msg_ids))
        for msg_id in to_create_msg_ids:
            # New notifications are created directly in the target state.
            notification_obj.create(cr, uid, {'partner_id': user_pid, 'is_read': read, 'message_id': msg_id}, context=context)
        notification_obj.write(cr, uid, notif_ids, {'is_read': read}, context=context)
        return len(notif_ids)
    @api.cr_uid_ids_context
    def set_message_starred(self, cr, uid, msg_ids, starred, create_missing=True, context=None):
        """ Set messages as (un)starred. Technically, the notifications related
            to uid are set to (un)starred.

            :param bool starred: set notification as (un)starred
            :param bool create_missing: create notifications for missing entries
                (i.e. when acting on displayed messages not notified)
        """
        notification_obj = self.pool.get('mail.notification')
        # Resolve the partner as SUPERUSER (uid may lack read access).
        user_pid = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
        domain = [('partner_id', '=', user_pid), ('message_id', 'in', msg_ids)]
        if not create_missing:
            # Only touch notifications whose state actually changes.
            domain += [('starred', '=', not starred)]
        values = {
            'starred': starred
        }
        if starred:
            # Starring a message also marks it unread ("to do" semantics).
            values['is_read'] = False
        notif_ids = notification_obj.search(cr, uid, domain, context=context)
        # all message have notifications: already set them as (un)starred
        if len(notif_ids) == len(msg_ids) or not create_missing:
            notification_obj.write(cr, uid, notif_ids, values, context=context)
            return starred
        # some messages do not have notifications: find which one, create notification, update starred status
        notified_msg_ids = [notification.message_id.id for notification in notification_obj.browse(cr, uid, notif_ids, context=context)]
        to_create_msg_ids = list(set(msg_ids) - set(notified_msg_ids))
        for msg_id in to_create_msg_ids:
            notification_obj.create(cr, uid, dict(values, partner_id=user_pid, message_id=msg_id), context=context)
        notification_obj.write(cr, uid, notif_ids, values, context=context)
        return starred
#------------------------------------------------------
# Message loading for web interface
#------------------------------------------------------
    def _message_read_dict_postprocess(self, cr, uid, messages, message_tree, context=None):
        """ Post-processing on values given by message_read. This method will
            handle partners in batch to avoid doing numerous queries.

            :param list messages: list of message, as get_dict result
            :param dict message_tree: {[msg.id]: msg browse record}
            :return: True (the *messages* dicts are updated in place)
        """
        res_partner_obj = self.pool.get('res.partner')
        ir_attachment_obj = self.pool.get('ir.attachment')
        pid = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
        # 1. Aggregate partners (author_id and partner_ids) and attachments
        partner_ids = set()
        attachment_ids = set()
        for key, message in message_tree.iteritems():
            if message.author_id:
                partner_ids |= set([message.author_id.id])
            if message.subtype_id and message.notified_partner_ids:  # take notified people of message with a subtype
                partner_ids |= set([partner.id for partner in message.notified_partner_ids])
            elif not message.subtype_id and message.partner_ids:  # take specified people of message without a subtype (log)
                partner_ids |= set([partner.id for partner in message.partner_ids])
            if message.attachment_ids:
                attachment_ids |= set([attachment.id for attachment in message.attachment_ids])
        # Read partners as SUPERUSER -> display the names like classic m2o even if no access
        partners = res_partner_obj.name_get(cr, SUPERUSER_ID, list(partner_ids), context=context)
        partner_tree = dict((partner[0], partner) for partner in partners)
        # 2. Attachments as SUPERUSER, because could receive msg and attachments for doc uid cannot see
        attachments = ir_attachment_obj.read(cr, SUPERUSER_ID, list(attachment_ids), ['id', 'datas_fname', 'name', 'file_type_icon'], context=context)
        attachments_tree = dict((attachment['id'], {
            'id': attachment['id'],
            'filename': attachment['datas_fname'],
            'name': attachment['name'],
            'file_type_icon': attachment['file_type_icon'],
        }) for attachment in attachments)
        # 3. Update message dictionaries
        for message_dict in messages:
            message_id = message_dict.get('id')
            message = message_tree[message_id]
            if message.author_id:
                author = partner_tree[message.author_id.id]
            else:
                # No matching partner: fall back on the raw sender address.
                author = (0, message.email_from)
            partner_ids = []
            if message.subtype_id:
                partner_ids = [partner_tree[partner.id] for partner in message.notified_partner_ids
                                if partner.id in partner_tree]
            else:
                partner_ids = [partner_tree[partner.id] for partner in message.partner_ids
                                if partner.id in partner_tree]
            attachment_ids = []
            for attachment in message.attachment_ids:
                if attachment.id in attachments_tree:
                    attachment_ids.append(attachments_tree[attachment.id])
            message_dict.update({
                'is_author': pid == author[0],
                'author_id': author,
                'partner_ids': partner_ids,
                'attachment_ids': attachment_ids,
                'user_pid': pid
            })
        return True
def _message_read_dict(self, cr, uid, message, parent_id=False, context=None):
""" Return a dict representation of the message. This representation is
used in the JS client code, to display the messages. Partners and
attachments related stuff will be done in post-processing in batch.
:param dict message: mail.message browse record
"""
# private message: no model, no res_id
is_private = False
if not message.model or not message.res_id:
is_private = True
# votes and favorites: res.users ids, no prefetching should be done
vote_nb = len(message.vote_user_ids)
has_voted = uid in [user.id for user in message.vote_user_ids]
try:
if parent_id:
max_length = 300
else:
max_length = 100
body_short = html_email_clean(message.body, remove=False, shorten=True, max_length=max_length)
except Exception:
body_short = '<p><b>Encoding Error : </b><br/>Unable to convert this message (id: %s).</p>' % message.id
_logger.exception(Exception)
return {'id': message.id,
'type': message.type,
'subtype': message.subtype_id.name if message.subtype_id else False,
'body': message.body,
'body_short': body_short,
'model': message.model,
'res_id': message.res_id,
'record_name': message.record_name,
'subject': message.subject,
'date': message.date,
'to_read': message.to_read,
'parent_id': parent_id,
'is_private': is_private,
'author_id': False,
'author_avatar': message.author_avatar,
'is_author': False,
'partner_ids': [],
'vote_nb': vote_nb,
'has_voted': has_voted,
'is_favorite': message.starred,
'attachment_ids': [],
}
def _message_read_add_expandables(self, cr, uid, messages, message_tree, parent_tree,
        message_unload_ids=[], thread_level=0, domain=[], parent_id=False, context=None):
    """ Create expandables for message_read, to load new messages.
        1. get the expandable for new threads
            if display is flat (thread_level == 0):
                fetch message_ids < min(already displayed ids), because we
                want a flat display, ordered by id
            else:
                fetch message_ids that are not childs of already displayed
                messages
        2. get the expandables for new messages inside threads if display
            is not flat
            for each thread header, search for its childs
            for each hole in the child list based on message displayed,
            create an expandable

        :param list messages: list of message structure for the Chatter
            widget to which expandables are added
        :param dict message_tree: dict [id]: browse record of this message
        :param dict parent_tree: dict [parent_id]: [child_ids]
        :param list message_unload_ids: list of message_ids we do not want
            to load
        :return bool: True
    """
    # NOTE(review): the [] defaults are mutable, but both are only read
    # (concatenated with +), never mutated, so sharing across calls is
    # harmless here.
    def _get_expandable(domain, message_nb, parent_id, max_limit):
        # Pseudo-message dict rendered by the Chatter widget as a
        # 'load more' link; message_nb == -1 means 'unknown count'.
        return {
            'domain': domain,
            'nb_messages': message_nb,
            'type': 'expandable',
            'parent_id': parent_id,
            'max_limit': max_limit,
        }

    if not messages:
        return True
    message_ids = sorted(message_tree.keys())

    # 1. get the expandable for new threads
    if thread_level == 0:
        exp_domain = domain + [('id', '<', min(message_unload_ids + message_ids))]
    else:
        exp_domain = domain + ['!', ('id', 'child_of', message_unload_ids + parent_tree.keys())]
    # limit=1: we only need to know whether at least one more message exists
    more_count = self.search(cr, uid, exp_domain, context=context, limit=1)
    if more_count:
        # inside a thread: prepend
        if parent_id:
            messages.insert(0, _get_expandable(exp_domain, -1, parent_id, True))
        # new threads: append
        else:
            messages.append(_get_expandable(exp_domain, -1, parent_id, True))

    # 2. get the expandables for new messages inside threads if display is not flat
    if thread_level == 0:
        return True
    for message_id in message_ids:
        message = message_tree[message_id]
        # generate only for thread header messages (TDE note: parent_id may be False is uid cannot see parent_id, seems ok)
        if message.parent_id:
            continue
        # check there are message for expandable
        child_ids = set([child.id for child in message.child_ids]) - set(message_unload_ids)
        child_ids = sorted(list(child_ids), reverse=True)
        if not child_ids:
            continue
        # make groups of unread messages: walk the children newest-first and
        # accumulate runs of not-yet-displayed ids into [id_min, id_max] holes
        id_min, id_max, nb = max(child_ids), 0, 0
        for child_id in child_ids:
            if not child_id in message_ids:
                # not displayed yet: extend the current hole
                nb += 1
                if id_min > child_id:
                    id_min = child_id
                if id_max < child_id:
                    id_max = child_id
            elif nb > 0:
                # a displayed child closes the current hole: insert an
                # expandable covering it right after that child
                exp_domain = [('id', '>=', id_min), ('id', '<=', id_max), ('id', 'child_of', message_id)]
                idx = [msg.get('id') for msg in messages].index(child_id) + 1
                # messages.append(_get_expandable(exp_domain, nb, message_id, False))
                messages.insert(idx, _get_expandable(exp_domain, nb, message_id, False))
                id_min, id_max, nb = max(child_ids), 0, 0
            else:
                # displayed child, no pending hole: reset the accumulator
                id_min, id_max, nb = max(child_ids), 0, 0
        # flush the trailing hole, if any, right after the thread header
        if nb > 0:
            exp_domain = [('id', '>=', id_min), ('id', '<=', id_max), ('id', 'child_of', message_id)]
            idx = [msg.get('id') for msg in messages].index(message_id) + 1
            # messages.append(_get_expandable(exp_domain, nb, message_id, id_min))
            messages.insert(idx, _get_expandable(exp_domain, nb, message_id, False))
    return True
@api.cr_uid_context
def message_read(self, cr, uid, ids=None, domain=None, message_unload_ids=None,
                 thread_level=0, context=None, parent_id=False, limit=None):
    """ Read messages from mail.message, and get back a list of structured
        messages to be displayed as discussion threads. If IDs is set,
        fetch these records. Otherwise use the domain to fetch messages.
        After having fetch messages, their ancestors will be added to obtain
        well formed threads, if uid has access to them.

        After reading the messages, expandable messages are added in the
        message list (see ``_message_read_add_expandables``). It consists
        in messages holding the 'read more' data: number of messages to
        read, domain to apply.

        :param list ids: optional IDs to fetch
        :param list domain: optional domain for searching ids if ids not set
        :param list message_unload_ids: optional ids we do not want to fetch,
            because i.e. they are already displayed somewhere
        :param int parent_id: context of parent_id
            - if parent_id reached when adding ancestors, stop going further
              in the ancestor search
            - if set in flat mode, ancestor_id is set to parent_id
        :param int limit: number of messages to fetch, before adding the
            ancestors and expandables
        :return list: list of message structure for the Chatter widget
    """
    assert thread_level in [0, 1], 'message_read() thread_level should be 0 (flat) or 1 (1 level of thread); given %s.' % thread_level
    domain = domain if domain is not None else []
    message_unload_ids = message_unload_ids if message_unload_ids is not None else []
    if message_unload_ids:
        domain += [('id', 'not in', message_unload_ids)]
    limit = limit or self._message_read_limit
    message_tree = {}
    message_list = []
    parent_tree = {}

    # no specific IDS given: fetch messages according to the domain, add their parents if uid has access to
    if ids is None:
        ids = self.search(cr, uid, domain, context=context, limit=limit)

    # fetch parent if threaded, sort messages
    for message in self.browse(cr, uid, ids, context=context):
        message_id = message.id
        if message_id in message_tree:
            continue
        message_tree[message_id] = message

        # find parent_id
        if thread_level == 0:
            tree_parent_id = parent_id
        else:
            tree_parent_id = message_id
            parent = message
            # climb the ancestor chain until the top (or the requested
            # parent_id boundary) to find the thread header
            while parent.parent_id and parent.parent_id.id != parent_id:
                parent = parent.parent_id
                tree_parent_id = parent.id
            if not parent.id in message_tree:
                message_tree[parent.id] = parent
        # newest messages first
        parent_tree.setdefault(tree_parent_id, [])
        if tree_parent_id != message_id:
            parent_tree[tree_parent_id].append(self._message_read_dict(cr, uid, message_tree[message_id], parent_id=tree_parent_id, context=context))

    if thread_level:
        # threaded mode: sort children by id and prepend the thread header
        for key, message_id_list in parent_tree.iteritems():
            message_id_list.sort(key=lambda item: item['id'])
            message_id_list.insert(0, self._message_read_dict(cr, uid, message_tree[key], context=context))

    # create final ordered message_list based on parent_tree
    parent_list = parent_tree.items()
    # threads ordered by their most recent message (newest first)
    parent_list = sorted(parent_list, key=lambda item: max([msg.get('id') for msg in item[1]]) if item[1] else item[0], reverse=True)
    message_list = [message for (key, msg_list) in parent_list for message in msg_list]

    # get the child expandable messages for the tree
    self._message_read_dict_postprocess(cr, uid, message_list, message_tree, context=context)
    self._message_read_add_expandables(cr, uid, message_list, message_tree, parent_tree,
        thread_level=thread_level, message_unload_ids=message_unload_ids, domain=domain, parent_id=parent_id, context=context)
    return message_list
#------------------------------------------------------
# mail_message internals
#------------------------------------------------------
def init(self, cr):
    """Ensure the composite (model, res_id) index exists on mail_message."""
    # Probe the PostgreSQL catalog first so CREATE INDEX only runs once,
    # no matter how many times module init is replayed.
    cr.execute("""SELECT indexname FROM pg_indexes WHERE indexname = 'mail_message_model_res_id_idx'""")
    index_row = cr.fetchone()
    if index_row:
        return
    cr.execute("""CREATE INDEX mail_message_model_res_id_idx ON mail_message (model, res_id)""")
def _find_allowed_model_wise(self, cr, uid, doc_model, doc_dict, context=None):
    """Return the set of message ids whose ``doc_model`` document is
    readable by ``uid``.

    :param dict doc_dict: mapping {document id: iterable of message ids}
    """
    # active_test=False: messages attached to archived documents must not
    # be silently hidden by the default active filter.
    search_ctx = dict(context or {}, active_test=False)
    readable_doc_ids = self.pool[doc_model].search(
        cr, uid, [('id', 'in', doc_dict.keys())], context=search_ctx)
    allowed = set()
    for doc_id in readable_doc_ids:
        allowed.update(doc_dict[doc_id])
    return allowed
def _find_allowed_doc_ids(self, cr, uid, model_ids, context=None):
    """Return the set of message ids readable by uid through their related
    documents, across all models in ``model_ids``.

    :param dict model_ids: {model name: {document id: set of message ids}}
    """
    access_model = self.pool.get('ir.model.access')
    allowed = set()
    for doc_model, doc_dict in model_ids.iteritems():
        # last argument False = do not raise, just skip models the user
        # has no read access on at all
        if access_model.check(cr, uid, doc_model, 'read', False):
            allowed.update(self._find_allowed_model_wise(cr, uid, doc_model, doc_dict, context=context))
    return allowed
def _search(self, cr, uid, args, offset=0, limit=None, order=None,
            context=None, count=False, access_rights_uid=None):
    """ Override that adds specific access rights of mail.message, to remove
        ids uid could not see according to our custom rules. Please refer
        to check_access_rule for more details about those rules.

        After having received ids of a classic search, keep only:
        - if author_id == pid, uid is the author, OR
        - a notification (id, pid) exists, uid has been notified, OR
        - uid have read access to the related document is model, res_id
        - otherwise: remove the id
    """
    # Rules do not apply to administrator
    if uid == SUPERUSER_ID:
        return super(mail_message, self)._search(
            cr, uid, args, offset=offset, limit=limit, order=order,
            context=context, count=count, access_rights_uid=access_rights_uid)
    # Perform a super with count as False, to have the ids, not a counter
    ids = super(mail_message, self)._search(
        cr, uid, args, offset=offset, limit=limit, order=order,
        context=context, count=False, access_rights_uid=access_rights_uid)
    if not ids and count:
        return 0
    elif not ids:
        return ids

    # pid: partner id of the current user, used by every rule below
    pid = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
    author_ids, partner_ids, allowed_ids = set([]), set([]), set([])
    model_ids = {}

    # check read access rights before checking the actual rules on the given ids
    super(mail_message, self).check_access_rights(cr, access_rights_uid or uid, 'read')

    # one SQL pass: fetch each message with its (optional) notification row
    # for the current partner; %% escapes the placeholders through the
    # table-name interpolation
    cr.execute("""SELECT DISTINCT m.id, m.model, m.res_id, m.author_id, n.partner_id
        FROM "%s" m LEFT JOIN "mail_notification" n
        ON n.message_id=m.id AND n.partner_id = (%%s)
        WHERE m.id = ANY (%%s)""" % self._table, (pid, ids,))
    for id, rmod, rid, author_id, partner_id in cr.fetchall():
        if author_id == pid:
            author_ids.add(id)
        elif partner_id == pid:
            partner_ids.add(id)
        elif rmod and rid:
            # neither author nor notified: defer to document read access
            model_ids.setdefault(rmod, {}).setdefault(rid, set()).add(id)

    allowed_ids = self._find_allowed_doc_ids(cr, uid, model_ids, context=context)
    final_ids = author_ids | partner_ids | allowed_ids

    if count:
        return len(final_ids)
    else:
        # re-construct a list based on ids, because set did not keep the original order
        id_list = [id for id in ids if id in final_ids]
        return id_list
def check_access_rule(self, cr, uid, ids, operation, context=None):
    """ Access rules of mail.message:
        - read: if
            - author_id == pid, uid is the author, OR
            - mail_notification (id, pid) exists, uid has been notified, OR
            - uid have read access to the related document if model, res_id
            - otherwise: raise
        - create: if
            - no model, no res_id, I create a private message OR
            - pid in message_follower_ids if model, res_id OR
            - mail_notification (parent_id.id, pid) exists, uid has been notified of the parent, OR
            - uid have write or create access on the related document if model, res_id, OR
            - otherwise: raise
        - write: if
            - author_id == pid, uid is the author, OR
            - uid has write or create access on the related document if model, res_id
            - otherwise: raise
        - unlink: if
            - uid has write or create access on the related document if model, res_id
            - otherwise: raise
    """
    def _generate_model_record_ids(msg_val, msg_ids):
        """ Group the given message ids by their related document.

            :param msg_val: {'msg_id': {'model': .., 'res_id': .., 'author_id': ..}}
            :param msg_ids: iterable of message ids to group
            :return: {'model': set(res_id, ...), ... }
        """
        model_record_ids = {}
        for id in msg_ids:
            vals = msg_val.get(id, {})
            if vals.get('model') and vals.get('res_id'):
                model_record_ids.setdefault(vals['model'], set()).add(vals['res_id'])
        return model_record_ids

    # administrator bypasses every rule
    if uid == SUPERUSER_ID:
        return
    if isinstance(ids, (int, long)):
        ids = [ids]
    not_obj = self.pool.get('mail.notification')
    fol_obj = self.pool.get('mail.followers')
    partner_id = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=None).partner_id.id

    # Read mail_message.ids to have their values
    message_values = dict((res_id, {}) for res_id in ids)
    cr.execute('SELECT DISTINCT id, model, res_id, author_id, parent_id FROM "%s" WHERE id = ANY (%%s)' % self._table, (ids,))
    for id, rmod, rid, author_id, parent_id in cr.fetchall():
        message_values[id] = {'model': rmod, 'res_id': rid, 'author_id': author_id, 'parent_id': parent_id}

    # Author condition (READ, WRITE, CREATE (private)) -> could become an ir.rule ?
    author_ids = []
    if operation == 'read' or operation == 'write':
        author_ids = [mid for mid, message in message_values.iteritems()
                      if message.get('author_id') and message.get('author_id') == partner_id]
    elif operation == 'create':
        # private message: no related document at all
        author_ids = [mid for mid, message in message_values.iteritems()
                      if not message.get('model') and not message.get('res_id')]

    # Parent condition, for create (check for received notifications for the created message parent)
    notified_ids = []
    if operation == 'create':
        parent_ids = [message.get('parent_id') for mid, message in message_values.iteritems()
                      if message.get('parent_id')]
        not_ids = not_obj.search(cr, SUPERUSER_ID, [('message_id.id', 'in', parent_ids), ('partner_id', '=', partner_id)], context=context)
        not_parent_ids = [notif.message_id.id for notif in not_obj.browse(cr, SUPERUSER_ID, not_ids, context=context)]
        notified_ids += [mid for mid, message in message_values.iteritems()
                         if message.get('parent_id') in not_parent_ids]

    # Notification condition, for read (check for received notifications and create (in message_follower_ids)) -> could become an ir.rule, but not till we do not have a many2one variable field
    other_ids = set(ids).difference(set(author_ids), set(notified_ids))
    model_record_ids = _generate_model_record_ids(message_values, other_ids)
    if operation == 'read':
        not_ids = not_obj.search(cr, SUPERUSER_ID, [
            ('partner_id', '=', partner_id),
            ('message_id', 'in', ids),
        ], context=context)
        notified_ids = [notification.message_id.id for notification in not_obj.browse(cr, SUPERUSER_ID, not_ids, context=context)]
    elif operation == 'create':
        # follower of the related document may post on it
        for doc_model, doc_ids in model_record_ids.items():
            fol_ids = fol_obj.search(cr, SUPERUSER_ID, [
                ('res_model', '=', doc_model),
                ('res_id', 'in', list(doc_ids)),
                ('partner_id', '=', partner_id),
            ], context=context)
            fol_mids = [follower.res_id for follower in fol_obj.browse(cr, SUPERUSER_ID, fol_ids, context=context)]
            notified_ids += [mid for mid, message in message_values.iteritems()
                             if message.get('model') == doc_model and message.get('res_id') in fol_mids]

    # CRUD: Access rights related to the document
    other_ids = other_ids.difference(set(notified_ids))
    model_record_ids = _generate_model_record_ids(message_values, other_ids)
    document_related_ids = []
    for model, doc_ids in model_record_ids.items():
        model_obj = self.pool[model]
        # only keep documents that still exist; check_mail_message_access
        # raises by itself when access is denied
        mids = model_obj.exists(cr, uid, list(doc_ids))
        if hasattr(model_obj, 'check_mail_message_access'):
            model_obj.check_mail_message_access(cr, uid, mids, operation, context=context)
        else:
            self.pool['mail.thread'].check_mail_message_access(cr, uid, mids, operation, model_obj=model_obj, context=context)
        document_related_ids += [mid for mid, message in message_values.iteritems()
                                 if message.get('model') == model and message.get('res_id') in mids]

    # Calculate remaining ids: if not void, raise an error
    other_ids = other_ids.difference(set(document_related_ids))
    if not other_ids:
        return
    raise orm.except_orm(_('Access Denied'),
                         _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') %
                         (self._description, operation))
def _get_record_name(self, cr, uid, values, context=None):
    """ Return the related document name, using name_get. It is done using
        SUPERUSER_ID, to be sure to have the record name correctly stored,
        even when uid lacks read access on the document. Returns False when
        there is no (known) related document. """
    model = values.get('model')
    res_id = values.get('res_id')
    if not model or not res_id or model not in self.pool:
        return False
    name_pairs = self.pool[model].name_get(cr, SUPERUSER_ID, [res_id], context=context)
    # name_get returns [(id, display_name)]
    return name_pairs[0][1]
def _get_reply_to(self, cr, uid, values, context=None):
    """ Return a specific reply_to: alias of the document through message_get_reply_to
        or take the email_from

        :param dict values: message values; 'model', 'res_id' and
            'email_from' are read to compute the reply-to address
        :return str: reply-to address for the message
    """
    model, res_id, email_from = values.get('model'), values.get('res_id'), values.get('email_from')
    # Bug fix: context defaults to None, and dict(None, ...) raises
    # TypeError; fall back to an empty dict before adding thread_model.
    ctx = dict(context or {}, thread_model=model)
    return self.pool['mail.thread'].message_get_reply_to(cr, uid, [res_id], default=email_from, context=ctx)[res_id]
def _get_message_id(self, cr, uid, values, context=None):
    """Generate the Message-Id for a new mail.message, based on its values."""
    # no_auto_thread must be exactly True (strict identity test, matching
    # the historical behavior), not merely truthy.
    if values.get('no_auto_thread', False) is True:
        return tools.generate_tracking_message_id('reply_to')
    if values.get('res_id') and values.get('model'):
        # message attached to a document: derive the id from it
        return tools.generate_tracking_message_id('%(res_id)s-%(model)s' % values)
    return tools.generate_tracking_message_id('private')
def create(self, cr, uid, values, context=None):
    """Create a mail.message, filling the missing values (email_from,
    message_id, reply_to, record_name) before delegating to the ORM,
    then notify the recipients."""
    context = dict(context or {})
    default_starred = context.pop('default_starred', False)

    if 'email_from' not in values:  # needed to compute reply_to
        values['email_from'] = self._get_default_from(cr, uid, context=context)
    if not values.get('message_id'):
        values['message_id'] = self._get_message_id(cr, uid, values, context=context)
    if 'reply_to' not in values:
        values['reply_to'] = self._get_reply_to(cr, uid, values, context=context)
    if 'record_name' not in values and 'default_record_name' not in context:
        values['record_name'] = self._get_record_name(cr, uid, values, context=context)

    newid = super(mail_message, self).create(cr, uid, values, context)

    # notification behavior is driven by context flags set by callers
    self._notify(cr, uid, newid, context=context,
                 force_send=context.get('mail_notify_force_send', True),
                 user_signature=context.get('mail_notify_user_signature', True))
    # TDE FIXME: handle default_starred. Why not setting an inv on starred ?
    # Because starred will call set_message_starred, that looks for notifications.
    # When creating a new mail_message, it will create a notification to a message
    # that does not exist, leading to an error (key not existing). Also this
    # this means unread notifications will be created, yet we can not assure
    # this is what we want.
    if default_starred:
        self.set_message_starred(cr, uid, [newid], True, context=context)
    return newid
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
    """ Override to explicitely call check_access_rule, that is not called
        by the ORM. It instead directly fetches ir.rules and apply them. """
    # enforce the custom mail.message access rules before the actual read
    self.check_access_rule(cr, uid, ids, 'read', context=context)
    return super(mail_message, self).read(cr, uid, ids, fields=fields, context=context, load=load)
def unlink(self, cr, uid, ids, context=None):
    """Delete messages after enforcing access rules, cascade-deleting the
    attachments that are directly attached to the messages (should only
    happen for mail.messages that act as parent for a standalone
    mail.mail record)."""
    self.check_access_rule(cr, uid, ids, 'unlink', context=context)
    doomed_attachment_ids = [
        attach.id
        for message in self.browse(cr, uid, ids, context=context)
        for attach in message.attachment_ids
        if attach.res_model == self._name and (attach.res_id == message.id or attach.res_id == 0)
    ]
    if doomed_attachment_ids:
        self.pool.get('ir.attachment').unlink(cr, uid, doomed_attachment_ids, context=context)
    return super(mail_message, self).unlink(cr, uid, ids, context=context)
#------------------------------------------------------
# Messaging API
#------------------------------------------------------
def _notify(self, cr, uid, newid, context=None, force_send=False, user_signature=True):
    """ Add the related record followers to the destination partner_ids if is not a private message.
        Call mail_notification.notify to manage the email sending
    """
    notification_obj = self.pool.get('mail.notification')
    message = self.browse(cr, uid, newid, context=context)
    partners_to_notify = set([])

    # all followers of the mail.message document have to be added as partners and notified if a subtype is defined (otherwise: log message)
    if message.subtype_id and message.model and message.res_id:
        fol_obj = self.pool.get("mail.followers")
        # browse as SUPERUSER because rules could restrict the search results
        fol_ids = fol_obj.search(
            cr, SUPERUSER_ID, [
                ('res_model', '=', message.model),
                ('res_id', '=', message.res_id),
            ], context=context)
        # keep only followers subscribed to this message's subtype
        partners_to_notify |= set(
            fo.partner_id.id for fo in fol_obj.browse(cr, SUPERUSER_ID, fol_ids, context=context)
            if message.subtype_id.id in [st.id for st in fo.subtype_ids]
        )
    # remove me from notified partners, unless the message is written on my own wall
    if message.subtype_id and message.author_id and message.model == "res.partner" and message.res_id == message.author_id.id:
        partners_to_notify |= set([message.author_id.id])
    elif message.author_id:
        partners_to_notify -= set([message.author_id.id])

    # all partner_ids of the mail.message have to be notified regardless of the above (even the author if explicitly added!)
    if message.partner_ids:
        partners_to_notify |= set([p.id for p in message.partner_ids])

    # notify
    notification_obj._notify(
        cr, uid, newid, partners_to_notify=list(partners_to_notify), context=context,
        force_send=force_send, user_signature=user_signature
    )
    # refresh: _notify above created notification rows this record reads next
    message.refresh()
    # An error appear when a user receive a notification without notifying
    # the parent message -> add a read notification for the parent
    if message.parent_id:
        # all notified_partner_ids of the mail.message have to be notified for the parented messages
        partners_to_parent_notify = set(message.notified_partner_ids).difference(message.parent_id.notified_partner_ids)
        for partner in partners_to_parent_notify:
            notification_obj.create(cr, uid, {
                'message_id': message.parent_id.id,
                'partner_id': partner.id,
                'is_read': True,
            }, context=context)
| agpl-3.0 |
Omegaphora/external_chromium_org | chrome/test/nacl_test_injection/buildbot_chrome_nacl_stage.py | 35 | 11261 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Do all the steps required to build and test against nacl."""
import optparse
import os.path
import re
import shutil
import subprocess
import sys
import find_chrome
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
CHROMIUM_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', '..', '..'))
sys.path.append(os.path.join(CHROMIUM_DIR, 'build'))
import detect_host_arch
# Copied from buildbot/buildbot_lib.py
def TryToCleanContents(path, file_name_filter=lambda fn: True):
  """
  Remove the contents of a directory without touching the directory itself.
  Ignores all failures.
  """
  if not os.path.exists(path):
    return
  for entry in os.listdir(path):
    TryToCleanPath(os.path.join(path, entry), file_name_filter)
# Copied from buildbot/buildbot_lib.py
def TryToCleanPath(path, file_name_filter=lambda fn: True):
"""
Removes a file or directory.
Ignores all failures.
"""
if os.path.exists(path):
if file_name_filter(path):
print 'Trying to remove %s' % path
if os.path.isdir(path):
shutil.rmtree(path, ignore_errors=True)
else:
try:
os.remove(path)
except Exception:
pass
else:
print 'Skipping %s' % path
# TODO(ncbray): this is somewhat unsafe. We should fix the underlying problem.
def CleanTempDir():
# Only delete files and directories like:
# a) C:\temp\83C4.tmp
# b) /tmp/.org.chromium.Chromium.EQrEzl
file_name_re = re.compile(
r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$')
file_name_filter = lambda fn: file_name_re.search(fn) is not None
path = os.environ.get('TMP', os.environ.get('TEMP', '/tmp'))
if len(path) >= 4 and os.path.isdir(path):
print
print "Cleaning out the temp directory."
print
TryToCleanContents(path, file_name_filter)
else:
print
print "Cannot find temp directory, not cleaning it."
print
def RunCommand(cmd, cwd, env):
  """Run cmd in cwd with env; on failure, report it and exit with its code."""
  cmd_text = ' '.join(cmd)
  sys.stdout.write('\nRunning %s\n\n' % cmd_text)
  sys.stdout.flush()
  exit_code = subprocess.call(cmd, cwd=cwd, env=env)
  if exit_code != 0:
    sys.stdout.write('\nFailed: %s\n\n' % cmd_text)
    sys.exit(exit_code)
def RunTests(name, cmd, nacl_dir, env):
  """Build the artifacts needed by the named suite, then actually run it."""
  out = sys.stdout
  out.write('\n\nBuilding files needed for %s testing...\n\n' % name)
  # First scons pass only builds (do_not_run_tests=1); second pass runs.
  RunCommand(cmd + ['do_not_run_tests=1', '-j8'], nacl_dir, env)
  out.write('\n\nRunning %s tests...\n\n' % name)
  RunCommand(cmd, nacl_dir, env)
def BuildAndTest(options):
  """Build NaCl integration targets and run the nacl/chrome tests.

  Derives per-platform specifics (target bit width, scons invocation,
  MSVS environment), cleans previous build output, syncs the NaCl
  toolchains, then runs the newlib and/or glibc suites as requested by
  *options* (an optparse Values from MakeCommandLineParser).
  """
  # Refuse to run under cygwin.
  if sys.platform == 'cygwin':
    raise Exception('I do not work under cygwin, sorry.')

  # By default, use the version of Python is being used to run this script.
  python = sys.executable
  if sys.platform == 'darwin':
    # Mac 10.5 bots tend to use a particularlly old version of Python, look for
    # a newer version.
    macpython27 = '/Library/Frameworks/Python.framework/Versions/2.7/bin/python'
    if os.path.exists(macpython27):
      python = macpython27

  script_dir = os.path.dirname(os.path.abspath(__file__))
  src_dir = os.path.dirname(os.path.dirname(os.path.dirname(script_dir)))
  nacl_dir = os.path.join(src_dir, 'native_client')

  # Decide platform specifics.
  if options.browser_path:
    chrome_filename = options.browser_path
  else:
    chrome_filename = find_chrome.FindChrome(src_dir, [options.mode])
    if chrome_filename is None:
      raise Exception('Cannot find a chrome binary - specify one with '
                      '--browser_path?')

  env = dict(os.environ)
  if sys.platform in ['win32', 'cygwin']:
    # Target bit width: explicit flag wins, else infer from the host arch.
    if options.bits == 64:
      bits = 64
    elif options.bits == 32:
      bits = 32
    elif '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
        '64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
      bits = 64
    else:
      bits = 32
    msvs_path = ';'.join([
        r'c:\Program Files\Microsoft Visual Studio 9.0\VC',
        r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC',
        r'c:\Program Files\Microsoft Visual Studio 9.0\Common7\Tools',
        r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\Tools',
        r'c:\Program Files\Microsoft Visual Studio 8\VC',
        r'c:\Program Files (x86)\Microsoft Visual Studio 8\VC',
        r'c:\Program Files\Microsoft Visual Studio 8\Common7\Tools',
        r'c:\Program Files (x86)\Microsoft Visual Studio 8\Common7\Tools',
    ])
    env['PATH'] += ';' + msvs_path
    scons = [python, 'scons.py']
  elif sys.platform == 'darwin':
    if options.bits == 64:
      bits = 64
    elif options.bits == 32:
      bits = 32
    else:
      # No explicit flag: ask file(1) whether the browser binary is 64-bit.
      p = subprocess.Popen(['file', chrome_filename], stdout=subprocess.PIPE)
      (p_stdout, _) = p.communicate()
      assert p.returncode == 0
      if p_stdout.find('executable x86_64') >= 0:
        bits = 64
      else:
        bits = 32
    scons = [python, 'scons.py']
  else:
    if options.bits == 64:
      bits = 64
    elif options.bits == 32:
      bits = 32
    elif '64' in detect_host_arch.HostArch():
      bits = 64
    else:
      bits = 32
    # xvfb-run has a 2-second overhead per invocation, so it is cheaper to wrap
    # the entire build step rather than each test (browser_headless=1).
    # We also need to make sure that there are at least 24 bits per pixel.
    # https://code.google.com/p/chromium/issues/detail?id=316687
    scons = [
        'xvfb-run',
        '--auto-servernum',
        '--server-args', '-screen 0 1024x768x24',
        python, 'scons.py',
    ]

  if options.jobs > 1:
    scons.append('-j%d' % options.jobs)

  scons.append('disable_tests=%s' % options.disable_tests)

  if options.buildbot is not None:
    scons.append('buildbot=%s' % (options.buildbot,))

  # Clean the output of the previous build.
  # Incremental builds can get wedged in weird ways, so we're trading speed
  # for reliability.
  shutil.rmtree(os.path.join(nacl_dir, 'scons-out'), True)

  # check that the HOST (not target) is 64bit
  # this is emulating what msvs_env.bat is doing
  # (harmless no-op env vars on non-Windows hosts)
  if '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
      '64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
    # 64bit HOST
    env['VS90COMNTOOLS'] = ('c:\\Program Files (x86)\\'
                            'Microsoft Visual Studio 9.0\\Common7\\Tools\\')
    env['VS80COMNTOOLS'] = ('c:\\Program Files (x86)\\'
                            'Microsoft Visual Studio 8.0\\Common7\\Tools\\')
  else:
    # 32bit HOST
    env['VS90COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 9.0\\'
                            'Common7\\Tools\\')
    env['VS80COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 8.0\\'
                            'Common7\\Tools\\')

  # Run nacl/chrome integration tests.
  # Note that we have to add nacl_irt_test to --mode in order to get
  # inbrowser_test_runner to run.
  # TODO(mseaborn): Change it so that inbrowser_test_runner is not a
  # special case.
  cmd = scons + ['--verbose', '-k', 'platform=x86-%d' % bits,
                 '--mode=opt-host,nacl,nacl_irt_test',
                 'chrome_browser_path=%s' % chrome_filename,
                 ]
  if not options.integration_bot and not options.morenacl_bot:
    cmd.append('disable_flaky_tests=1')
  cmd.append('chrome_browser_tests')

  # Propagate path to JSON output if present.
  # Note that RunCommand calls sys.exit on errors, so potential errors
  # from one command won't be overwritten by another one. Overwriting
  # a successful results file with either success or failure is fine.
  if options.json_build_results_output_file:
    cmd.append('json_build_results_output_file=%s' %
               options.json_build_results_output_file)

  # Download the toolchain(s).
  pkg_ver_dir = os.path.join(nacl_dir, 'build', 'package_version')
  RunCommand([python, os.path.join(pkg_ver_dir, 'package_version.py'),
              '--exclude', 'arm_trusted',
              '--exclude', 'pnacl_newlib',
              '--exclude', 'nacl_arm_newlib',
              'sync', '--extract'],
             nacl_dir, os.environ)

  CleanTempDir()

  if options.enable_newlib:
    RunTests('nacl-newlib', cmd, nacl_dir, env)

  if options.enable_glibc:
    RunTests('nacl-glibc', cmd + ['--nacl_glibc'], nacl_dir, env)
def MakeCommandLineParser():
  """Build and return the optparse parser for this script's command line."""
  parser = optparse.OptionParser()
  parser.add_option('-m', '--mode', dest='mode', default='Debug',
                    help='Debug/Release mode')
  parser.add_option('-j', dest='jobs', default=1, type='int',
                    help='Number of parallel jobs')

  parser.add_option('--enable_newlib', dest='enable_newlib', default=-1,
                    type='int', help='Run newlib tests?')
  parser.add_option('--enable_glibc', dest='enable_glibc', default=-1,
                    type='int', help='Run glibc tests?')

  parser.add_option('--json_build_results_output_file',
                    help='Path to a JSON file for machine-readable output.')

  # Deprecated, but passed to us by a script in the Chrome repo.
  # Replaced by --enable_glibc=0
  parser.add_option('--disable_glibc', dest='disable_glibc',
                    action='store_true', default=False,
                    help='Do not test using glibc.')

  parser.add_option('--disable_tests', dest='disable_tests',
                    type='string', default='',
                    help='Comma-separated list of tests to omit')

  # The bot-type defaults are inferred from the buildbot builder name.
  buildbot_name = os.environ.get('BUILDBOT_BUILDERNAME', '')
  on_integration_bot = 'nacl-chrome' in buildbot_name
  parser.add_option('--integration_bot', dest='integration_bot',
                    type='int', default=int(on_integration_bot),
                    help='Is this an integration bot?')

  on_morenacl_bot = ('More NaCl' in buildbot_name or
                     'naclmore' in buildbot_name)
  parser.add_option('--morenacl_bot', dest='morenacl_bot',
                    type='int', default=int(on_morenacl_bot),
                    help='Is this a morenacl bot?')

  # Not used on the bots, but handy for running the script manually.
  parser.add_option('--bits', dest='bits', action='store',
                    type='int', default=None,
                    help='32/64')
  parser.add_option('--browser_path', dest='browser_path', action='store',
                    type='string', default=None,
                    help='Path to the chrome browser.')
  parser.add_option('--buildbot', dest='buildbot', action='store',
                    type='string', default=None,
                    help='Value passed to scons as buildbot= option.')
  return parser
def Main():
  """Parse the command line, resolve option defaults, and run the build."""
  parser = MakeCommandLineParser()
  options, args = parser.parse_args()
  if options.integration_bot and options.morenacl_bot:
    parser.error('ERROR: cannot be both an integration bot and a morenacl bot')

  # -1 means "not set on the command line": newlib defaults to enabled.
  if options.enable_newlib == -1:
    options.enable_newlib = 1

  # glibc defaults to enabled only on the dedicated NaCl bots.
  if options.enable_glibc == -1:
    options.enable_glibc = 1 if (options.integration_bot or
                                 options.morenacl_bot) else 0

  if args:
    parser.error('ERROR: invalid argument')

  BuildAndTest(options)
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
  Main()
| bsd-3-clause |
hwroitzsch/DayLikeTodayClone | venv/lib/python3.5/site-packages/pip/vcs/bazaar.py | 280 | 4427 | from __future__ import absolute_import
import logging
import os
import tempfile
import re
# TODO: Get this into six.moves.urllib.parse
try:
from urllib import parse as urllib_parse
except ImportError:
import urlparse as urllib_parse
from pip.utils import rmtree, display_path
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
logger = logging.getLogger(__name__)
class Bazaar(VersionControl):
    """pip VCS backend for Bazaar (bzr) repositories.

    Supports plain ``bzr`` URLs plus the transport-prefixed schemes below;
    relies on the ``bzr`` executable being on PATH (invoked through the
    inherited ``run_command``).
    """
    name = 'bzr'
    dirname = '.bzr'
    repo_name = 'branch'
    schemes = (
        'bzr', 'bzr+http', 'bzr+https', 'bzr+ssh', 'bzr+sftp', 'bzr+ftp',
        'bzr+lp',
    )

    def __init__(self, url=None, *args, **kwargs):
        super(Bazaar, self).__init__(url, *args, **kwargs)
        # Python >= 2.7.4, 3.3 doesn't have uses_fragment or non_hierarchical
        # Register lp but do not expose as a scheme to support bzr+lp.
        if getattr(urllib_parse, 'uses_fragment', None):
            urllib_parse.uses_fragment.extend(['lp'])
            urllib_parse.non_hierarchical.extend(['lp'])

    def export(self, location):
        """
        Export the Bazaar repository at the url to the destination location
        """
        temp_dir = tempfile.mkdtemp('-export', 'pip-')
        self.unpack(temp_dir)
        if os.path.exists(location):
            # Remove the location to make sure Bazaar can export it correctly
            rmtree(location)
        try:
            self.run_command(['export', location], cwd=temp_dir,
                             show_stdout=False)
        finally:
            rmtree(temp_dir)

    def switch(self, dest, url, rev_options):
        """Point the checkout at `dest` to a different branch `url`."""
        self.run_command(['switch', url], cwd=dest)

    def update(self, dest, rev_options):
        """Pull new revisions (optionally pinned via `rev_options`) into `dest`."""
        self.run_command(['pull', '-q'] + rev_options, cwd=dest)

    def obtain(self, dest):
        """Branch the remote repository into `dest` (honoring any #rev pin)."""
        url, rev = self.get_url_rev()
        if rev:
            rev_options = ['-r', rev]
            rev_display = ' (to revision %s)' % rev
        else:
            rev_options = []
            rev_display = ''
        if self.check_destination(dest, url, rev_options, rev_display):
            logger.info(
                'Checking out %s%s to %s',
                url,
                rev_display,
                display_path(dest),
            )
            self.run_command(['branch', '-q'] + rev_options + [url, dest])

    def get_url_rev(self):
        # hotfix the URL scheme after removing bzr+ from bzr+ssh:// readd it
        url, rev = super(Bazaar, self).get_url_rev()
        if url.startswith('ssh://'):
            url = 'bzr+' + url
        return url, rev

    def get_url(self, location):
        """Return the remote URL of the checkout at `location`, parsed from `bzr info`."""
        urls = self.run_command(['info'], show_stdout=False, cwd=location)
        for line in urls.splitlines():
            line = line.strip()
            for x in ('checkout of branch: ',
                      'parent branch: '):
                if line.startswith(x):
                    repo = line.split(x)[1]
                    if self._is_local_repository(repo):
                        return path_to_url(repo)
                    return repo
        return None

    def get_revision(self, location):
        """Return the current revision number of the checkout at `location`."""
        revision = self.run_command(
            ['revno'], show_stdout=False, cwd=location)
        return revision.splitlines()[-1]

    def get_tag_revs(self, location):
        """Map revision -> tag name for every tag in the branch at `location`."""
        tags = self.run_command(
            ['tags'], show_stdout=False, cwd=location)
        tag_revs = []
        for line in tags.splitlines():
            tags_match = re.search(r'([.\w-]+)\s*(.*)$', line)
            if tags_match:
                tag = tags_match.group(1)
                rev = tags_match.group(2)
                # Note the (rev, tag) order: the resulting dict is keyed by
                # revision so get_src_requirement can look tags up by revno.
                tag_revs.append((rev.strip(), tag.strip()))
        return dict(tag_revs)

    def get_src_requirement(self, dist, location, find_tags):
        """Build a `bzr+URL@rev#egg=name` requirement string for `dist` at `location`."""
        repo = self.get_url(location)
        if not repo:
            return None
        if not repo.lower().startswith('bzr:'):
            repo = 'bzr+' + repo
        egg_project_name = dist.egg_name().split('-', 1)[0]
        current_rev = self.get_revision(location)
        tag_revs = self.get_tag_revs(location)

        if current_rev in tag_revs:
            # It's a tag
            full_egg_name = '%s-%s' % (egg_project_name, tag_revs[current_rev])
        else:
            # NOTE(review): uses the full dist.egg_name() here rather than
            # egg_project_name computed above — looks inconsistent; confirm
            # before changing, callers may rely on the dev_r naming.
            full_egg_name = '%s-dev_r%s' % (dist.egg_name(), current_rev)
        return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)
# Make the Bazaar backend discoverable through pip's VCS registry.
vcs.register(Bazaar)
| mit |
jjo31/ATHAM-Fluidity | python/fluidity/microphysics/FortranMicrophysicsWrapper.py | 1 | 4818 | import os
# Directory containing this module; write_to_file() emits the generated
# Fortran sources into `<path>/src` by default.
path=os.path.dirname(__file__)
def MakeWrapperFiles(field_dict, call_str, pointwise):
    """Public entry point: generate the f2py wrapper sources for `field_dict`.

    Delegates directly to write_to_file; `call_str` names the user's
    microphysics routine and `pointwise` selects the per-point calling mode.
    """
    write_to_file(field_dict=field_dict, call_string=call_str, pointwise=pointwise)
def allocate_str(field_dict):
    """Generate Fortran source for the `allocate_storage` subroutine.

    One tracer-holder array is (re)allocated per key of `field_dict`;
    `number_of_tracers(i)` gives the size of the i-th group (1-based,
    matching the `n+1` offset below).
    NOTE(review): the index assigned to each field depends on the dict's
    iteration order — it must match the order used by the Python caller.
    """
    s=""" subroutine allocate_storage(number_of_tracers,n)
 integer :: n
 !f2py integer, intent(hide), depend(number_of_tracers) :: n=shape(number_of_tracers,0)
 integer :: number_of_tracers(n)
"""
    for n,k in enumerate(field_dict):
        # Re-allocating an already-allocated array is a Fortran error, so
        # deallocate first when needed.
        s+=" if (allocated(%s)) deallocate(%s)\n"%(k,k)
        s+=" allocate(%s(number_of_tracers(%d)))\n"%(k,n+1)
    s+=" end subroutine allocate_storage\n\n"
    return s
def finalize_str(field_dict):
    """Generate the Fortran `finalize` subroutine that deallocates every
    tracer-holder array named by the keys of `field_dict`."""
    pieces = [" subroutine finalize\n"]
    pieces.extend("deallocate(%s)\n" % name for name in field_dict)
    pieces.append(" end subroutine finalize\n\n")
    return "".join(pieces)
def set_field_str(fname):
    """Generate the Fortran `set_<fname>` subroutine.

    The generated routine points the i-th holder's `new`/`old` (and
    optionally `source`) members at the arrays passed in from Python, so
    updates happen in place (hence the f2py `intent(inplace)` directives).
    """
    s=""" subroutine set_%s(i,new_val,n,old_val,source,m)
 integer :: m,n
 !f2py integer, intent(hide), depend(new_val) :: n=shape(new_val,0)
 !f2py integer, intent(hide), depend(new_val) :: m=shape(source,0)
 real, intent(in), dimension(n), target :: new_val, old_val
 real, intent(in), dimension(n), target, optional ::source
 !f2py real, intent(inplace), dimension(n) :: new_val, old_val
 !f2py real, intent(inplace), dimension(n), optional :: source
 integer :: i

 %s(i)%%new=>new_val
 %s(i)%%old=>old_val
 print*, present(source), m
 if (present(source) .and. m==n)&
 %s(i)%%source=>source

 end subroutine set_%s

"""%(fname,fname,fname,fname,fname)
    return s
def run_str(field_dict,call_string):
    """Generate `run_microphysics` for the whole-field calling convention.

    The generated subroutine forwards the tracer-holder arrays (one per
    `field_dict` key) directly to the user routine `call_string`, declaring
    a matching explicit interface for it.
    """
    s="""subroutine run_microphysics(current_time,dt)
real, intent(in) :: current_time, dt

interface
subroutine %s(time,timestep"""%call_string
    # Dummy-argument list of the interface: t0, t1, ...
    for n,k in enumerate(field_dict):
        s+=',&\n t%d'%n
    s+=')\n'
    s+=' use FW_data_type\n'
    s+=' real, intent(in) :: time, timestep\n'
    for n,k in enumerate(field_dict):
        s+=' type(basic_scalar), intent(inout), dimension(:) :: t%d\n'%n
    s+=' end subroutine %s\n'%call_string
    s+=""" end interface

 call %s(current_time,dt"""%call_string
    # Actual-argument list: the module-level holder arrays by name.
    for k in field_dict:
        s+=',&\n %s'%k
    s+=')\n\n'
    s+=' end subroutine run_microphysics\n\n'
    return s
def run_str_pointwise(field_dict,call_string):
    """Generate `run_microphysics` for the point-wise calling convention.

    The generated subroutine loops over every grid point, gathers each
    field group into a local `tracerN(j, 1:3)` buffer (new/old/source),
    calls the user routine `call_string` on the buffers, and scatters the
    results back into the holder arrays.
    """
    s="""subroutine run_microphysics(current_time,dt)
real, intent(in) :: current_time, dt
integer :: i,j\n
"""
    for n,k in enumerate(field_dict):
        s+=' real, dimension(size(%s),3) :: tracer%d\n'%(k,n)
    s+=""" interface\n
subroutine %s(time,timestep"""%call_string
    for n,k in enumerate(field_dict):
        s+=',&\n t%d'%n
    s+=')\n'
    s+=' use FW_data_type\n'
    s+=' real, intent(in) :: time, timestep\n'
    for n,k in enumerate(field_dict):
        s+=' real, intent(inout), dimension(:,:) :: t%d\n'%n
    s+=' end subroutine %s\n'%call_string
    s+=" end interface\n"
    # Loop over grid points; the first field's size gives the point count.
    # (list() makes the key indexing explicit and Python-3 safe; identical
    # behavior under Python 2.)
    s+=" do i=1, size(%s(0)%%new)\n"%(list(field_dict)[0])
    # Gather phase: copy point i of every tracer into the local buffers.
    for n,k in enumerate(field_dict):
        s+=' do j=1,size(%s)\n'%k
        s+=' tracer%d(j,1)=%s(j)%%new(i)\n'%(n,k)
        s+=' tracer%d(j,2)=%s(j)%%old(i)\n'%(n,k)
        s+=' if (associated(%s(j)%%source))&\n tracer%d(j,3)=%s(j)%%source(i)\n'%(k,n,k)
        s+=' end do\n\n'
    s+=" call %s(current_time,dt"%call_string
    # BUG FIX: the original loop re-used the stale variable `n` from the
    # gather loop above, so every actual argument was the LAST tracer
    # buffer (e.g. `tracer2,tracer2,tracer2`).  Use the loop index so the
    # call passes tracer0, tracer1, ... in order.
    for idx in range(len(field_dict)):
        s+=',&\n tracer%d'%idx
    s+=')\n\n'
    # Scatter phase: copy the (possibly updated) buffers back.
    for n,k in enumerate(field_dict):
        s+=' do j=1,size(%s)\n'%k
        s+=' %s(j)%%new(i)=tracer%d(j,1)\n'%(k,n)
        s+=' %s(j)%%old(i)=tracer%d(j,2)\n'%(k,n)
        s+=' if (associated(%s(j)%%source))&\n %s(j)%%source(i)=tracer%d(j,3)\n'%(k,k,n)
        s+=' end do\n\n'
    s+=' end do\n\n'
    s+=' end subroutine run_microphysics\n\n'
    return s
def write_to_file(field_dict={},
                  call_string='',
                  pointwise=False,
                  dirname=path+'/src',
                  src_name='FW_auto',
                  data_name='FW_data'):
    """Write the generated wrapper (`FW_auto.F90`) and data (`FW_data.F90`)
    Fortran modules into `dirname`.

    `field_dict` maps holder-array names to (unused here) payloads;
    `call_string` is the user routine invoked by run_microphysics;
    `pointwise` selects the per-point calling convention.
    NOTE: the mutable default `field_dict={}` is never mutated here, so it
    is harmless, but pass an explicit dict from callers.
    """
    f=open(dirname+'/'+src_name+'.F90','w')
    s="""module FW_auto
use FW_data
implicit none

contains

"""
    f.write(s)
    f.write(allocate_str(field_dict))
    f.write(finalize_str(field_dict))
    for k in field_dict:
        f.write(set_field_str(k))
    if pointwise:
        f.write(run_str_pointwise(field_dict,call_string))
    else:
        f.write(run_str(field_dict,call_string))
    # Case differs from the `module FW_auto` header above; Fortran is
    # case-insensitive, so this is legal.
    f.write("end module FW_Auto\n")
    f.close()
    f=open(dirname+'/'+data_name+'.F90','w')
    f.write("""module %s
use FW_data_type
"""%data_name)
    for k in field_dict:
        f.write(' type(basic_scalar), dimension(:), allocatable :: %s\n'%k)
    f.write('end module %s\n'%data_name)
    f.close()
| lgpl-2.1 |
kubeflow/kfp-tekton | third_party/metadata_envoy/dependency_helper.py | 2 | 1639 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helper script to download license files for library dependencies.
Script relies on dependency spec json file to provide destination path to store
license files and list all the libraries and their corresponding url to license
file.
"""
import json
import os
import sys
import requests
def copy_third_party_licenses(dependency_spec):
    """Download each dependency's license and concatenate them into license.txt.

    `dependency_spec` is the path to a JSON file with a top-level
    "libraries" list whose entries carry "library" (display name) and
    "license_url".  Exits with status 1 when the spec file is missing.
    """
    if not os.path.isfile(dependency_spec):
        print('dependency spec: {} not found'.format(dependency_spec))
        sys.exit(1)

    with open(dependency_spec, 'r') as f:
        dependencies = json.load(f)

    # Explicit utf-8 so the output does not depend on the platform locale.
    with open('license.txt', 'w', encoding='utf-8') as l:
        for dependency in dependencies['libraries']:
            print('Downloading License for library : {}'.format(dependency['library']))
            l.write('Library: {}\n\n'.format(dependency['library']))
            # BUG FIX: the file is opened in text mode, so the original
            # `.text.encode("utf-8")` wrote bytes and raised TypeError under
            # Python 3.  `Response.text` is already str; write it directly.
            l.write(requests.get(dependency['license_url']).text)
            l.write('\n\n')
if __name__ == '__main__':
    # CLI entry point: the single required argument is the path to the
    # dependency-spec JSON file.
    if len(sys.argv) < 2:
        print('script expects path to the dependency spec file as argument')
        sys.exit(1)
    copy_third_party_licenses(sys.argv[1])
| apache-2.0 |
ProfessionalIT/maxigenios-website | sdk/google_appengine/lib/django-0.96/django/core/serializers/xml_serializer.py | 32 | 8651 | """
XML serializer.
"""
from django.conf import settings
from django.core.serializers import base
from django.db import models
from django.utils.xmlutils import SimplerXMLGenerator
from xml.dom import pulldom
class Serializer(base.Serializer):
    """
    Serializes a QuerySet to XML.
    """

    def indent(self, level):
        # Emit '\n' plus `indent * level` spaces, but only when the caller
        # requested pretty-printing via the 'indent' serialization option.
        if self.options.get('indent', None) is not None:
            self.xml.ignorableWhitespace('\n' + ' ' * self.options.get('indent', None) * level)

    def start_serialization(self):
        """
        Start serialization -- open the XML document and the root element.
        """
        self.xml = SimplerXMLGenerator(self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET))
        self.xml.startDocument()
        self.xml.startElement("django-objects", {"version" : "1.0"})

    def end_serialization(self):
        """
        End serialization -- end the document.
        """
        self.indent(0)
        self.xml.endElement("django-objects")
        self.xml.endDocument()

    def start_object(self, obj):
        """
        Called as each object is handled.
        """
        if not hasattr(obj, "_meta"):
            raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj))

        self.indent(1)
        self.xml.startElement("object", {
            "pk"    : str(obj._get_pk_val()),
            "model" : str(obj._meta),
        })

    def end_object(self, obj):
        """
        Called after handling all fields for an object.
        """
        self.indent(1)
        self.xml.endElement("object")

    def handle_field(self, obj, field):
        """
        Called to handle each field on an object (except for ForeignKeys and
        ManyToManyFields)
        """
        self.indent(2)
        self.xml.startElement("field", {
            "name" : field.name,
            "type" : field.get_internal_type()
        })

        # Get a "string version" of the object's data (this is handled by the
        # serializer base class).
        if getattr(obj, field.name) is not None:
            value = self.get_string_value(obj, field)
            self.xml.characters(str(value))
        else:
            # NULL values are encoded as an empty <None/> child element.
            self.xml.addQuickElement("None")

        self.xml.endElement("field")

    def handle_fk_field(self, obj, field):
        """
        Called to handle a ForeignKey (we need to treat them slightly
        differently from regular fields).
        """
        self._start_relational_field(field)
        related = getattr(obj, field.name)
        if related is not None:
            # Only the related object's PK is serialized, not its data.
            self.xml.characters(str(related._get_pk_val()))
        else:
            self.xml.addQuickElement("None")
        self.xml.endElement("field")

    def handle_m2m_field(self, obj, field):
        """
        Called to handle a ManyToManyField. Related objects are only
        serialized as references to the object's PK (i.e. the related *data*
        is not dumped, just the relation).
        """
        self._start_relational_field(field)
        for relobj in getattr(obj, field.name).iterator():
            self.xml.addQuickElement("object", attrs={"pk" : str(relobj._get_pk_val())})
        self.xml.endElement("field")

    def _start_relational_field(self, field):
        """
        Helper to output the <field> element for relational fields
        """
        self.indent(2)
        self.xml.startElement("field", {
            "name" : field.name,
            "rel"  : field.rel.__class__.__name__,
            "to"   : str(field.rel.to._meta),
        })
class Deserializer(base.Deserializer):
    """
    Deserialize XML.
    """

    def __init__(self, stream_or_string, **options):
        super(Deserializer, self).__init__(stream_or_string, **options)
        self.encoding = self.options.get("encoding", settings.DEFAULT_CHARSET)
        # pulldom gives us a streaming event parser; whole <object> subtrees
        # are expanded on demand in next().
        self.event_stream = pulldom.parse(self.stream)

    def next(self):
        # Python 2 iterator protocol: yield one DeserializedObject per
        # <object> element, expanding only that subtree into a DOM.
        for event, node in self.event_stream:
            if event == "START_ELEMENT" and node.nodeName == "object":
                self.event_stream.expandNode(node)
                return self._handle_object(node)
        raise StopIteration

    def _handle_object(self, node):
        """
        Convert an <object> node to a DeserializedObject.
        """
        # Look up the model using the model loading mechanism. If this fails, bail.
        Model = self._get_model_from_node(node, "model")

        # Start building a data dictionary from the object. If the node is
        # missing the pk attribute, bail.
        pk = node.getAttribute("pk")
        if not pk:
            raise base.DeserializationError("<object> node is missing the 'pk' attribute")

        data = {Model._meta.pk.attname : Model._meta.pk.to_python(pk)}

        # Also start building a dict of m2m data (this is saved as
        # {m2m_accessor_attribute : [list_of_related_objects]})
        m2m_data = {}

        # Deseralize each field.
        for field_node in node.getElementsByTagName("field"):
            # If the field is missing the name attribute, bail (are you
            # sensing a pattern here?)
            field_name = field_node.getAttribute("name")
            if not field_name:
                raise base.DeserializationError("<field> node is missing the 'name' attribute")

            # Get the field from the Model. This will raise a
            # FieldDoesNotExist if, well, the field doesn't exist, which will
            # be propagated correctly.
            field = Model._meta.get_field(field_name)

            # As is usually the case, relation fields get the special treatment.
            if field.rel and isinstance(field.rel, models.ManyToManyRel):
                m2m_data[field.name] = self._handle_m2m_field_node(field_node, field)
            elif field.rel and isinstance(field.rel, models.ManyToOneRel):
                data[field.attname] = self._handle_fk_field_node(field_node, field)
            else:
                # A lone <None/> child marks a NULL value (see Serializer).
                if len(field_node.childNodes) == 1 and field_node.childNodes[0].nodeName == 'None':
                    value = None
                else:
                    value = field.to_python(getInnerText(field_node).strip().encode(self.encoding))
                data[field.name] = value

        # Return a DeserializedObject so that the m2m data has a place to live.
        return base.DeserializedObject(Model(**data), m2m_data)

    def _handle_fk_field_node(self, node, field):
        """
        Handle a <field> node for a ForeignKey
        """
        # Check if there is a child node named 'None', returning None if so.
        if len(node.childNodes) == 1 and node.childNodes[0].nodeName == 'None':
            return None
        else:
            return field.rel.to._meta.pk.to_python(
                getInnerText(node).strip().encode(self.encoding))

    def _handle_m2m_field_node(self, node, field):
        """
        Handle a <field> node for a ManyToManyField
        """
        return [field.rel.to._meta.pk.to_python(
                    c.getAttribute("pk").encode(self.encoding))
                for c in node.getElementsByTagName("object")]

    def _get_model_from_node(self, node, attr):
        """
        Helper to look up a model from a <object model=...> or a <field
        rel=... to=...> node.
        """
        model_identifier = node.getAttribute(attr)
        if not model_identifier:
            raise base.DeserializationError(
                "<%s> node is missing the required '%s' attribute" \
                    % (node.nodeName, attr))
        try:
            Model = models.get_model(*model_identifier.split("."))
        except TypeError:
            Model = None
        if Model is None:
            raise base.DeserializationError(
                "<%s> node has invalid model identifier: '%s'" % \
                    (node.nodeName, model_identifier))
        return Model
def getInnerText(node):
    """
    Get all the inner text of a DOM node (recursively).

    Concatenates the data of every TEXT/CDATA descendant, depth-first;
    comments, processing instructions and other node types are skipped.
    """
    # inspired by http://mail.python.org/pipermail/xml-sig/2005-March/011022.html
    parts = []
    for child in node.childNodes:
        if child.nodeType in (child.TEXT_NODE, child.CDATA_SECTION_NODE):
            parts.append(child.data)
        elif child.nodeType == child.ELEMENT_NODE:
            parts.append(getInnerText(child))
    return "".join(parts)
CartoDB/cartoframes | cartoframes/io/managers/context_manager.py | 1 | 22518 | import time
import pandas as pd
from warnings import warn
from carto.auth import APIKeyAuthClient
from carto.datasets import DatasetManager
from carto.exceptions import CartoException, CartoRateLimitException
from carto.sql import SQLClient, BatchSQLClient, CopySQLClient
from pyrestcli.exceptions import NotFoundException
from ..dataset_info import DatasetInfo
from ... import __version__
from ...auth.defaults import get_default_credentials
from ...utils.logger import log
from ...utils.geom_utils import encode_geometry_ewkb
from ...utils.utils import (is_sql_query, check_credentials, encode_row, map_geom_type, PG_NULL, double_quote,
create_tmp_name)
from ...utils.columns import (get_dataframe_columns_info, get_query_columns_info, obtain_converters, date_columns_names,
normalize_name)
# Default number of attempts for COPY calls that hit a rate limit
# (see the retry_copy decorator).
DEFAULT_RETRY_TIMES = 3
# Queries longer than this are routed through a temporary server-side
# function executed via the Batch SQL API (see _truncate_and_drop_add_columns).
BATCH_API_PAYLOAD_THRESHOLD = 12000
def retry_copy(func):
    """Decorator that retries a COPY call when CARTO rate-limits it.

    Reads the retry budget from the wrapped call's `retry_times` kwarg
    (default DEFAULT_RETRY_TIMES), sleeps for the server-suggested
    `retry_after` between attempts, and re-raises once the budget is spent.
    """
    def wrapper(*args, **kwargs):
        m_retry_times = kwargs.get('retry_times', DEFAULT_RETRY_TIMES)
        while m_retry_times >= 1:
            try:
                return func(*args, **kwargs)
            except CartoRateLimitException as err:
                m_retry_times -= 1

                if m_retry_times <= 0:
                    warn(('Read call was rate-limited. '
                          'This usually happens when there are multiple queries being read at the same time.'))
                    raise err

                warn('Read call rate limited. Waiting {s} seconds'.format(s=err.retry_after))
                time.sleep(err.retry_after)
                warn('Retrying...')
        # Only reached when retry_times < 1 was requested: make a single
        # unguarded attempt.
        return func(*args, **kwargs)
    return wrapper
def not_found(func):
    """Decorator translating CARTO's wrapped NotFoundException into a plain
    `Exception('Resource not found')` (with the original context suppressed);
    any other CartoException is re-raised untouched."""
    def decorator_func(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except CartoException as e:
            # carto-python wraps the underlying pyrestcli error in e.args[0].
            if hasattr(e, 'args') and isinstance(e.args, (list, tuple)) and type(e.args[0]) == NotFoundException:
                raise Exception('Resource not found') from None
            else:
                raise e
    return decorator_func
class ContextManager:
    """Facade over the CARTO SQL, Batch SQL and COPY clients for one set of
    credentials.

    Centralizes query execution, table management (create/rename/drop,
    privacy), and the COPY-based dataframe upload/download paths used by
    cartoframes' read/write API.
    """

    def __init__(self, credentials):
        self.credentials = credentials or get_default_credentials()
        check_credentials(self.credentials)

        self.auth_client = _create_auth_client(self.credentials)
        self.sql_client = SQLClient(self.auth_client)
        self.copy_client = CopySQLClient(self.auth_client)
        self.batch_sql_client = BatchSQLClient(self.auth_client)

    @not_found
    def execute_query(self, query, parse_json=True, do_post=True, format=None, **request_args):
        """Run `query` synchronously through the SQL API."""
        return self.sql_client.send(query.strip(), parse_json, do_post, format, **request_args)

    @not_found
    def execute_long_running_query(self, query):
        """Run `query` through the Batch SQL API and wait for completion."""
        return self.batch_sql_client.create_and_wait_for_completion(query.strip())

    def copy_to(self, source, schema=None, limit=None, retry_times=DEFAULT_RETRY_TIMES):
        """Download a table or query (`source`) into a pandas DataFrame via COPY TO."""
        query = self.compute_query(source, schema)
        columns = self._get_query_columns_info(query)
        copy_query = self._get_copy_query(query, columns, limit)
        return self._copy_to(copy_query, columns, retry_times)

    def copy_from(self, gdf, table_name, if_exists='fail', cartodbfy=True,
                  retry_times=DEFAULT_RETRY_TIMES):
        """Upload the dataframe `gdf` into `table_name` via COPY FROM.

        `if_exists` is one of 'fail' | 'replace' | 'append'.  Returns the
        (normalized) table name that was written.
        """
        schema = self.get_schema()
        table_name = self.normalize_table_name(table_name)
        df_columns = get_dataframe_columns_info(gdf)

        if self.has_table(table_name, schema):
            if if_exists == 'replace':
                table_query = self._compute_query_from_table(table_name, schema)
                table_columns = self._get_query_columns_info(table_query)

                if self._compare_columns(df_columns, table_columns):
                    # Equal columns: truncate table
                    self._truncate_table(table_name, schema)
                else:
                    # Diff columns: truncate table and drop + add columns
                    self._truncate_and_drop_add_columns(
                        table_name, schema, df_columns, table_columns)

            elif if_exists == 'fail':
                raise Exception('Table "{schema}.{table_name}" already exists in your CARTO account. '
                                'Please choose a different `table_name` or use '
                                'if_exists="replace" to overwrite it.'.format(
                                    table_name=table_name, schema=schema))

            else:  # 'append'
                # Appending to an existing (already cartodbfied) table:
                # skip the cartodbfy pass below.
                cartodbfy = False
        else:
            self._create_table_from_columns(table_name, schema, df_columns)

        self._copy_from(gdf, table_name, df_columns, retry_times)

        if cartodbfy is True:
            cartodbfy_query = _cartodbfy_query(table_name, schema)
            self.execute_long_running_query(cartodbfy_query)

        return table_name

    def create_table_from_query(self, query, table_name, if_exists):
        """Materialize `query` into `table_name`, honoring `if_exists`."""
        schema = self.get_schema()
        table_name = self.normalize_table_name(table_name)

        if self.has_table(table_name, schema):
            if if_exists == 'replace':
                # TODO: review logic copy_from
                self._drop_create_table_from_query(table_name, schema, query)
            elif if_exists == 'fail':
                raise Exception('Table "{schema}.{table_name}" already exists in your CARTO account. '
                                'Please choose a different `table_name` or use '
                                'if_exists="replace" to overwrite it.'.format(
                                    table_name=table_name, schema=schema))
            else:  # 'append'
                pass
        else:
            self._drop_create_table_from_query(table_name, schema, query)

        return table_name

    def list_tables(self, schema=None):
        """Return a one-column DataFrame listing the account's datasets,
        most recently updated first."""
        # NOTE(review): `schema` is accepted but not used here — confirm intended.
        datasets = DatasetManager(self.auth_client).filter(
            show_table_size_and_row_count='false',
            show_table='false',
            show_stats='false',
            show_likes='false',
            show_liked='false',
            show_permission='false',
            show_uses_builder_features='false',
            show_synchronization='false',
            load_totals='false'
        )
        datasets.sort(key=lambda x: x.updated_at, reverse=True)
        return pd.DataFrame([dataset.name for dataset in datasets], columns=['tables'])

    def has_table(self, table_name, schema=None):
        """True when `table_name` exists (probed with an EXPLAIN)."""
        query = self.compute_query(table_name, schema)
        return self._check_exists(query)

    def delete_table(self, table_name):
        """Drop `table_name` if present; returns True when it actually existed."""
        query = _drop_table_query(table_name)
        output = self.execute_query(query)
        # A 'does not exist' notice means nothing was dropped.
        return not('notices' in output and 'does not exist' in output['notices'][0])

    def _delete_function(self, function_name):
        # Drop a server-side helper function created by _create_function.
        query = _drop_function_query(function_name)
        self.execute_query(query)
        return function_name

    def _create_function(self, schema, statement,
                         function_name=None, columns_types=None, return_value='VOID', language='plpgsql'):
        # Wrap `statement` in a (temporarily named, unless given) server-side
        # function and return its schema-qualified signature.
        function_name = function_name or create_tmp_name(base='tmp_func')
        safe_schema = double_quote(schema)
        query, qualified_func_name = _create_function_query(
            schema=safe_schema,
            function_name=function_name,
            statement=statement,
            columns_types=columns_types or '',
            return_value=return_value,
            language=language)
        self.execute_query(query)
        return qualified_func_name

    def rename_table(self, table_name, new_table_name, if_exists='fail'):
        """Rename `table_name` to `new_table_name`, honoring `if_exists`."""
        new_table_name = self.normalize_table_name(new_table_name)

        if table_name == new_table_name:
            raise ValueError('Table names are equal. Please choose a different table name.')

        if not self.has_table(table_name):
            raise Exception('Table "{table_name}" does not exist in your CARTO account.'.format(
                table_name=table_name))

        if self.has_table(new_table_name):
            if if_exists == 'replace':
                log.debug('Removing table "{}"'.format(new_table_name))
                self.delete_table(new_table_name)
            elif if_exists == 'fail':
                raise Exception('Table "{new_table_name}" already exists in your CARTO account. '
                                'Please choose a different `new_table_name` or use '
                                'if_exists="replace" to overwrite it.'.format(
                                    new_table_name=new_table_name))

        self._rename_table(table_name, new_table_name)
        return new_table_name

    def update_privacy_table(self, table_name, privacy=None):
        """Change the dataset's privacy setting."""
        DatasetInfo(self.auth_client, table_name).update_privacy(privacy)

    def get_privacy(self, table_name):
        """Return the dataset's current privacy setting."""
        return DatasetInfo(self.auth_client, table_name).privacy

    def get_schema(self):
        """Get user schema from current credentials"""
        query = 'SELECT current_schema()'
        result = self.execute_query(query, do_post=False)
        schema = result['rows'][0]['current_schema']
        log.debug('schema: {}'.format(schema))
        return schema

    def get_geom_type(self, query):
        """Fetch geom type of a remote table or query"""
        distict_query = '''
            SELECT distinct ST_GeometryType(the_geom) AS geom_type
            FROM ({}) q
            LIMIT 5
        '''.format(query)
        response = self.execute_query(distict_query, do_post=False)
        if response and response.get('rows') and len(response.get('rows')) > 0:
            st_geom_type = response.get('rows')[0].get('geom_type')
            if st_geom_type:
                # Strip the 'ST_' prefix before mapping to the simple name.
                return map_geom_type(st_geom_type[3:])
        return None

    def get_num_rows(self, query):
        """Get the number of rows in the query"""
        result = self.execute_query('SELECT COUNT(*) FROM ({query}) _query'.format(query=query))
        return result.get('rows')[0].get('count')

    def get_bounds(self, query):
        """Return [[xmin, ymin], [xmax, ymax]] for the query's geometries,
        or None when the extent cannot be computed."""
        extent_query = '''
            SELECT ARRAY[
                ARRAY[st_xmin(geom_env), st_ymin(geom_env)],
                ARRAY[st_xmax(geom_env), st_ymax(geom_env)]
            ] bounds FROM (
                SELECT ST_Extent(the_geom) geom_env
                FROM ({}) q
            ) q;
        '''.format(query)
        response = self.execute_query(extent_query, do_post=False)
        if response and response.get('rows') and len(response.get('rows')) > 0:
            return response.get('rows')[0].get('bounds')
        return None

    def get_column_names(self, source, schema=None, exclude=None):
        """Return the column names of a table or query, optionally excluding some."""
        query = self.compute_query(source, schema)
        columns = [c.name for c in self._get_query_columns_info(query)]

        if exclude and isinstance(exclude, list):
            columns = list(set(columns) - set(exclude))

        return columns

    def is_public(self, query):
        # Used to detect public tables in queries in the publication,
        # because privacy only works for tables.
        public_auth_client = _create_auth_client(self.credentials, public=True)
        public_sql_client = SQLClient(public_auth_client)
        exists_query = 'EXPLAIN {}'.format(query)
        try:
            public_sql_client.send(exists_query, do_post=False)
            return True
        except CartoException:
            return False

    def get_table_names(self, query):
        # Used to detect tables in queries in the publication.
        query = 'SELECT CDB_QueryTablesText($q${}$q$) as tables'.format(query)
        result = self.execute_query(query)
        tables = []
        if result['total_rows'] > 0 and result['rows'][0]['tables']:
            # Dataset_info only works with tables without schema
            tables = [table.split('.')[1] if '.' in table else table for table in result['rows'][0]['tables']]
        return tables

    def _compare_columns(self, a, b):
        # Order-insensitive column comparison, ignoring CARTO-managed columns.
        a_copy = [i for i in a if _not_reserved(i.name)]
        b_copy = [i for i in b if _not_reserved(i.name)]

        a_copy.sort()
        b_copy.sort()

        return a_copy == b_copy

    def _drop_create_table_from_query(self, table_name, schema, query):
        log.debug('DROP + CREATE table "{}"'.format(table_name))
        query = 'BEGIN; {drop}; {create}; COMMIT;'.format(
            drop=_drop_table_query(table_name),
            create=_create_table_from_query_query(table_name, query))
        self.execute_long_running_query(query)

    def _create_table_from_columns(self, table_name, schema, columns):
        log.debug('CREATE table "{}"'.format(table_name))
        query = 'BEGIN; {create}; COMMIT;'.format(
            create=_create_table_from_columns_query(table_name, columns))
        self.execute_query(query)

    def _truncate_table(self, table_name, schema):
        log.debug('TRUNCATE table "{}"'.format(table_name))
        query = 'BEGIN; {truncate}; COMMIT;'.format(
            truncate=_truncate_table_query(table_name))
        self.execute_query(query)

    def _truncate_and_drop_add_columns(self, table_name, schema, df_columns, table_columns):
        # Replace the table's column set in place: truncate, then one ALTER
        # TABLE dropping the old columns and adding the new ones.
        log.debug('TRUNCATE AND DROP + ADD columns table "{}"'.format(table_name))
        drop_columns = _drop_columns_query(table_name, table_columns)
        add_columns = _add_columns_query(table_name, df_columns)
        drop_add_columns = 'ALTER TABLE {table_name} {drop_columns},{add_columns};'.format(
            table_name=table_name, drop_columns=drop_columns, add_columns=add_columns)

        query = '{regenerate}; BEGIN; {truncate}; {drop_add_columns}; COMMIT;'.format(
            regenerate=_regenerate_table_query(table_name, schema) if self._check_regenerate_table_exists() else '',
            truncate=_truncate_table_query(table_name),
            drop_add_columns=drop_add_columns)

        query_length_over_threshold = len(query) > BATCH_API_PAYLOAD_THRESHOLD

        if query_length_over_threshold:
            # The Batch SQL API rejects over-long payloads: hide the big
            # ALTER TABLE inside a temporary server-side function and call it.
            qualified_func_name = self._create_function(
                schema=schema, statement=drop_add_columns)
            drop_add_func_sql = 'SELECT {}'.format(qualified_func_name)
            query = '''
                {regenerate};
                BEGIN;
                {truncate};
                {drop_add_func_sql};
                COMMIT;'''.format(
                regenerate=_regenerate_table_query(
                    table_name, schema) if self._check_regenerate_table_exists() else '',
                truncate=_truncate_table_query(table_name),
                drop_add_func_sql=drop_add_func_sql)

        try:
            self.execute_long_running_query(query)
        finally:
            # Always clean up the temporary function, even on failure.
            if query_length_over_threshold:
                self._delete_function(qualified_func_name)

    def compute_query(self, source, schema=None):
        """Return `source` unchanged when it is already SQL; otherwise build
        a SELECT * over the (schema-qualified) table."""
        if is_sql_query(source):
            return source
        schema = schema or self.get_schema()
        return self._compute_query_from_table(source, schema)

    def _compute_query_from_table(self, table_name, schema):
        return 'SELECT * FROM "{schema}"."{table_name}"'.format(
            schema=schema or 'public',
            table_name=table_name
        )

    def _check_exists(self, query):
        # EXPLAIN is a cheap existence probe: it fails when a referenced
        # relation does not exist.
        exists_query = 'EXPLAIN {}'.format(query)
        try:
            self.execute_query(exists_query, do_post=False)
            return True
        except CartoException:
            return False

    def _check_regenerate_table_exists(self):
        # Not every CARTO deployment ships cartodb.CDB_RegenerateTable.
        query = '''
            SELECT 1
            FROM pg_catalog.pg_proc p
            LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
            WHERE p.proname = 'cdb_regeneratetable' AND n.nspname = 'cartodb';
        '''
        result = self.execute_query(query)
        return len(result['rows']) > 0

    def _get_query_columns_info(self, query):
        # LIMIT 0 fetches only the field metadata, no rows.
        query = 'SELECT * FROM ({}) _q LIMIT 0'.format(query)
        table_info = self.execute_query(query)
        return get_query_columns_info(table_info['fields'])

    def _get_copy_query(self, query, columns, limit):
        # the_geom_webmercator is a derived column; never downloaded.
        query_columns = [
            double_quote(column.name) for column in columns
            if (column.name != 'the_geom_webmercator')
        ]

        query = 'SELECT {columns} FROM ({query}) _q'.format(
            query=query,
            columns=','.join(query_columns))

        if limit is not None:
            if isinstance(limit, int) and (limit >= 0):
                query += ' LIMIT {limit}'.format(limit=limit)
            else:
                # NOTE(review): message reads "must an integer" — missing "be".
                raise ValueError("`limit` parameter must an integer >= 0")

        return query

    @retry_copy
    def _copy_to(self, query, columns, retry_times=DEFAULT_RETRY_TIMES):
        # Stream the query result as CSV and parse it into a DataFrame,
        # applying per-column converters and date parsing.
        log.debug('COPY TO')
        copy_query = "COPY ({0}) TO stdout WITH (FORMAT csv, HEADER true, NULL '{1}')".format(query, PG_NULL)

        raw_result = self.copy_client.copyto_stream(copy_query)

        converters = obtain_converters(columns)
        parse_dates = date_columns_names(columns)

        df = pd.read_csv(
            raw_result,
            converters=converters,
            parse_dates=parse_dates)

        return df

    @retry_copy
    def _copy_from(self, dataframe, table_name, columns, retry_times=DEFAULT_RETRY_TIMES):
        # Stream the dataframe rows as '|'-delimited CSV into the table.
        log.debug('COPY FROM')
        query = """
            COPY {table_name}({columns}) FROM stdin WITH (FORMAT csv, DELIMITER '|', NULL '{null}');
        """.format(
            table_name=table_name, null=PG_NULL,
            columns=','.join(double_quote(column.dbname) for column in columns)).strip()
        data = _compute_copy_data(dataframe, columns)

        self.copy_client.copyfrom(query, data)

    def _rename_table(self, table_name, new_table_name):
        query = _rename_table_query(table_name, new_table_name)
        self.execute_query(query)

    def normalize_table_name(self, table_name):
        """Return a PostgreSQL-safe version of `table_name` (logging when it changes)."""
        norm_table_name = normalize_name(table_name)
        if norm_table_name != table_name:
            log.debug('Table name normalized: "{}"'.format(norm_table_name))
        return norm_table_name
def _drop_table_query(table_name, if_exists=True):
return 'DROP TABLE {if_exists} {table_name}'.format(
table_name=table_name,
if_exists='IF EXISTS' if if_exists else '')
def _drop_function_query(function_name, columns_types=None, if_exists=True):
if columns_types and not isinstance(columns_types, dict):
raise ValueError('The columns_types parameter should be a dictionary of column names and types.')
columns_types = columns_types or {}
columns = ['{0} {1}'.format(cname, ctype) for cname, ctype in columns_types.items()]
columns_str = ','.join(columns)
return 'DROP FUNCTION {if_exists} {function_name}{columns_str_call}'.format(
function_name=function_name,
if_exists='IF EXISTS' if if_exists else '',
columns_str_call='({columns_str})'.format(columns_str=columns_str) if columns else '')
def _truncate_table_query(table_name):
return 'TRUNCATE TABLE {table_name}'.format(
table_name=table_name)
def _create_function_query(schema, function_name, statement, columns_types, return_value, language):
    """Build a CREATE FUNCTION statement wrapping `statement`.

    Returns (query, qualified_func_name) where the latter is the
    schema-qualified signature usable in a later SELECT/DROP.
    `columns_types` is an optional dict of argument name -> SQL type.
    """
    if columns_types and not isinstance(columns_types, dict):
        raise ValueError('The columns_types parameter should be a dictionary of column names and types.')

    columns_types = columns_types or {}
    columns = ['{0} {1}'.format(cname, ctype) for cname, ctype in columns_types.items()]
    columns_str = ','.join(columns) if columns else ''
    function_query = '''
        CREATE FUNCTION {schema}.{function_name}({columns_str})
        RETURNS {return_value} AS $$
        BEGIN
            {statement}
        END;
        $$ LANGUAGE {language}
    '''.format(schema=schema,
               function_name=function_name,
               statement=statement,
               columns_str=columns_str,
               return_value=return_value,
               language=language)
    qualified_func_name = '{schema}.{function_name}({columns_str})'.format(
        schema=schema, function_name=function_name, columns_str=columns_str)
    return function_query, qualified_func_name
def _drop_columns_query(table_name, columns):
    """Comma-joined DROP COLUMN clauses for every non-reserved column."""
    clauses = ('DROP COLUMN {0}'.format(double_quote(col.dbname))
               for col in columns if _not_reserved(col.dbname))
    return ','.join(clauses)
def _add_columns_query(table_name, columns):
    """Comma-joined ADD COLUMN clauses for every non-reserved column."""
    clauses = ('ADD COLUMN {0} {1}'.format(double_quote(col.dbname), col.dbtype)
               for col in columns if _not_reserved(col.dbname))
    return ','.join(clauses)
def _not_reserved(column):
RESERVED_COLUMNS = ['cartodb_id', 'the_geom', 'the_geom_webmercator']
return column not in RESERVED_COLUMNS
def _create_table_from_columns_query(table_name, columns):
    """Build a CREATE TABLE statement with one quoted column per entry."""
    column_defs = ','.join(
        '{0} {1}'.format(double_quote(col.dbname), col.dbtype) for col in columns)
    return 'CREATE TABLE {0} ({1})'.format(table_name, column_defs)
def _create_table_from_query_query(table_name, query):
return 'CREATE TABLE {table_name} AS ({query})'.format(table_name=table_name, query=query)
def _cartodbfy_query(table_name, schema):
return "SELECT CDB_CartodbfyTable('{schema}', '{table_name}')".format(
schema=schema, table_name=table_name)
def _regenerate_table_query(table_name, schema):
return "SELECT CDB_RegenerateTable('{schema}.{table_name}'::regclass)".format(
schema=schema, table_name=table_name)
def _rename_table_query(table_name, new_table_name):
return 'ALTER TABLE {table_name} RENAME TO {new_table_name};'.format(
table_name=table_name, new_table_name=new_table_name)
def _create_auth_client(credentials, public=False):
    """Build an ``APIKeyAuthClient`` from *credentials*.

    When *public* is true the client authenticates with the
    'default_public' API key instead of the account's private key.
    """
    api_key = 'default_public' if public else credentials.api_key
    version_tag = 'cartoframes_{}'.format(__version__)
    return APIKeyAuthClient(
        base_url=credentials.base_url,
        api_key=api_key,
        session=credentials.session,
        client_id=version_tag,
        user_agent=version_tag)
def _compute_copy_data(df, columns):
    """Yield one pipe-delimited line (bytes, newline-terminated) per row of *df*.

    Geometry columns are EWKB-encoded before serialization; every cell is
    passed through ``encode_row``.
    """
    for idx in df.index:
        cells = []
        for col in columns:
            value = df.at[idx, col.name]
            if col.is_geom:
                value = encode_geometry_ewkb(value)
            cells.append(encode_row(value))
        yield b'|'.join(cells) + b'\n'
| bsd-3-clause |
jlspyaozhongkai/Uter | third_party_backup/Python-2.7.9/Lib/test/test_weakref.py | 23 | 50681 | import gc
import sys
import unittest
import UserList
import weakref
import operator
import contextlib
import copy
from test import test_support
# Used in ReferencesTestCase.test_ref_created_during_del() .
ref_from_del = None
class C:
    """Minimal (old-style in Python 2) fixture class; ``method`` exists only
    so tests can take weak references to bound/unbound methods."""
    def method(self):
        pass
class Callable:
    """Callable fixture: records the last argument it was called with in ``bar``."""
    bar = None
    def __call__(self, x):
        self.bar = x
def create_function():
    """Return a fresh function object (weakref-able referent factory)."""
    def f(): pass
    return f
def create_bound_method():
    """Return a bound method of a fresh ``C`` instance."""
    return C().method
def create_unbound_method():
    """Return ``C.method`` accessed on the class (unbound method in Python 2)."""
    return C.method
class Object:
    """Hashable value wrapper used as weak-dict key/value in the tests.

    Equality, inequality and hashing all delegate to the wrapped ``arg``;
    comparisons against non-Object operands return NotImplemented so
    Python can try the reflected operation.
    """
    def __init__(self, arg):
        self.arg = arg
    def __repr__(self):
        return "<Object %r>" % self.arg
    def __eq__(self, other):
        if not isinstance(other, Object):
            return NotImplemented
        return self.arg == other.arg
    def __ne__(self, other):
        if not isinstance(other, Object):
            return NotImplemented
        return self.arg != other.arg
    def __hash__(self):
        return hash(self.arg)
class RefCycle:
    """Instance that participates in a trivial reference cycle
    (``self.cycle is self``), so it is only reclaimed by cyclic GC."""
    def __init__(self):
        self.cycle = self
class TestBase(unittest.TestCase):
    """Common base: counts weakref-callback invocations in ``cbcalled``."""
    def setUp(self):
        # Reset the callback counter before each test.
        self.cbcalled = 0
    def callback(self, ref):
        # Generic weakref callback shared by many tests below.
        self.cbcalled += 1
class ReferencesTestCase(TestBase):
def test_basic_ref(self):
self.check_basic_ref(C)
self.check_basic_ref(create_function)
self.check_basic_ref(create_bound_method)
self.check_basic_ref(create_unbound_method)
# Just make sure the tp_repr handler doesn't raise an exception.
# Live reference:
o = C()
wr = weakref.ref(o)
repr(wr)
# Dead reference:
del o
repr(wr)
def test_basic_callback(self):
self.check_basic_callback(C)
self.check_basic_callback(create_function)
self.check_basic_callback(create_bound_method)
self.check_basic_callback(create_unbound_method)
def test_multiple_callbacks(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del o
self.assertIsNone(ref1(), "expected reference to be invalidated")
self.assertIsNone(ref2(), "expected reference to be invalidated")
self.assertEqual(self.cbcalled, 2,
"callback not called the right number of times")
def test_multiple_selfref_callbacks(self):
# Make sure all references are invalidated before callbacks are called
#
# What's important here is that we're using the first
# reference in the callback invoked on the second reference
# (the most recently created ref is cleaned up first). This
# tests that all references to the object are invalidated
# before any of the callbacks are invoked, so that we only
# have one invocation of _weakref.c:cleanup_helper() active
# for a particular object at a time.
#
def callback(object, self=self):
self.ref()
c = C()
self.ref = weakref.ref(c, callback)
ref1 = weakref.ref(c, callback)
del c
def test_proxy_ref(self):
o = C()
o.bar = 1
ref1 = weakref.proxy(o, self.callback)
ref2 = weakref.proxy(o, self.callback)
del o
def check(proxy):
proxy.bar
self.assertRaises(weakref.ReferenceError, check, ref1)
self.assertRaises(weakref.ReferenceError, check, ref2)
self.assertRaises(weakref.ReferenceError, bool, weakref.proxy(C()))
self.assertEqual(self.cbcalled, 2)
def check_basic_ref(self, factory):
o = factory()
ref = weakref.ref(o)
self.assertIsNotNone(ref(),
"weak reference to live object should be live")
o2 = ref()
self.assertIs(o, o2,
"<ref>() should return original object if live")
def check_basic_callback(self, factory):
self.cbcalled = 0
o = factory()
ref = weakref.ref(o, self.callback)
del o
self.assertEqual(self.cbcalled, 1,
"callback did not properly set 'cbcalled'")
self.assertIsNone(ref(),
"ref2 should be dead after deleting object reference")
def test_ref_reuse(self):
o = C()
ref1 = weakref.ref(o)
# create a proxy to make sure that there's an intervening creation
# between these two; it should make no difference
proxy = weakref.proxy(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
o = C()
proxy = weakref.proxy(o)
ref1 = weakref.ref(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
self.assertEqual(weakref.getweakrefcount(o), 2,
"wrong weak ref count for object")
del proxy
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong weak ref count for object after deleting proxy")
def test_proxy_reuse(self):
o = C()
proxy1 = weakref.proxy(o)
ref = weakref.ref(o)
proxy2 = weakref.proxy(o)
self.assertIs(proxy1, proxy2,
"proxy object w/out callback should have been re-used")
def test_basic_proxy(self):
o = C()
self.check_proxy(o, weakref.proxy(o))
L = UserList.UserList()
p = weakref.proxy(L)
self.assertFalse(p, "proxy for empty UserList should be false")
p.append(12)
self.assertEqual(len(L), 1)
self.assertTrue(p, "proxy for non-empty UserList should be true")
with test_support.check_py3k_warnings():
p[:] = [2, 3]
self.assertEqual(len(L), 2)
self.assertEqual(len(p), 2)
self.assertIn(3, p, "proxy didn't support __contains__() properly")
p[1] = 5
self.assertEqual(L[1], 5)
self.assertEqual(p[1], 5)
L2 = UserList.UserList(L)
p2 = weakref.proxy(L2)
self.assertEqual(p, p2)
## self.assertEqual(repr(L2), repr(p2))
L3 = UserList.UserList(range(10))
p3 = weakref.proxy(L3)
with test_support.check_py3k_warnings():
self.assertEqual(L3[:], p3[:])
self.assertEqual(L3[5:], p3[5:])
self.assertEqual(L3[:5], p3[:5])
self.assertEqual(L3[2:5], p3[2:5])
def test_proxy_unicode(self):
# See bug 5037
class C(object):
def __str__(self):
return "string"
def __unicode__(self):
return u"unicode"
instance = C()
self.assertIn("__unicode__", dir(weakref.proxy(instance)))
self.assertEqual(unicode(weakref.proxy(instance)), u"unicode")
def test_proxy_index(self):
class C:
def __index__(self):
return 10
o = C()
p = weakref.proxy(o)
self.assertEqual(operator.index(p), 10)
def test_proxy_div(self):
class C:
def __floordiv__(self, other):
return 42
def __ifloordiv__(self, other):
return 21
o = C()
p = weakref.proxy(o)
self.assertEqual(p // 5, 42)
p //= 5
self.assertEqual(p, 21)
# The PyWeakref_* C API is documented as allowing either NULL or
# None as the value for the callback, where either means "no
# callback". The "no callback" ref and proxy objects are supposed
# to be shared so long as they exist by all callers so long as
# they are active. In Python 2.3.3 and earlier, this guarantee
# was not honored, and was broken in different ways for
# PyWeakref_NewRef() and PyWeakref_NewProxy(). (Two tests.)
def test_shared_ref_without_callback(self):
self.check_shared_without_callback(weakref.ref)
def test_shared_proxy_without_callback(self):
self.check_shared_without_callback(weakref.proxy)
def check_shared_without_callback(self, makeref):
o = Object(1)
p1 = makeref(o, None)
p2 = makeref(o, None)
self.assertIs(p1, p2, "both callbacks were None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o, None)
self.assertIs(p1, p2, "callbacks were NULL, None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o)
self.assertIs(p1, p2, "both callbacks were NULL in the C API")
del p1, p2
p1 = makeref(o, None)
p2 = makeref(o)
self.assertIs(p1, p2, "callbacks were None, NULL in the C API")
def test_callable_proxy(self):
o = Callable()
ref1 = weakref.proxy(o)
self.check_proxy(o, ref1)
self.assertIs(type(ref1), weakref.CallableProxyType,
"proxy is not of callable type")
ref1('twinkies!')
self.assertEqual(o.bar, 'twinkies!',
"call through proxy not passed through to original")
ref1(x='Splat.')
self.assertEqual(o.bar, 'Splat.',
"call through proxy not passed through to original")
# expect due to too few args
self.assertRaises(TypeError, ref1)
# expect due to too many args
self.assertRaises(TypeError, ref1, 1, 2, 3)
def check_proxy(self, o, proxy):
o.foo = 1
self.assertEqual(proxy.foo, 1,
"proxy does not reflect attribute addition")
o.foo = 2
self.assertEqual(proxy.foo, 2,
"proxy does not reflect attribute modification")
del o.foo
self.assertFalse(hasattr(proxy, 'foo'),
"proxy does not reflect attribute removal")
proxy.foo = 1
self.assertEqual(o.foo, 1,
"object does not reflect attribute addition via proxy")
proxy.foo = 2
self.assertEqual(o.foo, 2,
"object does not reflect attribute modification via proxy")
del proxy.foo
self.assertFalse(hasattr(o, 'foo'),
"object does not reflect attribute removal via proxy")
def test_proxy_deletion(self):
# Test clearing of SF bug #762891
class Foo:
result = None
def __delitem__(self, accessor):
self.result = accessor
g = Foo()
f = weakref.proxy(g)
del f[0]
self.assertEqual(f.result, 0)
def test_proxy_bool(self):
# Test clearing of SF bug #1170766
class List(list): pass
lyst = List()
self.assertEqual(bool(weakref.proxy(lyst)), bool(lyst))
def test_getweakrefcount(self):
o = C()
ref1 = weakref.ref(o)
ref2 = weakref.ref(o, self.callback)
self.assertEqual(weakref.getweakrefcount(o), 2,
"got wrong number of weak reference objects")
proxy1 = weakref.proxy(o)
proxy2 = weakref.proxy(o, self.callback)
self.assertEqual(weakref.getweakrefcount(o), 4,
"got wrong number of weak reference objects")
del ref1, ref2, proxy1, proxy2
self.assertEqual(weakref.getweakrefcount(o), 0,
"weak reference objects not unlinked from"
" referent when discarded.")
# assumes ints do not support weakrefs
self.assertEqual(weakref.getweakrefcount(1), 0,
"got wrong number of weak reference objects for int")
def test_getweakrefs(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref1
self.assertEqual(weakref.getweakrefs(o), [ref2],
"list of refs does not match")
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref2
self.assertEqual(weakref.getweakrefs(o), [ref1],
"list of refs does not match")
del ref1
self.assertEqual(weakref.getweakrefs(o), [],
"list of refs not cleared")
# assumes ints do not support weakrefs
self.assertEqual(weakref.getweakrefs(1), [],
"list of refs does not match for int")
def test_newstyle_number_ops(self):
class F(float):
pass
f = F(2.0)
p = weakref.proxy(f)
self.assertEqual(p + 1.0, 3.0)
self.assertEqual(1.0 + p, 3.0) # this used to SEGV
def test_callbacks_protected(self):
# Callbacks protected from already-set exceptions?
# Regression test for SF bug #478534.
class BogusError(Exception):
pass
data = {}
def remove(k):
del data[k]
def encapsulate():
f = lambda : ()
data[weakref.ref(f, remove)] = None
raise BogusError
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
def test_sf_bug_840829(self):
# "weakref callbacks and gc corrupt memory"
# subtype_dealloc erroneously exposed a new-style instance
# already in the process of getting deallocated to gc,
# causing double-deallocation if the instance had a weakref
# callback that triggered gc.
# If the bug exists, there probably won't be an obvious symptom
# in a release build. In a debug build, a segfault will occur
# when the second attempt to remove the instance from the "list
# of all objects" occurs.
import gc
class C(object):
pass
c = C()
wr = weakref.ref(c, lambda ignore: gc.collect())
del c
# There endeth the first part. It gets worse.
del wr
c1 = C()
c1.i = C()
wr = weakref.ref(c1.i, lambda ignore: gc.collect())
c2 = C()
c2.c1 = c1
del c1 # still alive because c2 points to it
# Now when subtype_dealloc gets called on c2, it's not enough just
# that c2 is immune from gc while the weakref callbacks associated
# with c2 execute (there are none in this 2nd half of the test, btw).
# subtype_dealloc goes on to call the base classes' deallocs too,
# so any gc triggered by weakref callbacks associated with anything
# torn down by a base class dealloc can also trigger double
# deallocation of c2.
del c2
def test_callback_in_cycle_1(self):
import gc
class J(object):
pass
class II(object):
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
# Now J and II are each in a self-cycle (as all new-style class
# objects are, since their __mro__ points back to them). I holds
# both a weak reference (I.wr) and a strong reference (I.J) to class
# J. I is also in a cycle (I.wr points to a weakref that references
# I.acallback). When we del these three, they all become trash, but
# the cycles prevent any of them from getting cleaned up immediately.
# Instead they have to wait for cyclic gc to deduce that they're
# trash.
#
# gc used to call tp_clear on all of them, and the order in which
# it does that is pretty accidental. The exact order in which we
# built up these things manages to provoke gc into running tp_clear
# in just the right order (I last). Calling tp_clear on II leaves
# behind an insane class object (its __mro__ becomes NULL). Calling
# tp_clear on J breaks its self-cycle, but J doesn't get deleted
# just then because of the strong reference from I.J. Calling
# tp_clear on I starts to clear I's __dict__, and just happens to
# clear I.J first -- I.wr is still intact. That removes the last
# reference to J, which triggers the weakref callback. The callback
# tries to do "self.J", and instances of new-style classes look up
# attributes ("J") in the class dict first. The class (II) wants to
# search II.__mro__, but that's NULL. The result was a segfault in
# a release build, and an assert failure in a debug build.
del I, J, II
gc.collect()
def test_callback_in_cycle_2(self):
import gc
# This is just like test_callback_in_cycle_1, except that II is an
# old-style class. The symptom is different then: an instance of an
# old-style class looks in its own __dict__ first. 'J' happens to
# get cleared from I.__dict__ before 'wr', and 'J' was never in II's
# __dict__, so the attribute isn't found. The difference is that
# the old-style II doesn't have a NULL __mro__ (it doesn't have any
# __mro__), so no segfault occurs. Instead it got:
# test_callback_in_cycle_2 (__main__.ReferencesTestCase) ...
# Exception exceptions.AttributeError:
# "II instance has no attribute 'J'" in <bound method II.acallback
# of <?.II instance at 0x00B9B4B8>> ignored
class J(object):
pass
class II:
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
del I, J, II
gc.collect()
def test_callback_in_cycle_3(self):
import gc
# This one broke the first patch that fixed the last two. In this
# case, the objects reachable from the callback aren't also reachable
# from the object (c1) *triggering* the callback: you can get to
# c1 from c2, but not vice-versa. The result was that c2's __dict__
# got tp_clear'ed by the time the c2.cb callback got invoked.
class C:
def cb(self, ignore):
self.me
self.c1
self.wr
c1, c2 = C(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2
gc.collect()
def test_callback_in_cycle_4(self):
import gc
# Like test_callback_in_cycle_3, except c2 and c1 have different
# classes. c2's class (C) isn't reachable from c1 then, so protecting
# objects reachable from the dying object (c1) isn't enough to stop
# c2's class (C) from getting tp_clear'ed before c2.cb is invoked.
# The result was a segfault (C.__mro__ was NULL when the callback
# tried to look up self.me).
class C(object):
def cb(self, ignore):
self.me
self.c1
self.wr
class D:
pass
c1, c2 = D(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2, C, D
gc.collect()
def test_callback_in_cycle_resurrection(self):
import gc
# Do something nasty in a weakref callback: resurrect objects
# from dead cycles. For this to be attempted, the weakref and
# its callback must also be part of the cyclic trash (else the
# objects reachable via the callback couldn't be in cyclic trash
# to begin with -- the callback would act like an external root).
# But gc clears trash weakrefs with callbacks early now, which
# disables the callbacks, so the callbacks shouldn't get called
# at all (and so nothing actually gets resurrected).
alist = []
class C(object):
def __init__(self, value):
self.attribute = value
def acallback(self, ignore):
alist.append(self.c)
c1, c2 = C(1), C(2)
c1.c = c2
c2.c = c1
c1.wr = weakref.ref(c2, c1.acallback)
c2.wr = weakref.ref(c1, c2.acallback)
def C_went_away(ignore):
alist.append("C went away")
wr = weakref.ref(C, C_went_away)
del c1, c2, C # make them all trash
self.assertEqual(alist, []) # del isn't enough to reclaim anything
gc.collect()
# c1.wr and c2.wr were part of the cyclic trash, so should have
# been cleared without their callbacks executing. OTOH, the weakref
# to C is bound to a function local (wr), and wasn't trash, so that
# callback should have been invoked when C went away.
self.assertEqual(alist, ["C went away"])
# The remaining weakref should be dead now (its callback ran).
self.assertEqual(wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_callbacks_on_callback(self):
import gc
# Set up weakref callbacks *on* weakref callbacks.
alist = []
def safe_callback(ignore):
alist.append("safe_callback called")
class C(object):
def cb(self, ignore):
alist.append("cb called")
c, d = C(), C()
c.other = d
d.other = c
callback = c.cb
c.wr = weakref.ref(d, callback) # this won't trigger
d.wr = weakref.ref(callback, d.cb) # ditto
external_wr = weakref.ref(callback, safe_callback) # but this will
self.assertIs(external_wr(), callback)
# The weakrefs attached to c and d should get cleared, so that
# C.cb is never called. But external_wr isn't part of the cyclic
# trash, and no cyclic trash is reachable from it, so safe_callback
# should get invoked when the bound method object callback (c.cb)
# -- which is itself a callback, and also part of the cyclic trash --
# gets reclaimed at the end of gc.
del callback, c, d, C
self.assertEqual(alist, []) # del isn't enough to clean up cycles
gc.collect()
self.assertEqual(alist, ["safe_callback called"])
self.assertEqual(external_wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_gc_during_ref_creation(self):
self.check_gc_during_creation(weakref.ref)
def test_gc_during_proxy_creation(self):
self.check_gc_during_creation(weakref.proxy)
def check_gc_during_creation(self, makeref):
thresholds = gc.get_threshold()
gc.set_threshold(1, 1, 1)
gc.collect()
class A:
pass
def callback(*args):
pass
referenced = A()
a = A()
a.a = a
a.wr = makeref(referenced)
try:
# now make sure the object and the ref get labeled as
# cyclic trash:
a = A()
weakref.ref(referenced, callback)
finally:
gc.set_threshold(*thresholds)
def test_ref_created_during_del(self):
# Bug #1377858
# A weakref created in an object's __del__() would crash the
# interpreter when the weakref was cleaned up since it would refer to
# non-existent memory. This test should not segfault the interpreter.
class Target(object):
def __del__(self):
global ref_from_del
ref_from_del = weakref.ref(self)
w = Target()
def test_init(self):
# Issue 3634
# <weakref to class>.__init__() doesn't check errors correctly
r = weakref.ref(Exception)
self.assertRaises(TypeError, r.__init__, 0, 0, 0, 0, 0)
# No exception should be raised here
gc.collect()
def test_classes(self):
# Check that both old-style classes and new-style classes
# are weakrefable.
class A(object):
pass
class B:
pass
l = []
weakref.ref(int)
a = weakref.ref(A, l.append)
A = None
gc.collect()
self.assertEqual(a(), None)
self.assertEqual(l, [a])
b = weakref.ref(B, l.append)
B = None
gc.collect()
self.assertEqual(b(), None)
self.assertEqual(l, [a, b])
def test_equality(self):
# Alive weakrefs defer equality testing to their underlying object.
x = Object(1)
y = Object(1)
z = Object(2)
a = weakref.ref(x)
b = weakref.ref(y)
c = weakref.ref(z)
d = weakref.ref(x)
# Note how we directly test the operators here, to stress both
# __eq__ and __ne__.
self.assertTrue(a == b)
self.assertFalse(a != b)
self.assertFalse(a == c)
self.assertTrue(a != c)
self.assertTrue(a == d)
self.assertFalse(a != d)
del x, y, z
gc.collect()
for r in a, b, c:
# Sanity check
self.assertIs(r(), None)
# Dead weakrefs compare by identity: whether `a` and `d` are the
# same weakref object is an implementation detail, since they pointed
# to the same original object and didn't have a callback.
# (see issue #16453).
self.assertFalse(a == b)
self.assertTrue(a != b)
self.assertFalse(a == c)
self.assertTrue(a != c)
self.assertEqual(a == d, a is d)
self.assertEqual(a != d, a is not d)
def test_hashing(self):
# Alive weakrefs hash the same as the underlying object
x = Object(42)
y = Object(42)
a = weakref.ref(x)
b = weakref.ref(y)
self.assertEqual(hash(a), hash(42))
del x, y
gc.collect()
# Dead weakrefs:
# - retain their hash is they were hashed when alive;
# - otherwise, cannot be hashed.
self.assertEqual(hash(a), hash(42))
self.assertRaises(TypeError, hash, b)
def test_trashcan_16602(self):
# Issue #16602: when a weakref's target was part of a long
# deallocation chain, the trashcan mechanism could delay clearing
# of the weakref and make the target object visible from outside
# code even though its refcount had dropped to 0. A crash ensued.
class C(object):
def __init__(self, parent):
if not parent:
return
wself = weakref.ref(self)
def cb(wparent):
o = wself()
self.wparent = weakref.ref(parent, cb)
d = weakref.WeakKeyDictionary()
root = c = C(None)
for n in range(100):
d[c] = c = C(c)
del root
gc.collect()
class SubclassableWeakrefTestCase(TestBase):
    """Tests for subclassing ``weakref.ref``: instance attributes, identity
    semantics, __slots__ support, and callback behavior on cycles."""
    def test_subclass_refs(self):
        """A ref subclass can add state in __init__ and wrap __call__."""
        class MyRef(weakref.ref):
            def __init__(self, ob, callback=None, value=42):
                self.value = value
                super(MyRef, self).__init__(ob, callback)
            def __call__(self):
                self.called = True
                return super(MyRef, self).__call__()
        o = Object("foo")
        mr = MyRef(o, value=24)
        self.assertIs(mr(), o)
        self.assertTrue(mr.called)
        self.assertEqual(mr.value, 24)
        del o
        self.assertIsNone(mr())
        self.assertTrue(mr.called)
    def test_subclass_refs_dont_replace_standard_refs(self):
        """Subclass refs are distinct objects from plain weakref.ref to the
        same referent; the plain ref stays first in getweakrefs()."""
        class MyRef(weakref.ref):
            pass
        o = Object(42)
        r1 = MyRef(o)
        r2 = weakref.ref(o)
        self.assertIsNot(r1, r2)
        self.assertEqual(weakref.getweakrefs(o), [r2, r1])
        self.assertEqual(weakref.getweakrefcount(o), 2)
        r3 = MyRef(o)
        self.assertEqual(weakref.getweakrefcount(o), 3)
        refs = weakref.getweakrefs(o)
        self.assertEqual(len(refs), 3)
        self.assertIs(r2, refs[0])
        self.assertIn(r1, refs[1:])
        self.assertIn(r3, refs[1:])
    def test_subclass_refs_dont_conflate_callbacks(self):
        """Two subclass refs with different callbacks are kept separate."""
        class MyRef(weakref.ref):
            pass
        o = Object(42)
        r1 = MyRef(o, id)
        r2 = MyRef(o, str)
        self.assertIsNot(r1, r2)
        refs = weakref.getweakrefs(o)
        self.assertIn(r1, refs)
        self.assertIn(r2, refs)
    def test_subclass_refs_with_slots(self):
        """A ref subclass may use __slots__ (and then has no __dict__)."""
        class MyRef(weakref.ref):
            __slots__ = "slot1", "slot2"
            def __new__(type, ob, callback, slot1, slot2):
                return weakref.ref.__new__(type, ob, callback)
            def __init__(self, ob, callback, slot1, slot2):
                self.slot1 = slot1
                self.slot2 = slot2
            def meth(self):
                return self.slot1 + self.slot2
        o = Object(42)
        r = MyRef(o, None, "abc", "def")
        self.assertEqual(r.slot1, "abc")
        self.assertEqual(r.slot2, "def")
        self.assertEqual(r.meth(), "abcdef")
        self.assertFalse(hasattr(r, "__dict__"))
    def test_subclass_refs_with_cycle(self):
        # Bug #3110
        # An instance of a weakref subclass can have attributes.
        # If such a weakref holds the only strong reference to the object,
        # deleting the weakref will delete the object. In this case,
        # the callback must not be called, because the ref object is
        # being deleted.
        class MyRef(weakref.ref):
            pass
        # Use a local callback, for "regrtest -R::"
        # to detect refcounting problems
        def callback(w):
            self.cbcalled += 1
        o = C()
        r1 = MyRef(o, callback)
        r1.o = o
        del o
        del r1 # Used to crash here
        self.assertEqual(self.cbcalled, 0)
        # Same test, with two weakrefs to the same object
        # (since code paths are different)
        o = C()
        r1 = MyRef(o, callback)
        r2 = MyRef(o, callback)
        r1.r = r2
        r2.o = o
        del o
        del r2
        del r1 # Used to crash here
        self.assertEqual(self.cbcalled, 0)
class MappingTestCase(TestBase):
COUNT = 10
def check_len_cycles(self, dict_type, cons):
N = 20
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(i, o) for i, o in enumerate(items))
# Keep an iterator alive
it = dct.iteritems()
try:
next(it)
except StopIteration:
pass
del items
gc.collect()
n1 = len(dct)
list(it)
del it
gc.collect()
n2 = len(dct)
# iteration should prevent garbage collection here
# Note that this is a test on an implementation detail. The requirement
# is only to provide stable iteration, not that the size of the container
# stay fixed.
self.assertEqual(n1, 20)
#self.assertIn(n1, (0, 1))
self.assertEqual(n2, 0)
def test_weak_keyed_len_cycles(self):
self.check_len_cycles(weakref.WeakKeyDictionary, lambda n, k: (k, n))
def test_weak_valued_len_cycles(self):
self.check_len_cycles(weakref.WeakValueDictionary, lambda n, k: (n, k))
def check_len_race(self, dict_type, cons):
# Extended sanity checks for len() in the face of cyclic collection
self.addCleanup(gc.set_threshold, *gc.get_threshold())
for th in range(1, 100):
N = 20
gc.collect(0)
gc.set_threshold(th, th, th)
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
del items
# All items will be collected at next garbage collection pass
it = dct.iteritems()
try:
next(it)
except StopIteration:
pass
n1 = len(dct)
del it
n2 = len(dct)
self.assertGreaterEqual(n1, 0)
self.assertLessEqual(n1, N)
self.assertGreaterEqual(n2, 0)
self.assertLessEqual(n2, n1)
def test_weak_keyed_len_race(self):
self.check_len_race(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_race(self):
self.check_len_race(weakref.WeakValueDictionary, lambda k: (1, k))
def test_weak_values(self):
#
# This exercises d.copy(), d.items(), d[], del d[], len(d).
#
dict, objects = self.make_weak_valued_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong number of weak references to %r!" % o)
self.assertIs(o, dict[o.arg],
"wrong object returned by weak dict!")
items1 = dict.items()
items2 = dict.copy().items()
items1.sort()
items2.sort()
self.assertEqual(items1, items2,
"cloning of weak-valued dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), (self.COUNT - 1),
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the values did not clear the dictionary")
# regression on SF bug #447152:
dict = weakref.WeakValueDictionary()
self.assertRaises(KeyError, dict.__getitem__, 1)
dict[2] = C()
self.assertRaises(KeyError, dict.__getitem__, 2)
def test_weak_keys(self):
#
# This exercises d.copy(), d.items(), d[] = v, d[], del d[],
# len(d), in d.
#
dict, objects = self.make_weak_keyed_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong number of weak references to %r!" % o)
self.assertIs(o.arg, dict[o],
"wrong object returned by weak dict!")
items1 = dict.items()
items2 = dict.copy().items()
self.assertEqual(set(items1), set(items2),
"cloning of weak-keyed dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), (self.COUNT - 1),
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the keys did not clear the dictionary")
o = Object(42)
dict[o] = "What is the meaning of the universe?"
self.assertIn(o, dict)
self.assertNotIn(34, dict)
def test_weak_keyed_iters(self):
dict, objects = self.make_weak_keyed_dict()
self.check_iters(dict)
# Test keyrefs()
refs = dict.keyrefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test iterkeyrefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.iterkeyrefs())), len(objects))
for wr in dict.iterkeyrefs():
ob = wr()
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def test_weak_valued_iters(self):
dict, objects = self.make_weak_valued_dict()
self.check_iters(dict)
# Test valuerefs()
refs = dict.valuerefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test itervaluerefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.itervaluerefs())), len(objects))
for wr in dict.itervaluerefs():
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def check_iters(self, dict):
# item iterator:
items = dict.items()
for item in dict.iteritems():
items.remove(item)
self.assertEqual(len(items), 0, "iteritems() did not touch all items")
# key iterator, via __iter__():
keys = dict.keys()
for k in dict:
keys.remove(k)
self.assertEqual(len(keys), 0, "__iter__() did not touch all keys")
# key iterator, via iterkeys():
keys = dict.keys()
for k in dict.iterkeys():
keys.remove(k)
self.assertEqual(len(keys), 0, "iterkeys() did not touch all keys")
# value iterator:
values = dict.values()
for v in dict.itervalues():
values.remove(v)
self.assertEqual(len(values), 0,
"itervalues() did not touch all values")
def check_weak_destroy_while_iterating(self, dict, objects, iter_name):
n = len(dict)
it = iter(getattr(dict, iter_name)())
next(it) # Trigger internal iteration
# Destroy an object
del objects[-1]
gc.collect() # just in case
# We have removed either the first consumed object, or another one
self.assertIn(len(list(it)), [len(objects), len(objects) - 1])
del it
# The removal has been committed
self.assertEqual(len(dict), n - 1)
def check_weak_destroy_and_mutate_while_iterating(self, dict, testcontext):
# Check that we can explicitly mutate the weak dict without
# interfering with delayed removal.
# `testcontext` should create an iterator, destroy one of the
# weakref'ed objects and then return a new key/value pair corresponding
# to the destroyed object.
with testcontext() as (k, v):
self.assertFalse(k in dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.__delitem__, k)
self.assertFalse(k in dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.pop, k)
self.assertFalse(k in dict)
with testcontext() as (k, v):
dict[k] = v
self.assertEqual(dict[k], v)
ddict = copy.copy(dict)
with testcontext() as (k, v):
dict.update(ddict)
self.assertEqual(dict, ddict)
with testcontext() as (k, v):
dict.clear()
self.assertEqual(len(dict), 0)
def test_weak_keys_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_keyed_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'iterkeys')
self.check_weak_destroy_while_iterating(dict, objects, 'iteritems')
self.check_weak_destroy_while_iterating(dict, objects, 'itervalues')
self.check_weak_destroy_while_iterating(dict, objects, 'iterkeyrefs')
dict, objects = self.make_weak_keyed_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.iteritems())
next(it)
# Schedule a key/value for removal and recreate it
v = objects.pop().arg
gc.collect() # just in case
yield Object(v), v
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
def test_weak_values_destroy_while_iterating(self):
    # Issue #7105: iterators shouldn't crash when a key is implicitly removed
    dict, objects = self.make_weak_valued_dict()
    self.check_weak_destroy_while_iterating(dict, objects, 'iterkeys')
    self.check_weak_destroy_while_iterating(dict, objects, 'iteritems')
    self.check_weak_destroy_while_iterating(dict, objects, 'itervalues')
    self.check_weak_destroy_while_iterating(dict, objects, 'itervaluerefs')
    dict, objects = self.make_weak_valued_dict()
    @contextlib.contextmanager
    def testcontext():
        try:
            # Hold a live iterator so weakref removals are delayed, then
            # drop the last strong reference to one value and yield the
            # same key with a replacement value.
            it = iter(dict.iteritems())
            next(it)
            # Schedule a key/value for removal and recreate it
            k = objects.pop().arg
            gc.collect()      # just in case
            yield k, Object(k)
        finally:
            # Dropping the iterator should commit all delayed removals.
            it = None           # should commit all removals
            gc.collect()
    self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
def test_make_weak_keyed_dict_from_dict(self):
    # A WeakKeyDictionary can be seeded directly from a regular dict.
    key = Object(3)
    weak_map = weakref.WeakKeyDictionary({key: 364})
    self.assertEqual(weak_map[key], 364)
def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
    # Seeding one WeakKeyDictionary from another must not disturb the source.
    key = Object(3)
    original = weakref.WeakKeyDictionary({key: 364})
    copy_map = weakref.WeakKeyDictionary(original)
    self.assertEqual(original[key], 364)
def make_weak_keyed_dict(self):
    # Build a WeakKeyDictionary mapping each Object to its .arg payload and
    # return the strong references alongside, so the caller controls lifetime.
    objects = map(Object, range(self.COUNT))
    weak_map = weakref.WeakKeyDictionary()
    for obj in objects:
        weak_map[obj] = obj.arg
    return weak_map, objects
def make_weak_valued_dict(self):
    # Build a WeakValueDictionary keyed by each Object's .arg payload and
    # return the strong references alongside, so the caller controls lifetime.
    objects = map(Object, range(self.COUNT))
    weak_map = weakref.WeakValueDictionary()
    for obj in objects:
        weak_map[obj.arg] = obj
    return weak_map, objects
def check_popitem(self, klass, key1, value1, key2, value2):
    # Populate a two-entry weak dict, then drain it with popitem(),
    # verifying each popped key comes back with its matching value.
    weakdict = klass()
    weakdict[key1] = value1
    weakdict[key2] = value2
    self.assertEqual(len(weakdict), 2)
    for remaining in (1, 0):
        k, v = weakdict.popitem()
        self.assertEqual(len(weakdict), remaining)
        expected = value1 if k is key1 else value2
        self.assertIs(v, expected)
def test_weak_valued_dict_popitem(self):
    # popitem() with string keys and weakly referencable values.
    factory = weakref.WeakValueDictionary
    self.check_popitem(factory, "key1", C(), "key2", C())
def test_weak_keyed_dict_popitem(self):
    # popitem() with weakly referencable keys and string values.
    factory = weakref.WeakKeyDictionary
    self.check_popitem(factory, C(), "value 1", C(), "value 2")
def check_setdefault(self, klass, key, value1, value2):
    # setdefault() must insert value1 on the first call and keep returning
    # it afterwards, ignoring value2 on the second call.
    self.assertIsNot(value1, value2,
                     "invalid test"
                     " -- value parameters must be distinct objects")
    weakdict = klass()
    for default in (value1, value2):
        returned = weakdict.setdefault(key, default)
        self.assertIs(returned, value1)
        self.assertIn(key, weakdict)
        self.assertIs(weakdict.get(key), value1)
        self.assertIs(weakdict[key], value1)
def test_weak_valued_dict_setdefault(self):
    # setdefault() with a string key and weakly referencable values.
    factory = weakref.WeakValueDictionary
    self.check_setdefault(factory, "key", C(), C())
def test_weak_keyed_dict_setdefault(self):
    # setdefault() with a weakly referencable key and string values.
    factory = weakref.WeakKeyDictionary
    self.check_setdefault(factory, C(), "value 1", "value 2")
def check_update(self, klass, dict):
    # Exercises d.update(), len(d), d.keys(), "in d", d.get(), d[] by
    # checking that the weak dict mirrors the source mapping both ways.
    weakdict = klass()
    weakdict.update(dict)
    self.assertEqual(len(weakdict), len(dict))
    # Every key in the weak dict must exist in the source with the same value.
    for k in weakdict.keys():
        self.assertIn(k, dict, "mysterious new key appeared in weak dict")
        v = dict.get(k)
        self.assertIs(v, weakdict[k])
        self.assertIs(v, weakdict.get(k))
    # Every source key must survive in the weak dict with the same value.
    for k in dict.keys():
        self.assertIn(k, weakdict, "original key disappeared in weak dict")
        v = dict[k]
        self.assertIs(v, weakdict[k])
        self.assertIs(v, weakdict.get(k))
def test_weak_valued_dict_update(self):
    # update() from a regular dict into a WeakValueDictionary.
    source = {1: C(), 'a': C(), C(): C()}
    self.check_update(weakref.WeakValueDictionary, source)
def test_weak_keyed_dict_update(self):
    # update() from a regular dict into a WeakKeyDictionary.
    source = {C(): 1, C(): 2, C(): 3}
    self.check_update(weakref.WeakKeyDictionary, source)
def test_weak_keyed_delitem(self):
    # del on a present key removes exactly that entry.
    d = weakref.WeakKeyDictionary()
    first, second = Object('1'), Object('2')
    d[first] = 'something'
    d[second] = 'something'
    self.assertEqual(len(d), 2)
    del d[first]
    self.assertEqual(len(d), 1)
    self.assertEqual(d.keys(), [second])
def test_weak_valued_delitem(self):
    # del on a present key removes exactly that entry.
    d = weakref.WeakValueDictionary()
    first, second = Object('1'), Object('2')
    d['something'] = first
    d['something else'] = second
    self.assertEqual(len(d), 2)
    del d['something']
    self.assertEqual(len(d), 1)
    self.assertEqual(d.items(), [('something else', second)])
def test_weak_keyed_bad_delitem(self):
    d = weakref.WeakKeyDictionary()
    o = Object('1')
    # An attempt to delete an object that isn't there should raise
    # KeyError.  It didn't before 2.3.
    for accessor in (d.__delitem__, d.__getitem__):
        self.assertRaises(KeyError, accessor, o)
    # If a key isn't of a weakly referencable type, __getitem__ and
    # __setitem__ raise TypeError.  __delitem__ should too.
    self.assertRaises(TypeError, d.__delitem__, 13)
    self.assertRaises(TypeError, d.__getitem__, 13)
    self.assertRaises(TypeError, d.__setitem__, 13, 13)
def test_weak_keyed_cascading_deletes(self):
    # SF bug 742860. For some reason, before 2.3 __delitem__ iterated
    # over the keys via self.data.iterkeys(). If things vanished from
    # the dict during this (or got added), that caused a RuntimeError.
    d = weakref.WeakKeyDictionary()
    mutate = False

    class C(object):
        # Keys hash/compare by .value; once ``mutate`` is set, every
        # equality test also drops the last strong reference in ``objs``,
        # cascading a weakref removal into the middle of a dict operation.
        def __init__(self, i):
            self.value = i
        def __hash__(self):
            return hash(self.value)
        def __eq__(self, other):
            if mutate:
                # Side effect that mutates the dict, by removing the
                # last strong reference to a key.
                del objs[-1]
            return self.value == other.value

    objs = [C(i) for i in range(4)]
    for o in objs:
        d[o] = o.value
    del o   # now the only strong references to keys are in objs
    # Find the order in which iterkeys sees the keys.
    objs = d.keys()
    # Reverse it, so that the iteration implementation of __delitem__
    # has to keep looping to find the first object we delete.
    objs.reverse()

    # Turn on mutation in C.__eq__.  The first time thru the loop,
    # under the iterkeys() business the first comparison will delete
    # the last item iterkeys() would see, and that causes a
    #     RuntimeError: dictionary changed size during iteration
    # when the iterkeys() loop goes around to try comparing the next
    # key.  After this was fixed, it just deletes the last object *our*
    # "for o in obj" loop would have gotten to.
    mutate = True
    count = 0
    for o in objs:
        count += 1
        del d[o]
    self.assertEqual(len(d), 0)
    self.assertEqual(count, 2)
from test import mapping_tests
class WeakValueDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
    """Check that WeakValueDictionary conforms to the mapping protocol"""
    # Class-level strong references keep the Object values alive for the
    # whole test run, so weakref expiry never interferes with the protocol
    # checks.
    __ref = {"key1":Object(1), "key2":Object(2), "key3":Object(3)}
    type2test = weakref.WeakValueDictionary
    def _reference(self):
        # Hand out a fresh copy so individual tests may mutate it freely.
        return self.__ref.copy()
class WeakKeyDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
    """Check that WeakKeyDictionary conforms to the mapping protocol"""
    # Class-level strong references keep the Object keys alive for the
    # whole test run.
    __ref = {Object("key1"):1, Object("key2"):2, Object("key3"):3}
    type2test = weakref.WeakKeyDictionary
    def _reference(self):
        # Hand out a fresh copy so individual tests may mutate it freely.
        return self.__ref.copy()
libreftest = """ Doctest for examples in the library reference: weakref.rst
>>> import weakref
>>> class Dict(dict):
... pass
...
>>> obj = Dict(red=1, green=2, blue=3) # this object is weak referencable
>>> r = weakref.ref(obj)
>>> print r() is obj
True
>>> import weakref
>>> class Object:
... pass
...
>>> o = Object()
>>> r = weakref.ref(o)
>>> o2 = r()
>>> o is o2
True
>>> del o, o2
>>> print r()
None
>>> import weakref
>>> class ExtendedRef(weakref.ref):
... def __init__(self, ob, callback=None, **annotations):
... super(ExtendedRef, self).__init__(ob, callback)
... self.__counter = 0
... for k, v in annotations.iteritems():
... setattr(self, k, v)
... def __call__(self):
... '''Return a pair containing the referent and the number of
... times the reference has been called.
... '''
... ob = super(ExtendedRef, self).__call__()
... if ob is not None:
... self.__counter += 1
... ob = (ob, self.__counter)
... return ob
...
>>> class A: # not in docs from here, just testing the ExtendedRef
... pass
...
>>> a = A()
>>> r = ExtendedRef(a, foo=1, bar="baz")
>>> r.foo
1
>>> r.bar
'baz'
>>> r()[1]
1
>>> r()[1]
2
>>> r()[0] is a
True
>>> import weakref
>>> _id2obj_dict = weakref.WeakValueDictionary()
>>> def remember(obj):
... oid = id(obj)
... _id2obj_dict[oid] = obj
... return oid
...
>>> def id2obj(oid):
... return _id2obj_dict[oid]
...
>>> a = A() # from here, just testing
>>> a_id = remember(a)
>>> id2obj(a_id) is a
True
>>> del a
>>> try:
... id2obj(a_id)
... except KeyError:
... print 'OK'
... else:
... print 'WeakValueDictionary error'
OK
"""
__test__ = {'libreftest' : libreftest}
def test_main():
    # Run the unittest suites first, then the module's doctests
    # (the libreftest examples registered in __test__).
    cases = (
        ReferencesTestCase,
        MappingTestCase,
        WeakValueDictionaryTestCase,
        WeakKeyDictionaryTestCase,
        SubclassableWeakrefTestCase,
    )
    test_support.run_unittest(*cases)
    test_support.run_doctest(sys.modules[__name__])


if __name__ == "__main__":
    test_main()
| gpl-3.0 |
mrkn/iTerm2 | tools/ply/ply-3.4/test/yacc_badprec3.py | 174 | 1530 | # -----------------------------------------------------------------------------
# yacc_badprec3.py
#
# Bad precedence
# -----------------------------------------------------------------------------
import sys
# Make the parent directory importable so ``ply`` and ``calclex`` resolve
# when this test is run from the test directory itself.
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules
precedence = (
    ('left','PLUS','MINUS'),
    # NOTE: 'MINUS' deliberately appears again on the next line.  This file
    # exists to exercise yacc's duplicate-precedence error reporting
    # ("bad precedence"); do NOT "fix" the duplication.
    ('left','TIMES','DIVIDE','MINUS'),
    ('right','UMINUS'),
    )

# dictionary of names
names = { }
def p_statement_assign(t):
    # The docstring below is the grammar production consumed by ply -- it is
    # functional, not documentation.  Binds NAME to the expression's value.
    'statement : NAME EQUALS expression'
    names[t[1]] = t[3]
def p_statement_expr(t):
    # Top-level expression statement: evaluate and echo the result.
    'statement : expression'
    print(t[1])
def p_expression_binop(t):
    '''expression : expression PLUS expression
                  | expression MINUS expression
                  | expression TIMES expression
                  | expression DIVIDE expression'''
    # Dispatch on the operator token t[2]; t[1] and t[3] are the operand
    # values.  (The docstring above is the ply grammar rule -- functional.)
    if t[2] == '+'  : t[0] = t[1] + t[3]
    elif t[2] == '-': t[0] = t[1] - t[3]
    elif t[2] == '*': t[0] = t[1] * t[3]
    # BUG FIX: this compared the *operand* (t[3]) against '/', so division
    # never assigned t[0]; the operator token is t[2].
    elif t[2] == '/': t[0] = t[1] / t[3]
def p_expression_uminus(t):
    # Unary minus; %prec UMINUS raises its precedence above binary MINUS.
    'expression : MINUS expression %prec UMINUS'
    t[0] = -t[2]
def p_expression_group(t):
    # Parenthesized expression: the value is the inner expression's value.
    'expression : LPAREN expression RPAREN'
    t[0] = t[2]
def p_expression_number(t):
    # Numeric literal: pass the token value through.
    'expression : NUMBER'
    t[0] = t[1]
def p_expression_name(t):
    'expression : NAME'
    # Look the identifier up in the global symbol table; unknown names
    # evaluate to 0 after printing a diagnostic.
    if t[1] in names:
        t[0] = names[t[1]]
    else:
        print("Undefined name '%s'" % t[1])
        t[0] = 0
def p_error(t):
    # Called by ply on a syntax error; report the offending token.
    print("Syntax error at '%s'" % t.value)

# Building the parser tables is what triggers the duplicate-precedence
# error this test file exists to provoke.
yacc.yacc()
| gpl-2.0 |
hwjworld/xiaodun-platform | lms/djangoapps/wechat/views.py | 1 | 47459 | import logging
import urllib
from collections import defaultdict
from lxml import html
from django.conf import settings
from django.core.context_processors import csrf
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponse
from django.shortcuts import redirect
from edxmako.shortcuts import render_to_response, render_to_string
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from django.db import transaction
from markupsafe import escape
import django.utils
from courseware import grades
from courseware.access import has_access
from courseware.courses import (get_courses, get_course_with_access, sort_by_announcement, get_course_info_section,
get_course_by_id, get_course, course_image_url, get_course_about_section, get_courses_by_search)
import courseware.tabs as tabs
from courseware.masquerade import setup_masquerade
from courseware.model_data import FieldDataCache
from .module_render import toc_for_course, get_module_for_descriptor,mobi_toc_for_course
from courseware.models import StudentModule, StudentModuleHistory
from course_modes.models import CourseMode
from student.models import UserTestGroup, CourseEnrollment
from student.views import course_from_id, single_course_reverification_info
from util.cache import cache, cache_if_anonymous
from util.json_request import JsonResponse
from xblock.fragment import Fragment
from xmodule.modulestore import Location
from xmodule.modulestore.django import modulestore, loc_mapper
from xmodule.modulestore.exceptions import InvalidLocationError, ItemNotFoundError, NoPathToItem
from xmodule.modulestore.search import path_to_location
from xmodule.course_module import CourseDescriptor
from xmodule.contentstore.content import StaticContent
import shoppingcart
from microsite_configuration import microsite
# Module logger and the extra names injected into accordion template context.
log = logging.getLogger("edx.courseware")

template_imports = {'urllib': urllib}
def user_groups(user):
    """
    Return the cached list of UserTestGroup names for ``user``.

    TODO (vshnayder): This is not used. When we have a new plan for groups,
    adjust appropriately.
    """
    if not user.is_authenticated():
        return []

    # TODO: Rewrite in Django
    cache_key = 'user_group_names_{user.id}'.format(user=user)
    cache_expiration = 60 * 60  # one hour

    group_names = cache.get(cache_key)
    if settings.DEBUG:
        # Kill caching on dev machines -- we switch groups a lot.
        group_names = None

    if group_names is None:
        group_names = [group.name for group in UserTestGroup.objects.filter(users=user)]
        cache.set(cache_key, group_names, cache_expiration)

    return group_names
#@ensure_csrf_cookie
#@cache_if_anonymous
def courses(request):
    """
    Render "find courses" page.  The course selection work is done in
    courseware.courses; an optional ``query`` GET parameter filters by
    substring match on org, id or display name.
    """
    query = request.GET.get('query', '')
    all_courses = get_courses_by_search(request.META.get('HTTP_HOST'))

    if query != "":
        matched = [course for course in all_courses
                   if query in course.org
                   or query in course.id
                   or query in course.display_name_with_default]
    else:
        matched = all_courses

    courses = sort_by_announcement(matched)
    return render_to_response("courseware/courses.html", {'courses': courses})
def return_fixed_courses(request, courses, user=None, action=None):
    """
    Return a JSON page of up to ``default_length`` course summaries.

    Paging: the ``course_id`` GET parameter (dotted form) names the last
    course the client already has; the page starts just after it.  Courses
    that fail to serialize are silently skipped (best-effort by design).
    """
    default_length = 8
    if user is None:
        # BUG FIX: the old signature used a single module-level
        # AnonymousUser() instance as a shared default argument; create a
        # fresh one per call instead.
        user = AnonymousUser()

    course_id = request.GET.get("course_id")
    course_index = 0
    if course_id:
        # Mobile clients encode course ids with '.' instead of '/'.
        course_id = course_id.replace(".", '/')
        try:
            index_course = get_course_by_id(course_id)
            course_index = courses.index(index_course) + 1
        except:
            course_index = 0

    # BUG FIX: the offset was applied twice (courses[course_index:] followed
    # by current_list[course_index:course_index + 8]), silently skipping
    # courses whenever more than one page remained; slice exactly once.
    current_list = courses[course_index:course_index + default_length]

    course_list = []
    for course in current_list:
        try:
            course_json = mobi_course_info(request, course, action)
            course_json["registered"] = registered_for_course(course, user)
            course_list.append(course_json)
        except:
            # Best-effort: skip courses whose summary cannot be built.
            continue

    return JsonResponse({"count": len(courses), "course-list": course_list})
def courses_list_handler(request, action):
    """
    Return a JSON page of courses selected by ``action``.

    Supported actions: homefalls, all, hot, latest, my, search, rolling.
    Unknown actions get a JSON error.  Paging is delegated to
    return_fixed_courses.
    """
    try:
        user = request.user
    except:
        user = AnonymousUser()

    if action not in ["homefalls", "all", "hot", "latest", "my", "search", "rolling"]:
        return JsonResponse({"success": False, "errmsg": "not support other actions except homefalls all hot latest rolling and my"})

    def get_courses_depend_action():
        """
        Return courses depend on action
        action: [homefalls, hot, lastest, my, search]
        homefalls: get all courses
        hot: Number of attended people > ?
        lastest: News last week
        my: I registered
        all: like 'homefalls'
        """
        courses = get_courses(user, request.META.get('HTTP_HOST'))
        courses = sort_by_announcement(courses)
        courses_list = []
        if action == "latest":
            default_count = 20
            if len(courses) < default_count:
                default_count = len(courses)
            courses_list = courses[0:default_count]
        elif action == "my":
            # filter my registered courses
            for course in courses:
                if registered_for_course(course, user):
                    courses_list.append(course)
        elif action == "rolling":
            default_count = 5
            courses_list = courses[0:default_count]
        elif action == 'search':
            keyword = request.GET.get("keyword")
            if keyword:
                # BUG FIX: a leftover debug ``print`` of every course's match
                # result was emitted here; removed.
                for c in courses:
                    if keyword in c.org or keyword in c.id or keyword in c.display_name_with_default:
                        courses_list.append(c)
        else:
            # homefalls / all / hot: no additional filtering.
            courses_list = courses

        return courses_list

    courses = get_courses_depend_action()
    return return_fixed_courses(request, courses, user, action)
def _course_json(course, course_id):
    # Recursively serialize a course/xblock subtree into a plain dict for
    # the mobile API: display name, locator id, category, draft flag,
    # container flag and (for containers) the serialized children.
    locator = loc_mapper().translate_location(course_id, course.location, published=False, add_entry_if_missing=True)
    is_container = course.has_children

    result = {
        'display_name': course.display_name,
        'id': unicode(locator),
        'category': course.category,
        'is_draft': getattr(course, 'is_draft', False),
        'is_container': is_container
    }

    if is_container:
        result['children'] = [_course_json(child, course_id) for child in course.get_children()]

    category = result['category']
    # NOTE(review): these media URLs are hard-coded external placeholders,
    # not real content locations -- presumably demo stubs; confirm before
    # shipping.
    if result['category'] == 'video':
        result[category + '-url'] = "http://www.diandiyun.com/Clip_480_5sec_6mbps_h264.mp4"
    elif result['category'] == 'problem':
        result[category + '-url'] = "http://music.163.com/"

    return result
def mobi_course_info(request, course, action=None):
    """
    Build the JSON-serializable summary dict for ``course``.

    For list-type actions, try to use a course-local ``mobi-logo-img.jpg``
    asset as the thumbnail; on any failure fall back to the standard course
    image URL.
    """
    course_logo = course_image_url(course)
    imgurl = course_logo
    if action in ["homefalls", "all", "hot", "latest", "my", "search"]:
        try:
            course_mini_info = course.id.split('/')
            asset_location = StaticContent.compute_location(course_mini_info[0], course_mini_info[1], 'mobi-logo-img.jpg')
            imgurl = StaticContent.get_url_path_from_location(asset_location)
        except:
            # BUG FIX: failures were reported via bare ``print`` statements
            # even though the inline comment promised to "load this info to
            # log"; use the module logger instead of stdout.
            log.warning("failed to load mobi image for course %s", course.id)

    return {
        "id": course.id.replace('/', '.'),
        "name": course.display_name_with_default,
        "logo": request.get_host() + course_image_url(course),
        "org": course.display_org_with_default,
        "course_number": course.display_number_with_default,
        "start_date": course.start.strftime("%Y-%m-%d"),
        "about": get_course_about_section(course, 'short_description'),
        "category": course.category,
        "imgurl": request.get_host() + imgurl
    }
def _course_info_content(html_parsed):
    """
    Constructs the HTML for the course info update, not including the header.

    ``html_parsed`` is the parsed update element whose first child is the
    date header; the header's tail text plus any following elements form
    the update body.
    """
    # BUG FIX: the single-element branch returned html_parsed[0].tail
    # directly, which is None when the header has no tail text, while the
    # multi-element branch already normalized None to "".  Normalize in
    # both cases so callers always get a string.
    content = html_parsed[0].tail if html_parsed[0].tail is not None else ""
    if len(html_parsed) > 1:
        content += "\n".join([html.tostring(ele) for ele in html_parsed[1:]])
    return content
def parse_updates_html_str(html_str):
    """
    Parse a course-updates HTML blob into ``{"updates": [...]}`` where each
    update carries an id, its date header text, and its body content.
    """
    try:
        course_html_parsed = html.fromstring(html_str)
    except:
        # BUG FIX: this called django.utils.html.eacape (a typo), which
        # raised AttributeError instead of escaping the fallback string.
        escaped = django.utils.html.escape(html_str)
        course_html_parsed = html.fromstring(escaped)

    course_upd_collection = []
    if course_html_parsed.tag == 'section':
        for index, update in enumerate(course_html_parsed):
            if len(update) > 0:
                content = _course_info_content(update)
                # Updates are stored newest-first; number them so the newest
                # entry gets the highest id.
                computer_id = len(course_html_parsed) - index
                payload = {
                    "id": computer_id,
                    "date": update.findtext("h2"),
                    "content": content
                }
                course_upd_collection.append(payload)

    return {"updates": course_upd_collection}
def mobi_course_action(request, course_id, action):
    # Mobile endpoint dispatching on ``action``: "updates" / "handouts"
    # (enrolled users only), "structure", or -- for any other action -- the
    # plain course summary.  All failures, including the deliberate
    # ``raise Exception`` below, collapse into a generic JSON error.
    try:
        # Mobile clients encode course ids with '.' instead of '/'.
        course_id_bak = course_id.replace('.', '/')
        if action in ["updates", "handouts", "structure"]:
            course = get_course_with_access(request.user, course_id_bak, 'see_exists')
            user = request.user
            if not user:
                user = AnonymousUser()
            registered = registered_for_course(course, user)
            if action == "updates" and registered:
                course_updates = get_course_info_section(request, course, action)
                return JsonResponse(parse_updates_html_str(course_updates))
            elif action == "handouts" and registered:
                course_handouts = get_course_info_section(request, course, action)
                return JsonResponse({"handouts": course_handouts})
            elif action == "structure":
                return JsonResponse(_course_json(course, course.location.course_id))
            else:
                # Unregistered user asked for updates/handouts: fall through
                # to the generic error response below.
                raise Exception
        else:
            course = get_course_with_access(request.user, course_id_bak, 'see_exists')
            return JsonResponse(mobi_course_info(request, course))
    except:
        return JsonResponse({"success": False, "errmsg": "access denied!"})
def render_accordion(request, course, chapter, section, field_data_cache):
    """
    Render the courseware navigation accordion opened to the given position.

    ``chapter`` and ``section`` are url_names; either may be '' or None,
    which yields the default accordion.  Returns the rendered HTML string.
    """
    user = User.objects.prefetch_related("groups").get(id=request.user.id)
    request.user = user  # keep just one instance of User

    # Grab the table of contents for the current position.
    toc = toc_for_course(user, request, course, chapter, section, field_data_cache)

    context = dict(template_imports)
    context.update({
        'toc': toc,
        'course_id': course.id,
        'csrf': csrf(request)['csrf_token'],
        'due_date_display_format': course.due_date_display_format,
    })
    return render_to_string('courseware/accordion.html', context)
def get_current_child(xmodule):
    """
    Get the display item at ``xmodule.position`` for an xmodule that has a
    position and children.  If the xmodule has no position attribute the
    result is None; if the position is unset or out of bounds, the first
    child is returned.  Returns None only when there are no children at all.
    """
    if not hasattr(xmodule, 'position'):
        return None

    # position is 1-indexed; treat "unset" as the first child.
    pos = 0 if xmodule.position is None else xmodule.position - 1

    children = xmodule.get_display_items()
    if not children:
        return None
    if 0 <= pos < len(children):
        return children[pos]
    # Something is wrong with the stored position; default to first child.
    return children[0]
def redirect_to_course_position(course_module):
    """
    Return a redirect to the user's current place in the course.

    First visit: redirects to COURSE/CHAPTER/SECTION.  Later visits:
    redirects to COURSE/CHAPTER only, and that view finds the current
    section and explains the stored position is being reused.  When no
    position exists in the course or chapter, the first child is chosen.
    """
    chapter = get_current_child(course_module)
    if chapter is None:
        # oops. Something bad has happened.
        raise Http404("No chapter found when loading current position in course")

    urlargs = {'course_id': course_module.id, 'chapter': chapter.url_name}
    if course_module.position is not None:
        return redirect(reverse('courseware_chapter', kwargs=urlargs))

    # No stored position: descend to the (default first) section.
    section = get_current_child(chapter)
    if section is None:
        raise Http404("No section found when loading current position in course")

    urlargs['section'] = section.url_name
    return redirect(reverse('courseware_section', kwargs=urlargs))
def save_child_position(seq_module, child_name):
    """
    Persist ``seq_module.position`` so it points at the display child whose
    url_name equals ``child_name``.  Saves only when the position actually
    changes, to avoid redundant KeyValueStore writes.
    """
    for idx, child in enumerate(seq_module.get_display_items(), start=1):
        if child.url_name != child_name:
            continue
        if seq_module.position != idx:
            seq_module.position = idx
            # Persist the new position to the underlying KeyValueStore.
            seq_module.save()
def chat_settings(course, user):
    """
    Returns a dict containing the settings required to connect to a
    Jabber chat server and room, or None when JABBER_DOMAIN is unset.
    """
    domain = getattr(settings, "JABBER_DOMAIN", None)
    if domain is None:
        log.warning('You must set JABBER_DOMAIN in the settings to '
                    'enable the chat widget')
        return None

    # Jabber doesn't like slashes, so replace with dashes.
    room = "{ID}_class".format(ID=course.id.replace('/', '-'))

    # TODO: clearly this needs to be something other than the username
    # should also be something that's not necessarily tied to a
    # particular course
    jid = "{USER}@{DOMAIN}".format(USER=user.username, DOMAIN=domain)

    return {
        'domain': domain,
        'room': room,
        'username': jid,
        'password': jid,
    }
@login_required
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def index(request, course_id, chapter=None, section=None,
          position=None):
    """
    Displays courseware accordion and associated content.  If course, chapter,
    and section are all specified, renders the page, or returns an error if
    they are invalid.

    If section is not specified, displays the accordion opened to the right
    chapter.

    If neither chapter or section are specified, redirects to user's most
    recent chapter, or the first chapter if this is the user's first visit.

    Arguments:
     - request   : HTTP request
     - course_id : course id (str: ORG/course/URL_NAME)
     - chapter   : chapter url_name (str)
     - section   : section url_name (str)
     - position  : position in module, eg of <sequential> module (str)

    Returns:
     - HTTPresponse
    """
    user = User.objects.prefetch_related("groups").get(id=request.user.id)
    request.user = user  # keep just one instance of User
    course = get_course_with_access(user, course_id, 'load', depth=2)
    staff_access = has_access(user, course, 'staff')
    registered = registered_for_course(course, user)
    if not registered:
        # TODO (vshnayder): do course instructors need to be registered to see course?
        log.debug(u'User %s tried to view course %s but is not enrolled', user, course.location.url())
        return redirect(reverse('about_course', args=[course.id]))

    # Masquerade state; when staff masquerades as a student, some missing
    # chapters/sections redirect instead of 404ing (see below).
    masq = setup_masquerade(request, staff_access)

    try:
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, user, course, depth=2)

        course_module = get_module_for_descriptor(user, request, course, field_data_cache, course.id)
        if course_module is None:
            log.warning(u'If you see this, something went wrong: if we got this'
                        u' far, should have gotten a course module for this user')
            return redirect(reverse('about_course', args=[course.id]))

        if chapter is None:
            return redirect_to_course_position(course_module)

        context = {
            'csrf': csrf(request)['csrf_token'],
            'accordion': render_accordion(request, course, chapter, section, field_data_cache),
            'COURSE_TITLE': course.display_name_with_default,
            'course': course,
            'init': '',
            'fragment': Fragment(),
            'staff_access': staff_access,
            'masquerade': masq,
            'xqa_server': settings.FEATURES.get('USE_XQA_SERVER', 'http://xqa:server@content-qa.mitx.mit.edu/xqa'),
            'reverifications': fetch_reverify_banner_info(request, course_id),
        }

        # Only show the chat if it's enabled by the course and in the
        # settings.
        show_chat = course.show_chat and settings.FEATURES['ENABLE_CHAT']
        if show_chat:
            context['chat'] = chat_settings(course, user)
            # If we couldn't load the chat settings, then don't show
            # the widget in the courseware.
            if context['chat'] is None:
                show_chat = False
        context['show_chat'] = show_chat

        chapter_descriptor = course.get_child_by(lambda m: m.url_name == chapter)
        if chapter_descriptor is not None:
            # Remember the chapter the user is viewing.
            save_child_position(course_module, chapter)
        else:
            raise Http404('No chapter descriptor found with name {}'.format(chapter))

        chapter_module = course_module.get_child_by(lambda m: m.url_name == chapter)
        if chapter_module is None:
            # User may be trying to access a chapter that isn't live yet
            if masq=='student':  # if staff is masquerading as student be kinder, don't 404
                log.debug('staff masq as student: no chapter %s' % chapter)
                return redirect(reverse('courseware', args=[course.id]))
            raise Http404

        if section is not None:
            section_descriptor = chapter_descriptor.get_child_by(lambda m: m.url_name == section)
            if section_descriptor is None:
                # Specifically asked-for section doesn't exist
                if masq=='student':  # if staff is masquerading as student be kinder, don't 404
                    log.debug('staff masq as student: no section %s' % section)
                    return redirect(reverse('courseware', args=[course.id]))
                raise Http404

            # cdodge: this looks silly, but let's refetch the section_descriptor with depth=None
            # which will prefetch the children more efficiently than doing a recursive load
            section_descriptor = modulestore().get_instance(course.id, section_descriptor.location, depth=None)

            # Load all descendants of the section, because we're going to display its
            # html, which in general will need all of its children
            section_field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
                course_id, user, section_descriptor, depth=None)

            section_module = get_module_for_descriptor(request.user,
                                                       request,
                                                       section_descriptor,
                                                       section_field_data_cache,
                                                       course_id,
                                                       position
                                                       )
            if section_module is None:
                # User may be trying to be clever and access something
                # they don't have access to.
                raise Http404

            # Save where we are in the chapter
            save_child_position(chapter_module, section)
            context['fragment'] = section_module.render('student_view')
            context['section_title'] = section_descriptor.display_name_with_default
        else:
            # section is none, so display a message
            prev_section = get_current_child(chapter_module)
            if prev_section is None:
                # Something went wrong -- perhaps this chapter has no sections visible to the user
                raise Http404
            prev_section_url = reverse('courseware_section', kwargs={'course_id': course_id,
                                                                     'chapter': chapter_descriptor.url_name,
                                                                     'section': prev_section.url_name})
            context['fragment'] = Fragment(content=render_to_string(
                'courseware/welcome-back.html',
                {
                    'course': course,
                    'chapter_module': chapter_module,
                    'prev_section': prev_section,
                    'prev_section_url': prev_section_url
                }
            ))

        result = render_to_response('courseware/courseware.html', context)
    except Exception as e:
        if isinstance(e, Http404):
            # let it propagate
            raise

        # In production, don't want to let a 500 out for any reason
        if settings.DEBUG:
            raise
        else:
            log.exception("Error in index view: user={user}, course={course},"
                          " chapter={chapter} section={section}"
                          "position={position}".format(
                              user=user,
                              course=course,
                              chapter=chapter,
                              section=section,
                              position=position
                          ))
            try:
                result = render_to_response('courseware/courseware-error.html',
                                            {'staff_access': staff_access,
                                             'course': course})
            except:
                # Let the exception propagate, relying on global config to at
                # at least return a nice error message
                log.exception("Error while rendering courseware-error page")
                raise

    return result
@login_required
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def mobi_index(request, course_id, chapter=None, section=None,
               position=None):
    # Mobile variant of index(): same navigation/position logic, but
    # renders the 'mobi_student_view' fragment into the
    # 'wechat/mobi_courseware.html' template.
    user = User.objects.prefetch_related("groups").get(id=request.user.id)
    request.user = user  # keep just one instance of User
    course = get_course_with_access(user, course_id, 'load', depth=2)
    staff_access = has_access(user, course, 'staff')
    registered = registered_for_course(course, user)
    if not registered:
        # TODO (vshnayder): do course instructors need to be registered to see course?
        log.debug(u'User %s tried to view course %s but is not enrolled', user, course.location.url())
        return redirect(reverse('about_course', args=[course.id]))

    # Masquerade state; when staff masquerades as a student, some missing
    # chapters/sections redirect instead of 404ing (see below).
    masq = setup_masquerade(request, staff_access)

    try:
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, user, course, depth=2)

        course_module = get_module_for_descriptor(user, request, course, field_data_cache, course.id)
        if course_module is None:
            log.warning(u'If you see this, something went wrong: if we got this'
                        u' far, should have gotten a course module for this user')
            return redirect(reverse('about_course', args=[course.id]))

        if chapter is None:
            return redirect_to_course_position(course_module)

        context = {
            'csrf': csrf(request)['csrf_token'],
            'accordion': render_accordion(request, course, chapter, section, field_data_cache),
            'COURSE_TITLE': course.display_name_with_default,
            'course': course,
            'init': '',
            'fragment': Fragment(),
            'staff_access': staff_access,
            'masquerade': masq,
            'xqa_server': settings.FEATURES.get('USE_XQA_SERVER', 'http://xqa:server@content-qa.mitx.mit.edu/xqa'),
            'reverifications': fetch_reverify_banner_info(request, course_id),
        }

        # Only show the chat if it's enabled by the course and in the
        # settings.
        show_chat = course.show_chat and settings.FEATURES['ENABLE_CHAT']
        if show_chat:
            context['chat'] = chat_settings(course, user)
            # If we couldn't load the chat settings, then don't show
            # the widget in the courseware.
            if context['chat'] is None:
                show_chat = False
        context['show_chat'] = show_chat

        chapter_descriptor = course.get_child_by(lambda m: m.url_name == chapter)
        if chapter_descriptor is not None:
            # Remember the chapter the user is viewing.
            save_child_position(course_module, chapter)
        else:
            raise Http404('No chapter descriptor found with name {}'.format(chapter))

        chapter_module = course_module.get_child_by(lambda m: m.url_name == chapter)
        if chapter_module is None:
            # User may be trying to access a chapter that isn't live yet
            if masq=='student':  # if staff is masquerading as student be kinder, don't 404
                log.debug('staff masq as student: no chapter %s' % chapter)
                return redirect(reverse('courseware', args=[course.id]))
            raise Http404

        if section is not None:
            section_descriptor = chapter_descriptor.get_child_by(lambda m: m.url_name == section)
            if section_descriptor is None:
                # Specifically asked-for section doesn't exist
                if masq=='student':  # if staff is masquerading as student be kinder, don't 404
                    log.debug('staff masq as student: no section %s' % section)
                    return redirect(reverse('courseware', args=[course.id]))
                raise Http404

            # cdodge: this looks silly, but let's refetch the section_descriptor with depth=None
            # which will prefetch the children more efficiently than doing a recursive load
            section_descriptor = modulestore().get_instance(course.id, section_descriptor.location, depth=None)

            # Load all descendants of the section, because we're going to display its
            # html, which in general will need all of its children
            section_field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
                course_id, user, section_descriptor, depth=None)

            section_module = get_module_for_descriptor(request.user,
                                                       request,
                                                       section_descriptor,
                                                       section_field_data_cache,
                                                       course_id,
                                                       position
                                                       )
            if section_module is None:
                # User may be trying to be clever and access something
                # they don't have access to.
                raise Http404

            # Save where we are in the chapter
            save_child_position(chapter_module, section)
            # Mobile-specific render view.
            context['fragment'] = section_module.render('mobi_student_view')
            context['section_title'] = section_descriptor.display_name_with_default
        else:
            # section is none, so display a message
            prev_section = get_current_child(chapter_module)
            if prev_section is None:
                # Something went wrong -- perhaps this chapter has no sections visible to the user
                raise Http404
            prev_section_url = reverse('courseware_section', kwargs={'course_id': course_id,
                                                                     'chapter': chapter_descriptor.url_name,
                                                                     'section': prev_section.url_name})
            context['fragment'] = Fragment(content=render_to_string(
                'courseware/welcome-back.html',
                {
                    'course': course,
                    'chapter_module': chapter_module,
                    'prev_section': prev_section,
                    'prev_section_url': prev_section_url
                }
            ))

        result = render_to_response('wechat/mobi_courseware.html', context)
    except Exception as e:
        if isinstance(e, Http404):
            # let it propagate
            raise

        # In production, don't want to let a 500 out for any reason
        if settings.DEBUG:
            raise
        else:
            log.exception("Error in index view: user={user}, course={course},"
                          " chapter={chapter} section={section}"
                          "position={position}".format(
                              user=user,
                              course=course,
                              chapter=chapter,
                              section=section,
                              position=position
                          ))
            try:
                result = render_to_response('courseware/courseware-error.html',
                                            {'staff_access': staff_access,
                                             'course': course})
            except:
                # Let the exception propagate, relying on global config to at
                # at least return a nice error message
                log.exception("Error while rendering courseware-error page")
                raise

    return result
def mobi_directory(request, course_id):
    """
    Render the mobile (wechat) directory page for a course.

    Mirrors the main courseware index view: re-fetches the user, checks
    enrollment, builds the mobile accordion, and renders
    ``wechat/mobi_directory.html``.  Unexpected errors are converted to the
    courseware error page unless ``settings.DEBUG`` is set.
    """
    user = User.objects.prefetch_related("groups").get(id=request.user.id)
    request.user = user  # keep just one instance of User
    course = get_course_with_access(user, course_id, 'load', depth=2)
    staff_access = has_access(user, course, 'staff')
    registered = registered_for_course(course, user)
    # Collect the first video url of every toc entry for the template.
    motoc = mobi_toc_for_course(user, request, course)
    show_list = [toc['show_url'][0] for toc in motoc]
    if not registered:
        # TODO (vshnayder): do course instructors need to be registered to see course?
        log.debug(u'User %s tried to view course %s but is not enrolled', user, course.location.url())
        return redirect(reverse('about_course', args=[course.id]))
    masq = setup_masquerade(request, staff_access)
    try:
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, user, course, depth=2)
        course_module = get_module_for_descriptor(user, request, course, field_data_cache, course.id)
        if course_module is None:
            log.warning(u'If you see this, something went wrong: if we got this'
                        u' far, should have gotten a course module for this user')
            return redirect(reverse('about_course', args=[course.id]))
        context = {
            'csrf': csrf(request)['csrf_token'],
            'accordion': mobi_render_accordion(request, course),
            'COURSE_TITLE': course.display_name_with_default,
            'course': course,
            'init': '',
            'fragment': Fragment(),
            'staff_access': staff_access,
            'masquerade': masq,
            'xqa_server': settings.FEATURES.get('USE_XQA_SERVER', 'http://xqa:server@content-qa.mitx.mit.edu/xqa'),
            'reverifications': fetch_reverify_banner_info(request, course_id),
            # BUGFIX: guard the empty-toc case -- show_list[0] used to raise
            # IndexError (-> HTTP 500) for courses without any video entries.
            'show_url': show_list[0] if show_list else '',
        }
        result = render_to_response('wechat/mobi_directory.html', context)
    except Exception as e:
        if isinstance(e, Http404):
            # let it propagate
            raise
        # In production, don't want to let a 500 out for any reason
        if settings.DEBUG:
            raise
        else:
            log.exception("Error in index view: user={user}, course={course},".format(
                user=user,
                course=course,))
            try:
                result = render_to_response('courseware/courseware-error.html',
                                            {'staff_access': staff_access,
                                             'course': course})
            except:
                # Let the exception propagate, relying on global config to at
                # at least return a nice error message
                log.exception("Error while rendering courseware-error page")
                raise
    return result
def mobi_render_accordion(request, course):
    """Render the wechat accordion (mobile table of contents) to an HTML string."""
    # Re-fetch the user with groups prefetched and keep a single instance.
    user = User.objects.prefetch_related("groups").get(id=request.user.id)
    request.user = user  # keep just one instance of User
    context = {
        'toc': mobi_toc_for_course(user, request, course),
        'course_id': course.id,
        'csrf': csrf(request)['csrf_token'],
        'due_date_display_format': course.due_date_display_format,
    }
    # Template helper imports win on key collisions, matching the previous
    # dict([...] + template_imports.items()) construction order.
    context.update(template_imports)
    return render_to_string('wechat/mobi_accordion.html', context)
@ensure_csrf_cookie
def jump_to_id(request, course_id, module_id):
    """
    This entry point allows for a shorter version of a jump to where just the id of the element is
    passed in. This assumes that id is unique within the course_id namespace
    """
    course_location = CourseDescriptor.id_to_location(course_id)
    search_location = Location(
        'i4x', course_location.org, course_location.course, None, module_id)
    matches = modulestore().get_items(search_location, course_id=course_id)
    # No match is a 404; several matches means the id is not unique, so warn
    # and fall back to the first one found.
    if not matches:
        raise Http404("Could not find id = {0} in course_id = {1}. Referer = {2}".
                      format(module_id, course_id, request.META.get("HTTP_REFERER", "")))
    if len(matches) > 1:
        log.warning("Multiple items found with id = {0} in course_id = {1}. Referer = {2}. Using first found {3}...".
                    format(module_id, course_id, request.META.get("HTTP_REFERER", ""), matches[0].location.url()))
    return jump_to(request, course_id, matches[0].location.url())
@ensure_csrf_cookie
def jump_to(request, course_id, location):
    """
    Show the page that contains a specific location.
    If the location is invalid or not in any class, return a 404.
    Otherwise, delegates to the index view to figure out whether this user
    has access, and what they should see.
    """
    # Complain if the location isn't valid
    try:
        location = Location(location)
    except InvalidLocationError:
        raise Http404("Invalid location")
    # Complain if there's not data for this location
    try:
        (course_id, chapter, section, position) = path_to_location(modulestore(), course_id, location)
    except ItemNotFoundError:
        raise Http404(u"No data at this location: {0}".format(location))
    except NoPathToItem:
        raise Http404(u"This location is not in any class: {0}".format(location))
    # Redirect to the most specific courseware view the resolved path
    # supports; index does all error handling and access control.
    view_name = 'courseware'
    view_kwargs = {'course_id': course_id}
    if chapter is not None:
        view_name = 'courseware_chapter'
        view_kwargs['chapter'] = chapter
        if section is not None:
            view_name = 'courseware_section'
            view_kwargs['section'] = section
            if position is not None:
                view_name = 'courseware_position'
                view_kwargs['position'] = position
    return redirect(view_name, **view_kwargs)
@ensure_csrf_cookie
def course_info(request, course_id):
    """
    Display the course's info.html, or 404 if there is no such course.
    Assumes the course_id is in a valid format.
    """
    course = get_course_with_access(request.user, course_id, 'load')
    staff_access = has_access(request.user, course, 'staff')
    # allow staff to toggle masquerade on info page
    masq = setup_masquerade(request, staff_access)
    return render_to_response('courseware/info.html', {
        'request': request,
        'course_id': course_id,
        'cache': None,
        'course': course,
        'staff_access': staff_access,
        'masquerade': masq,
        'reverifications': fetch_reverify_banner_info(request, course_id),
    })
@ensure_csrf_cookie
def static_tab(request, course_id, tab_slug):
    """
    Display the courses tab with the given name.
    Assumes the course_id is in a valid format.
    """
    course = get_course_with_access(request.user, course_id, 'load')
    # Unknown tab slug or empty tab contents both 404.
    tab = tabs.get_static_tab_by_slug(course, tab_slug)
    if tab is None:
        raise Http404
    contents = tabs.get_static_tab_contents(request, course, tab)
    if contents is None:
        raise Http404
    return render_to_response('courseware/static_tab.html', {
        'course': course,
        'tab': tab,
        'tab_contents': contents,
        'staff_access': has_access(request.user, course, 'staff'),
    })
# TODO arjun: remove when custom tabs in place, see courseware/syllabus.py
@ensure_csrf_cookie
def syllabus(request, course_id):
    """
    Display the course's syllabus.html, or 404 if there is no such course.
    Assumes the course_id is in a valid format.
    """
    course = get_course_with_access(request.user, course_id, 'load')
    context = {
        'course': course,
        'staff_access': has_access(request.user, course, 'staff'),
    }
    return render_to_response('courseware/syllabus.html', context)
def registered_for_course(course, user):
    """
    Return True if user is registered for course, else False
    """
    # Anonymous or missing users are never registered.
    if user is None or not user.is_authenticated():
        return False
    return CourseEnrollment.is_enrolled(user, course.id)
@ensure_csrf_cookie
@cache_if_anonymous
def course_about(request, course_id):
    """
    Display the course "about" (marketing/registration) page.

    Raises Http404 when the external marketing site is enabled, because the
    page is served there instead.  Otherwise renders registration state,
    optional paid-registration pricing/cart data, and the courseware link.
    """
    # The microsite value overrides the global ENABLE_MKTG_SITE flag.
    if microsite.get_value(
        'ENABLE_MKTG_SITE',
        settings.FEATURES.get('ENABLE_MKTG_SITE', False)
    ):
        raise Http404
    course = get_course_with_access(request.user, course_id, 'see_exists')
    registered = registered_for_course(course, request.user)
    # Users with 'load' access go straight into the course; everyone else
    # is linked back to this about page.
    if has_access(request.user, course, 'load'):
        course_target = reverse('info', args=[course.id])
    else:
        course_target = reverse('about_course', args=[course.id])
    show_courseware_link = (has_access(request.user, course, 'load') or
                            settings.FEATURES.get('ENABLE_LMS_MIGRATION'))
    # Note: this is a flow for payment for course registration, not the Verified Certificate flow.
    registration_price = 0
    in_cart = False
    reg_then_add_to_cart_link = ""
    # Pricing requires both shopping-cart feature flags; cart state
    # additionally requires an authenticated user.
    if (settings.FEATURES.get('ENABLE_SHOPPING_CART') and
        settings.FEATURES.get('ENABLE_PAID_COURSE_REGISTRATION')):
        registration_price = CourseMode.min_course_price_for_currency(course_id,
                                                                      settings.PAID_COURSE_REGISTRATION_CURRENCY[0])
        if request.user.is_authenticated():
            cart = shoppingcart.models.Order.get_cart_for_user(request.user)
            in_cart = shoppingcart.models.PaidCourseRegistration.contained_in_order(cart, course_id)
        reg_then_add_to_cart_link = "{reg_url}?course_id={course_id}&enrollment_action=add_to_cart".format(
            reg_url=reverse('register_user'), course_id=course.id)
    # see if we have already filled up all allowed enrollments
    is_course_full = CourseEnrollment.is_course_full(course)
    return render_to_response('courseware/course_about.html',
                              {'course': course,
                               'registered': registered,
                               'course_target': course_target,
                               'registration_price': registration_price,
                               'in_cart': in_cart,
                               'reg_then_add_to_cart_link': reg_then_add_to_cart_link,
                               'show_courseware_link': show_courseware_link,
                               'is_course_full': is_course_full})
@ensure_csrf_cookie
@cache_if_anonymous
def mktg_course_about(request, course_id):
    """
    This is the button that gets put into an iframe on the Drupal site
    """
    try:
        course = get_course_with_access(request.user, course_id, 'see_exists')
    except (ValueError, Http404) as e:
        # if a course does not exist yet, display a coming
        # soon button
        return render_to_response(
            'courseware/mktg_coming_soon.html', {'course_id': course_id}
        )
    registered = registered_for_course(course, request.user)
    can_load = has_access(request.user, course, 'load')
    course_target = reverse('info' if can_load else 'about_course',
                            args=[course.id])
    context = {
        'course': course,
        'registered': registered,
        'allow_registration': has_access(request.user, course, 'enroll'),
        'course_target': course_target,
        'show_courseware_link': (can_load or
                                 settings.FEATURES.get('ENABLE_LMS_MIGRATION')),
        'course_modes': CourseMode.modes_for_course(course.id),
    }
    return render_to_response('courseware/mktg_course_about.html', context)
@login_required
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@transaction.commit_manually
def progress(request, course_id, student_id=None):
    """
    Wraps "_progress" with the manual_transaction context manager just in case
    there are unanticipated errors.
    """
    manual_tx = grades.manual_transaction()
    with manual_tx:
        return _progress(request, course_id, student_id)
def _progress(request, course_id, student_id):
    """
    Unwrapped version of "progress".
    User progress. We show the grade bar and every problem score.
    Course staff are allowed to see the progress of students in their class.

    Raises Http404 for non-staff viewing someone else's progress, and for a
    malformed or unknown student id.
    """
    course = get_course_with_access(request.user, course_id, 'load', depth=None)
    staff_access = has_access(request.user, course, 'staff')
    # BUGFIX: student_id arrives from the URL as a string, so comparing it
    # directly against the integer request.user.id never matched; normalize
    # both sides to strings before comparing.
    if student_id is None or str(student_id) == str(request.user.id):
        # always allowed to see your own profile
        student = request.user
    else:
        # Requesting access to a different student's profile
        if not staff_access:
            raise Http404
        try:
            student = User.objects.get(id=int(student_id))
        except (ValueError, User.DoesNotExist):
            # Robustness: a malformed or unknown id is a 404, not a 500.
            raise Http404
    # NOTE: To make sure impersonation by instructor works, use
    # student instead of request.user in the rest of the function.
    # The pre-fetching of groups is done to make auth checks not require an
    # additional DB lookup (this kills the Progress page in particular).
    student = User.objects.prefetch_related("groups").get(id=student.id)
    courseware_summary = grades.progress_summary(student, request, course)
    grade_summary = grades.grade(student, request, course)
    if courseware_summary is None:
        #This means the student didn't have access to the course (which the instructor requested)
        raise Http404
    context = {
        'course': course,
        'courseware_summary': courseware_summary,
        'grade_summary': grade_summary,
        'staff_access': staff_access,
        'student': student,
        'reverifications': fetch_reverify_banner_info(request, course_id)
    }
    with grades.manual_transaction():
        response = render_to_response('courseware/progress.html', context)
    return response
def fetch_reverify_banner_info(request, course_id):
    """
    Fetches needed context variable to display reverification banner in courseware
    """
    banner_info = defaultdict(list)
    user = request.user
    # Anonymous users never see the banner.
    if not user.id:
        return banner_info
    enrollment = CourseEnrollment.get_or_create_enrollment(request.user, course_id)
    course = course_from_id(course_id)
    reverify_info = single_course_reverification_info(user, course, enrollment)
    if reverify_info:
        banner_info[reverify_info.status].append(reverify_info)
    return banner_info
@login_required
def submission_history(request, course_id, student_username, location):
    """Render an HTML fragment (meant for inclusion elsewhere) that renders a
    history of all state changes made by this user for this problem location.
    Right now this only works for problems because that's all
    StudentModuleHistory records.
    """
    course = get_course_with_access(request.user, course_id, 'load')
    staff_access = has_access(request.user, course, 'staff')
    # Permission Denied if they don't have staff access and are trying to see
    # somebody else's submission history.
    if (student_username != request.user.username) and (not staff_access):
        raise PermissionDenied
    try:
        student = User.objects.get(username=student_username)
        student_module = StudentModule.objects.get(course_id=course_id,
                                                   module_state_key=location,
                                                   student_id=student.id)
    except User.DoesNotExist:
        return HttpResponse(escape("User {0} does not exist.".format(student_username)))
    except StudentModule.DoesNotExist:
        return HttpResponse(escape("{0} has never accessed problem {1}".format(student_username, location)))

    def _fetch_history():
        # Newest state change first.
        return StudentModuleHistory.objects.filter(
            student_module=student_module).order_by('-id')

    history_entries = _fetch_history()
    # If no history records exist, let's force a save to get history started.
    if not history_entries:
        student_module.save()
        history_entries = _fetch_history()
    return render_to_response('courseware/submission_history.html', {
        'history_entries': history_entries,
        'username': student.username,
        'location': location,
        'course_id': course_id
    })
def show_video(request):
    """Render the standalone wechat video player page.

    The video url and course id are read straight from the query string;
    no access checks are performed here.
    """
    showurl = request.GET.get("showurl","")
    course_id = request.GET.get("course_id")
    return render_to_response('wechat/mobi_video.html',{"showurl":showurl, "course_id": course_id}) | agpl-3.0 |
Gustry/inasafe | safe/utilities/resources.py | 2 | 3495 | # coding=utf-8
"""This module contains utilities for locating application resources (img etc).
"""
import os
import codecs
# This import is to enable SIP API V2
# noinspection PyUnresolvedReferences
import qgis # pylint: disable=unused-import
from PyQt4 import QtCore, uic
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "info@inasafe.org"
__revision__ = '$Format:%H$'
def html_footer():
    """Get a standard html footer for wrapping content in.
    :returns: A header containing a web page closing content in html - up to
        and including the body close tag.
    :rtype: str
    """
    file_path = os.path.join(resources_path(), 'footer.html')
    # Consistency fix: read with codecs.open and an explicit utf8 encoding,
    # exactly like html_header() does.  The previous py2-only ``file()``
    # builtin returned raw bytes and does not exist on Python 3.
    with codecs.open(file_path, 'r', encoding='utf8') as footer_file:
        content = footer_file.read()
    return content
def html_header():
    """Get a standard html header for wrapping content in.
    :returns: A header containing a web page preamble in html - up to and
        including the body open tag.
    :rtype: str
    """
    header_path = os.path.join(resources_path(), 'header.html')
    with codecs.open(header_path, 'r', encoding='utf8') as header_file:
        raw_content = header_file.read()
    # Substitute the on-disk resources location into the template.
    return raw_content.replace('PATH', resources_path())
def html_help_header():
    """Get a standard help html header for wrapping content in.
    This one differs from the normal html header in that it will
    include style rules to number headings.
    ..versionadded:: 4.0
    :returns: A header containing a web page preamble in html - up to and
        including the body open tag.
    :rtype: str
    """
    header_path = os.path.join(resources_path(), 'header-help.html')
    with codecs.open(header_path, 'r', encoding='utf8') as header_file:
        raw_content = header_file.read()
    # Substitute the on-disk resources location into the template.
    return raw_content.replace('PATH', resources_path())
def resources_path(*args):
    """Get the path to our resources folder.
    .. versionadded:: 3.0
    Note that in version 3.0 we removed the use of Qt Resource files in
    favour of directly accessing on-disk resources.
    :param args List of path elements e.g. ['img', 'logos', 'image.png']
    :type args: str
    :return: Absolute path to the resources folder.
    :rtype: str
    """
    # The resources folder lives two directories above this module.
    result = os.path.abspath(os.path.join(
        os.path.dirname(__file__), os.path.pardir, os.path.pardir, 'resources'))
    for element in args:
        result = os.path.abspath(os.path.join(result, element))
    return result
def resource_url(path):
    """Get the a local filesystem url to a given resource.
    .. versionadded:: 3.0
    Note that in version 3.0 we removed the use of Qt Resource files in
    favour of directly accessing on-disk resources.
    :param path: Path to resource e.g. /home/timlinux/foo/bar.png
    :type path: str
    :return: A valid file url e.g. file:///home/timlinux/foo/bar.png
    :rtype: str
    """
    # Let Qt handle platform-specific file-url quoting.
    local_url = QtCore.QUrl.fromLocalFile(path)
    return str(local_url.toString())
def get_ui_class(ui_file):
    """Get UI Python class from .ui file.
    Can be filename.ui or subdirectory/filename.ui
    :param ui_file: The file of the ui in safe.gui.ui
    :type ui_file: str
    """
    # BUGFIX: the result of normalising '/' separators was previously
    # computed and then discarded; assign it so subdirectory ui paths work
    # on platforms where os.path.sep != '/'.
    ui_file = os.path.sep.join(ui_file.split('/'))
    ui_file_path = os.path.abspath(
        os.path.join(
            os.path.dirname(__file__),
            os.pardir,
            'gui',
            'ui',
            ui_file
        )
    )
    return uic.loadUiType(ui_file_path)[0]
| gpl-3.0 |
aboganas/frappe | frappe/commands/translate.py | 6 | 2622 | from __future__ import unicode_literals, absolute_import
import click
import frappe
from frappe.commands import pass_context, get_site
# translation
@click.command('build-message-files')
@pass_context
def build_message_files(context):
	"Build message files for translation"
	import frappe.translate
	# Rebuild translation files once per configured site; always tear the
	# frappe runtime down again, even if a site fails.
	for site_name in context.sites:
		try:
			frappe.init(site=site_name)
			frappe.connect()
			frappe.translate.rebuild_all_translation_files()
		finally:
			frappe.destroy()
@click.command('new-language') #, help="Create lang-code.csv for given app")
@pass_context
@click.argument('lang_code') #, help="Language code eg. en")
@click.argument('app') #, help="App name eg. frappe")
def new_language(context, lang_code, app):
	"""Create lang-code.csv for given app"""
	import frappe.translate
	# NOTE(review): this command indexes the context like a dict
	# (context['sites']) while the sibling commands use attribute access
	# (context.sites) -- confirm which form the click context supports.
	if not context['sites']:
		raise Exception('--site is required')
	# init site
	frappe.connect(site=context['sites'][0])
	frappe.translate.write_translations_file(app, lang_code)
	print "File created at ./apps/{app}/{app}/translations/{lang_code}.csv".format(app=app, lang_code=lang_code)
	print "You will need to add the language in frappe/geo/languages.json, if you haven't done it already."
@click.command('get-untranslated')
@click.argument('lang')
@click.argument('untranslated_file')
@click.option('--all', default=False, is_flag=True, help='Get all message strings')
@pass_context
def get_untranslated(context, lang, untranslated_file, all=None):
	"Get untranslated strings for language"
	import frappe.translate
	site = get_site(context)
	# Boot the frappe runtime for the site, run the export, and always
	# tear it down again -- even if the export fails.
	try:
		frappe.init(site=site)
		frappe.connect()
		frappe.translate.get_untranslated(lang, untranslated_file, get_all=all)
	finally:
		frappe.destroy()
@click.command('update-translations')
@click.argument('lang')
@click.argument('untranslated_file')
@click.argument('translated-file')
@pass_context
def update_translations(context, lang, untranslated_file, translated_file):
	"Update translated strings"
	import frappe.translate
	site = get_site(context)
	# Boot the frappe runtime, apply the translations, always tear down.
	try:
		frappe.init(site=site)
		frappe.connect()
		frappe.translate.update_translations(lang, untranslated_file, translated_file)
	finally:
		frappe.destroy()
@click.command('import-translations')
@click.argument('lang')
@click.argument('path')
@pass_context
def import_translations(context, lang, path):
	"Update translated strings"
	import frappe.translate
	site = get_site(context)
	# Boot the frappe runtime, import the file, always tear down.
	try:
		frappe.init(site=site)
		frappe.connect()
		frappe.translate.import_translations(lang, path)
	finally:
		frappe.destroy()
# Click commands exported by this module to the bench CLI.
commands = [
	build_message_files,
	get_untranslated,
	import_translations,
	new_language,
	update_translations,
]
| mit |
Venturi/cms | env/lib/python2.7/site-packages/phonenumbers/shortdata/region_IL.py | 11 | 1057 | """Auto-generated file, do not edit by hand. IL metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_IL = PhoneMetadata(id='IL', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2,4}', possible_number_pattern='\\d{3,5}'),
toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='1(?:0[012]|12)', possible_number_pattern='\\d{3}', example_number='112'),
short_code=PhoneNumberDesc(national_number_pattern='1(?:0(?:[012]|400)|1(?:[013-9]\\d|2)|[2-9]\\d{2})', possible_number_pattern='\\d{3,5}', example_number='1455'),
standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
carrier_specific=PhoneNumberDesc(national_number_pattern='10400', possible_number_pattern='\\d{5}', example_number='10400'),
short_data=True)
| gpl-2.0 |
aapav01/android_kernel_samsung_ms013g-2 | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    # Redirect distutils' build output into the directories the perf
    # Makefile exported via PYTHON_EXTBUILD_LIB / PYTHON_EXTBUILD_TMP.
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib  = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    # Install from the same override directory that build_ext wrote to.
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
# Compiler flags: defaults plus whatever the perf Makefile passed in CFLAGS.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

# Build/staging directories exported by the perf Makefile.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')

# Read the C source list, skipping blank lines and '#' comments.
# BUGFIX: use open() instead of the py2-only file() builtin, and close the
# handle deterministically via a with-statement.
with open('util/python-ext-sources') as sources_file:
    ext_sources = [f.strip() for f in sources_file
                   if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
                 sources = ext_sources,
                 include_dirs = ['util/include'],
                 extra_compile_args = cflags,
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
justasabc/python_tutorials | module/beautifulsoup4-4.3.1/bs4/tests/test_builder_registry.py | 485 | 5374 | """Tests of the builder registry."""
import unittest
from bs4 import BeautifulSoup
from bs4.builder import (
builder_registry as registry,
HTMLParserTreeBuilder,
TreeBuilderRegistry,
)
try:
from bs4.builder import HTML5TreeBuilder
HTML5LIB_PRESENT = True
except ImportError:
HTML5LIB_PRESENT = False
try:
from bs4.builder import (
LXMLTreeBuilderForXML,
LXMLTreeBuilder,
)
LXML_PRESENT = True
except ImportError:
LXML_PRESENT = False
class BuiltInRegistryTest(unittest.TestCase):
    """Test the built-in registry with the default builders registered."""
    # The expected builder for each lookup depends on the optional parsers
    # detected at import time (LXML_PRESENT / HTML5LIB_PRESENT), so most
    # assertions are guarded by those flags.
    def test_combination(self):
        # Feature combinations resolve to the most capable installed builder.
        if LXML_PRESENT:
            self.assertEqual(registry.lookup('fast', 'html'),
                             LXMLTreeBuilder)
        if LXML_PRESENT:
            self.assertEqual(registry.lookup('permissive', 'xml'),
                             LXMLTreeBuilderForXML)
        self.assertEqual(registry.lookup('strict', 'html'),
                          HTMLParserTreeBuilder)
        if HTML5LIB_PRESENT:
            self.assertEqual(registry.lookup('html5lib', 'html'),
                              HTML5TreeBuilder)
    def test_lookup_by_markup_type(self):
        # Markup type alone picks lxml when present, else html5lib for html,
        # else the stdlib parser; xml has no fallback without lxml.
        if LXML_PRESENT:
            self.assertEqual(registry.lookup('html'), LXMLTreeBuilder)
            self.assertEqual(registry.lookup('xml'), LXMLTreeBuilderForXML)
        else:
            self.assertEqual(registry.lookup('xml'), None)
            if HTML5LIB_PRESENT:
                self.assertEqual(registry.lookup('html'), HTML5TreeBuilder)
            else:
                self.assertEqual(registry.lookup('html'), HTMLParserTreeBuilder)
    def test_named_library(self):
        # Builders can also be looked up by library name.
        if LXML_PRESENT:
            self.assertEqual(registry.lookup('lxml', 'xml'),
                             LXMLTreeBuilderForXML)
            self.assertEqual(registry.lookup('lxml', 'html'),
                             LXMLTreeBuilder)
        if HTML5LIB_PRESENT:
            self.assertEqual(registry.lookup('html5lib'),
                              HTML5TreeBuilder)
        self.assertEqual(registry.lookup('html.parser'),
                          HTMLParserTreeBuilder)
    def test_beautifulsoup_constructor_does_lookup(self):
        # You can pass in a string.
        BeautifulSoup("", features="html")
        # Or a list of strings.
        BeautifulSoup("", features=["html", "fast"])
        # You'll get an exception if BS can't find an appropriate
        # builder.
        self.assertRaises(ValueError, BeautifulSoup,
                          "", features="no-such-feature")
class RegistryTest(unittest.TestCase):
    """Test the TreeBuilderRegistry class in general."""
    def setUp(self):
        # Each test gets a fresh, empty registry.
        self.registry = TreeBuilderRegistry()
    def builder_for_features(self, *feature_list):
        # Manufacture and register a throwaway builder class advertising
        # exactly the given features.
        cls = type('Builder_' + '_'.join(feature_list),
                   (object,), {'features' : feature_list})
        self.registry.register(cls)
        return cls
    def test_register_with_no_features(self):
        builder = self.builder_for_features()
        # Since the builder advertises no features, you can't find it
        # by looking up features.
        self.assertEqual(self.registry.lookup('foo'), None)
        # But you can find it by doing a lookup with no features, if
        # this happens to be the only registered builder.
        self.assertEqual(self.registry.lookup(), builder)
    def test_register_with_features_makes_lookup_succeed(self):
        builder = self.builder_for_features('foo', 'bar')
        self.assertEqual(self.registry.lookup('foo'), builder)
        self.assertEqual(self.registry.lookup('bar'), builder)
    def test_lookup_fails_when_no_builder_implements_feature(self):
        builder = self.builder_for_features('foo', 'bar')
        self.assertEqual(self.registry.lookup('baz'), None)
    def test_lookup_gets_most_recent_registration_when_no_feature_specified(self):
        builder1 = self.builder_for_features('foo')
        builder2 = self.builder_for_features('bar')
        self.assertEqual(self.registry.lookup(), builder2)
    def test_lookup_fails_when_no_tree_builders_registered(self):
        self.assertEqual(self.registry.lookup(), None)
    def test_lookup_gets_most_recent_builder_supporting_all_features(self):
        # NOTE(review): has_the_other is rebound below and lacks_one is
        # unused in assertions -- the names look swapped, but only the
        # registration side effects matter to this test.
        has_one = self.builder_for_features('foo')
        has_the_other = self.builder_for_features('bar')
        has_both_early = self.builder_for_features('foo', 'bar', 'baz')
        has_both_late = self.builder_for_features('foo', 'bar', 'quux')
        lacks_one = self.builder_for_features('bar')
        has_the_other = self.builder_for_features('foo')
        # There are two builders featuring 'foo' and 'bar', but
        # the one that also features 'quux' was registered later.
        self.assertEqual(self.registry.lookup('foo', 'bar'),
                         has_both_late)
        # There is only one builder featuring 'foo', 'bar', and 'baz'.
        self.assertEqual(self.registry.lookup('foo', 'bar', 'baz'),
                         has_both_early)
    def test_lookup_fails_when_cannot_reconcile_requested_features(self):
        builder1 = self.builder_for_features('foo', 'bar')
        builder2 = self.builder_for_features('foo', 'baz')
        self.assertEqual(self.registry.lookup('bar', 'baz'), None)
| gpl-3.0 |
ovnicraft/openerp-restaurant | hr_timesheet/__openerp__.py | 6 | 2425 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Time Tracking',
'version': '1.0',
'category': 'Human Resources',
'sequence': 23,
'description': """
This module implements a timesheet system.
==========================================
Each employee can encode and track their time spent on the different projects.
A project is an analytic account and the time spent on a project generates costs on
the analytic account.
Lots of reporting on time and employee tracking are provided.
It is completely integrated with the cost accounting module. It allows you to set
up a management by affair.
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'images': ['images/hr_timesheet_lines.jpeg'],
'depends': ['account', 'hr', 'base', 'hr_attendance', 'process'],
'data': [
'security/ir.model.access.csv',
'security/hr_timesheet_security.xml',
'hr_timesheet_view.xml',
'hr_timesheet_report.xml',
'hr_timesheet_wizard.xml',
'process/hr_timesheet_process.xml',
'wizard/hr_timesheet_sign_in_out_view.xml',
'hr_timesheet_installer.xml',
'hr_timesheet_data.xml'
],
'demo': ['hr_timesheet_demo.xml'],
'test': [
'test/hr_timesheet_users.yml',
'test/test_hr_timesheet.yml',
'test/hr_timesheet_demo.yml',
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
CamelBackNotation/CarnotKE | jyhton/Lib/test/test_inspect.py | 10 | 33875 | import re
import sys
import types
import unittest
import inspect
import linecache
import datetime
from UserList import UserList
from UserDict import UserDict
from test.test_support import run_unittest, check_py3k_warnings, is_jython
with check_py3k_warnings(
("tuple parameter unpacking has been removed", SyntaxWarning),
quiet=True):
from test import inspect_fodder as mod
from test import inspect_fodder2 as mod2
# C module for test_findsource_binary, but note it's not C for Jython :)
import unicodedata
# Functions tested in this suite:
# ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode,
# isbuiltin, isroutine, isgenerator, isgeneratorfunction, getmembers,
# getdoc, getfile, getmodule, getsourcefile, getcomments, getsource,
# getclasstree, getargspec, getargvalues, formatargspec, formatargvalues,
# currentframe, stack, trace, isdatadescriptor
# NOTE: There are some additional tests relating to interaction with
# zipimport in the test_zipimport_support test module.
# Derive the source filename of the fodder module: strip compiled-file
# suffixes (.pyc/.pyo, or Jython's $py.class) back to the .py source.
modfile = mod.__file__
if modfile.endswith(('c', 'o')):
    modfile = modfile[:-1]
elif modfile.endswith('$py.class'):
    modfile = modfile[:-9] + '.py'
import __builtin__
# Manufacture a live traceback object (tb) for the is*() predicate tests.
try:
    1 // 0
except:
    tb = sys.exc_traceback
# Shared fixture instance used by several test classes below.
git = mod.StupidGit()
class IsTestBase(unittest.TestCase):
    """Shared helper for testing inspect's is*() predicates."""
    # Predicates that are expected to be mutually exclusive: an object
    # accepted by one of them must be rejected by all of the others.
    predicates = set([
        inspect.isbuiltin, inspect.isclass, inspect.iscode, inspect.isframe,
        inspect.isfunction, inspect.ismethod, inspect.ismodule,
        inspect.istraceback, inspect.isgenerator,
        inspect.isgeneratorfunction,
    ])

    def istest(self, predicate, exp):
        """Assert that only ``predicate`` accepts the object ``exp`` evaluates to."""
        obj = eval(exp)
        self.assertTrue(predicate(obj), '%s(%s)' % (predicate.__name__, exp))
        for other in self.predicates - set([predicate]):
            # Generator functions are also ordinary functions, so isfunction
            # is allowed to accept them as well.
            genfunc_overlap = (predicate == inspect.isgeneratorfunction
                               and other == inspect.isfunction)
            if genfunc_overlap:
                continue
            self.assertFalse(other(obj), 'not %s(%s)' % (other.__name__, exp))
def generator_function_example(self):
    # Trivial generator function (yields 0 then 1) used as a fixture for
    # the isgenerator/isgeneratorfunction predicate tests.
    count = 0
    while count < 2:
        yield count
        count += 1
class TestPredicates(IsTestBase):
    """Exercise each inspect.is* predicate against a representative object."""
    def test_sixteen(self):
        count = len(filter(lambda x:x.startswith('is'), dir(inspect)))
        # This test is here to remind you to update Doc/library/inspect.rst,
        # which claims there are 16 such functions.
        expected = 16
        err_msg = "There are %d (not %d) is* functions" % (count, expected)
        self.assertEqual(count, expected, err_msg)
    def test_excluding_predicates(self):
        #self.istest(inspect.isbuiltin, 'sys.exit') # Not valid for Jython
        self.istest(inspect.isbuiltin, '[].append')
        self.istest(inspect.iscode, 'mod.spam.func_code')
        self.istest(inspect.isframe, 'tb.tb_frame')
        self.istest(inspect.isfunction, 'mod.spam')
        self.istest(inspect.ismethod, 'mod.StupidGit.abuse')
        self.istest(inspect.ismethod, 'git.argue')
        self.istest(inspect.ismodule, 'mod')
        self.istest(inspect.istraceback, 'tb')
        self.istest(inspect.isdatadescriptor, '__builtin__.file.closed')
        self.istest(inspect.isdatadescriptor, '__builtin__.file.softspace')
        self.istest(inspect.isgenerator, '(x for x in xrange(2))')
        self.istest(inspect.isgeneratorfunction, 'generator_function_example')
        # getset/member descriptor types only exist on CPython builds.
        if hasattr(types, 'GetSetDescriptorType'):
            self.istest(inspect.isgetsetdescriptor,
                        'type(tb.tb_frame).f_locals')
        else:
            self.assertFalse(inspect.isgetsetdescriptor(type(tb.tb_frame).f_locals))
        if hasattr(types, 'MemberDescriptorType'):
            # Not valid for Jython
            # self.istest(inspect.ismemberdescriptor, 'datetime.timedelta.days')
            pass
        else:
            self.assertFalse(inspect.ismemberdescriptor(datetime.timedelta.days))
    def test_isroutine(self):
        self.assertTrue(inspect.isroutine(mod.spam))
        self.assertTrue(inspect.isroutine([].count))
    def test_isclass(self):
        self.istest(inspect.isclass, 'mod.StupidGit')
        self.assertTrue(inspect.isclass(list))
        class newstyle(object): pass
        self.assertTrue(inspect.isclass(newstyle))
        # An instance that answers any attribute must still not look like
        # a class to isclass().
        class CustomGetattr(object):
            def __getattr__(self, attr):
                return None
        self.assertFalse(inspect.isclass(CustomGetattr()))
    def test_get_slot_members(self):
        class C(object):
            __slots__ = ("a", "b")
        x = C()
        x.a = 42
        # Only the *assigned* slot may appear in getmembers() output.
        members = dict(inspect.getmembers(x))
        self.assertIn('a', members)
        self.assertNotIn('b', members)
    def test_isabstract(self):
        from abc import ABCMeta, abstractmethod
        class AbstractClassExample(object):
            __metaclass__ = ABCMeta
            @abstractmethod
            def foo(self):
                pass
        class ClassExample(AbstractClassExample):
            def foo(self):
                pass
        a = ClassExample()
        # Test general behaviour.
        self.assertTrue(inspect.isabstract(AbstractClassExample))
        self.assertFalse(inspect.isabstract(ClassExample))
        self.assertFalse(inspect.isabstract(a))
        self.assertFalse(inspect.isabstract(int))
        self.assertFalse(inspect.isabstract(5))
class TestInterpreterStack(IsTestBase):
    """Tests for stack()/trace() introspection.

    Relies on the side effects git.abuse() records into the fodder module
    (mod.st, mod.fr) and onto the fixture instance (git.ex, git.tr).
    """
    def __init__(self, *args, **kwargs):
        unittest.TestCase.__init__(self, *args, **kwargs)
        # Populate mod.st / mod.fr / git.ex / git.tr before any test runs.
        git.abuse(7, 8, 9)
    def test_abuse_done(self):
        self.istest(inspect.istraceback, 'git.ex[2]')
        self.istest(inspect.isframe, 'mod.fr')
    def test_stack(self):
        # Expected tuples: (filename, lineno, function, code_context, index).
        self.assertTrue(len(mod.st) >= 5)
        self.assertEqual(mod.st[0][1:],
                         (modfile, 16, 'eggs', [' st = inspect.stack()\n'], 0))
        self.assertEqual(mod.st[1][1:],
                         (modfile, 9, 'spam', [' eggs(b + d, c + f)\n'], 0))
        self.assertEqual(mod.st[2][1:],
                         (modfile, 43, 'argue', [' spam(a, b, c)\n'], 0))
        self.assertEqual(mod.st[3][1:],
                         (modfile, 39, 'abuse', [' self.argue(a, b, c)\n'], 0))
    def test_trace(self):
        self.assertEqual(len(git.tr), 3)
        self.assertEqual(git.tr[0][1:], (modfile, 43, 'argue',
                                         [' spam(a, b, c)\n'], 0))
        self.assertEqual(git.tr[1][1:], (modfile, 9, 'spam',
                                         [' eggs(b + d, c + f)\n'], 0))
        self.assertEqual(git.tr[2][1:], (modfile, 18, 'eggs',
                                         [' q = y // 0\n'], 0))
    def test_frame(self):
        args, varargs, varkw, locals = inspect.getargvalues(mod.fr)
        self.assertEqual(args, ['x', 'y'])
        self.assertEqual(varargs, None)
        self.assertEqual(varkw, None)
        self.assertEqual(locals, {'x': 11, 'p': 11, 'y': 14})
        self.assertEqual(inspect.formatargvalues(args, varargs, varkw, locals),
                         '(x=11, y=14)')
    def test_previous_frame(self):
        # mod.fr.f_back is spam()'s frame, whose signature uses Py2 nested
        # tuple parameters.
        args, varargs, varkw, locals = inspect.getargvalues(mod.fr.f_back)
        self.assertEqual(args, ['a', 'b', 'c', 'd', ['e', ['f']]])
        self.assertEqual(varargs, 'g')
        self.assertEqual(varkw, 'h')
        self.assertEqual(inspect.formatargvalues(args, varargs, varkw, locals),
                         '(a=7, b=8, c=9, d=3, (e=4, (f=5,)), *g=(), **h={})')
class GetSourceBase(unittest.TestCase):
    """Base class for tests that compare inspect.getsource() output against
    hand-picked 1-based line ranges of a fodder module."""

    # The module whose source file is loaded; subclasses must override.
    fodderFile = None

    def __init__(self, *args, **kwargs):
        unittest.TestCase.__init__(self, *args, **kwargs)
        fodder_path = inspect.getsourcefile(self.fodderFile)
        with open(fodder_path) as fp:
            self.source = fp.read()

    def sourcerange(self, top, bottom):
        """Return lines top..bottom (1-based, inclusive), newline-terminated."""
        all_lines = self.source.split("\n")
        wanted = all_lines[top - 1:bottom]
        return "\n".join(wanted) + "\n"

    def assertSourceEqual(self, obj, top, bottom):
        """Check that getsource(obj) equals the given fodder line range."""
        self.assertEqual(inspect.getsource(obj),
                         self.sourcerange(top, bottom))
class TestRetrievingSourceCode(GetSourceBase):
    """getmembers/getsource/getmodule behaviour, checked against the 'mod'
    fodder file (line numbers below are positions in that file)."""
    fodderFile = mod
    def test_getclasses(self):
        classes = inspect.getmembers(mod, inspect.isclass)
        self.assertEqual(classes,
                         [('FesteringGob', mod.FesteringGob),
                          ('MalodorousPervert', mod.MalodorousPervert),
                          ('ParrotDroppings', mod.ParrotDroppings),
                          ('StupidGit', mod.StupidGit)])
        tree = inspect.getclasstree([cls[1] for cls in classes], 1)
        self.assertEqual(tree,
                         [(mod.ParrotDroppings, ()),
                          (mod.StupidGit, ()),
                          [(mod.MalodorousPervert, (mod.StupidGit,)),
                           [(mod.FesteringGob, (mod.MalodorousPervert,
                                                mod.ParrotDroppings))
                            ]
                           ]
                          ])
    def test_getfunctions(self):
        functions = inspect.getmembers(mod, inspect.isfunction)
        self.assertEqual(functions, [('eggs', mod.eggs),
                                     ('spam', mod.spam)])
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_getdoc(self):
        self.assertEqual(inspect.getdoc(mod), 'A module docstring.')
        self.assertEqual(inspect.getdoc(mod.StupidGit),
                         'A longer,\n\nindented\n\ndocstring.')
        self.assertEqual(inspect.getdoc(git.abuse),
                         'Another\n\ndocstring\n\ncontaining\n\ntabs')
    def test_cleandoc(self):
        self.assertEqual(inspect.cleandoc('An\n indented\n docstring.'),
                         'An\nindented\ndocstring.')
    def test_getcomments(self):
        self.assertEqual(inspect.getcomments(mod), '# line 1\n')
        self.assertEqual(inspect.getcomments(mod.StupidGit), '# line 20\n')
    def test_getmodule(self):
        # Check actual module
        self.assertEqual(inspect.getmodule(mod), mod)
        # Check class (uses __module__ attribute)
        self.assertEqual(inspect.getmodule(mod.StupidGit), mod)
        # Check a method (no __module__ attribute, falls back to filename)
        self.assertEqual(inspect.getmodule(mod.StupidGit.abuse), mod)
        # Do it again (check the caching isn't broken)
        self.assertEqual(inspect.getmodule(mod.StupidGit.abuse), mod)
        # Check a builtin
        self.assertEqual(inspect.getmodule(str), sys.modules["__builtin__"])
        # Check filename override
        self.assertEqual(inspect.getmodule(None, modfile), mod)
    def test_getsource(self):
        self.assertSourceEqual(git.abuse, 29, 39)
        self.assertSourceEqual(mod.StupidGit, 21, 46)
    def test_getsourcefile(self):
        self.assertEqual(inspect.getsourcefile(mod.spam), modfile)
        self.assertEqual(inspect.getsourcefile(git.abuse), modfile)
        # A code object whose file doesn't exist has no source file...
        fn = "_non_existing_filename_used_for_sourcefile_test.py"
        co = compile("None", fn, "exec")
        self.assertEqual(inspect.getsourcefile(co), None)
        # ...unless linecache has been primed with its contents.
        linecache.cache[co.co_filename] = (1, None, "None", co.co_filename)
        self.assertEqual(inspect.getsourcefile(co), fn)
    def test_getfile(self):
        self.assertEqual(inspect.getfile(mod.StupidGit), mod.__file__)
    def test_getmodule_recursion(self):
        from types import ModuleType
        name = '__inspect_dummy'
        m = sys.modules[name] = ModuleType(name)
        m.__file__ = "<string>" # hopefully not a real filename...
        m.__loader__ = "dummy" # pretend the filename is understood by a loader
        exec "def x(): pass" in m.__dict__
        self.assertEqual(inspect.getsourcefile(m.x.func_code), '<string>')
        del sys.modules[name]
        # getmodule() on a pseudo-file code object must not recurse forever.
        inspect.getmodule(compile('a=10','','single'))
    def test_proceed_with_fake_filename(self):
        '''doctest monkeypatches linecache to enable inspection'''
        fn, source = '<test>', 'def x(): pass\n'
        getlines = linecache.getlines
        def monkey(filename, module_globals=None):
            if filename == fn:
                return source.splitlines(True)
            else:
                return getlines(filename, module_globals)
        linecache.getlines = monkey
        try:
            ns = {}
            exec compile(source, fn, 'single') in ns
            inspect.getsource(ns["x"])
        finally:
            # Always restore the real linecache hook.
            linecache.getlines = getlines
class TestDecorators(GetSourceBase):
    """getsource() must cope with decorated functions in the mod2 fodder."""
    fodderFile = mod2
    def test_wrapped_decorator(self):
        self.assertSourceEqual(mod2.wrapped, 14, 17)
    def test_replacing_decorator(self):
        self.assertSourceEqual(mod2.gone, 9, 10)
class TestOneliners(GetSourceBase):
    """getsource() on one-line and oddly-wrapped definitions in mod2;
    the numeric arguments are line ranges inside that fodder file."""
    fodderFile = mod2
    def test_oneline_lambda(self):
        # Test inspect.getsource with a one-line lambda function.
        self.assertSourceEqual(mod2.oll, 25, 25)
    def test_threeline_lambda(self):
        # Test inspect.getsource with a three-line lambda function,
        # where the second and third lines are _not_ indented.
        self.assertSourceEqual(mod2.tll, 28, 30)
    def test_twoline_indented_lambda(self):
        # Test inspect.getsource with a two-line lambda function,
        # where the second line _is_ indented.
        self.assertSourceEqual(mod2.tlli, 33, 34)
    def test_onelinefunc(self):
        # Test inspect.getsource with a regular one-line function.
        self.assertSourceEqual(mod2.onelinefunc, 37, 37)
    def test_manyargs(self):
        # Test inspect.getsource with a regular function where
        # the arguments are on two lines and _not_ indented and
        # the body on the second line with the last arguments.
        self.assertSourceEqual(mod2.manyargs, 40, 41)
    def test_twolinefunc(self):
        # Test inspect.getsource with a regular function where
        # the body is on two lines, following the argument list and
        # continued on the next line by a \\.
        self.assertSourceEqual(mod2.twolinefunc, 44, 45)
    def test_lambda_in_list(self):
        # Test inspect.getsource with a one-line lambda function
        # defined in a list, indented.
        self.assertSourceEqual(mod2.a[1], 49, 49)
    def test_anonymous(self):
        # Test inspect.getsource with a lambda function defined
        # as argument to another function.
        self.assertSourceEqual(mod2.anonymous, 55, 55)
class TestBuggyCases(GetSourceBase):
    """Regression tests for historically buggy getsource()/findsource()
    cases, checked against line ranges in the mod2 fodder file."""
    fodderFile = mod2
    def test_with_comment(self):
        self.assertSourceEqual(mod2.with_comment, 58, 59)
    def test_multiline_sig(self):
        self.assertSourceEqual(mod2.multiline_sig[0], 63, 64)
    def test_nested_class(self):
        self.assertSourceEqual(mod2.func69().func71, 71, 72)
    def test_one_liner_followed_by_non_name(self):
        self.assertSourceEqual(mod2.func77, 77, 77)
    def test_one_liner_dedent_non_name(self):
        self.assertSourceEqual(mod2.cls82.func83, 83, 83)
    def test_with_comment_instead_of_docstring(self):
        self.assertSourceEqual(mod2.func88, 88, 90)
    def test_method_in_dynamic_class(self):
        self.assertSourceEqual(mod2.method_in_dynamic_class, 95, 97)
    @unittest.skipIf(
        not hasattr(unicodedata, '__file__') or
            unicodedata.__file__[-4:] in (".pyc", ".pyo") or unicodedata.__file__.endswith('$py.class'),
        "unicodedata is not an external binary module")
    def test_findsource_binary(self):
        # Binary extension modules have no retrievable Python source.
        self.assertRaises(IOError, inspect.getsource, unicodedata)
        self.assertRaises(IOError, inspect.findsource, unicodedata)
    @unittest.skipIf(is_jython, "Not working")
    def test_findsource_code_in_linecache(self):
        lines = ["x=1"]
        co = compile(lines[0], "_dynamically_created_file", "exec")
        self.assertRaises(IOError, inspect.findsource, co)
        self.assertRaises(IOError, inspect.getsource, co)
        # Priming linecache makes the dynamic code findable.
        linecache.cache[co.co_filename] = (1, None, lines, co.co_filename)
        self.assertEqual(inspect.findsource(co), (lines,0))
        self.assertEqual(inspect.getsource(co), lines[0])
class _BrokenDataDescriptor(object):
    """
    A broken data descriptor. See bug #1785.
    """
    def __get__(*args):
        raise AssertionError("should not __get__ data descriptors")
    def __set__(*args):
        # Defining __set__ is what makes this a *data* descriptor;
        # classify_class_attrs must never trigger it.
        raise RuntimeError
    def __getattr__(*args):
        raise AssertionError("should not __getattr__ data descriptors")
class _BrokenMethodDescriptor(object):
    """
    A broken method descriptor. See bug #1785.
    """
    # No __set__: a non-data (method) descriptor. classify_class_attrs
    # must classify it without ever invoking __get__ or __getattr__.
    def __get__(*args):
        raise AssertionError("should not __get__ method descriptors")
    def __getattr__(*args):
        raise AssertionError("should not __getattr__ method descriptors")
def attrs_wo_objs(cls):
    """Classify *cls*'s attributes, keeping only (name, kind, defining class)
    from each classify_class_attrs() tuple and dropping the object itself."""
    triples = []
    for attr in inspect.classify_class_attrs(cls):
        triples.append(attr[:3])
    return triples
class TestClassesAndFunctions(unittest.TestCase):
    """Tests for getmro(), getargspec(), classify_class_attrs() and
    getmembers() over both classic and new-style classes."""
    def test_classic_mro(self):
        # Test classic-class method resolution order.
        class A: pass
        class B(A): pass
        class C(A): pass
        class D(B, C): pass
        # Classic MRO is depth-first left-to-right, so A precedes C.
        expected = (D, B, A, C)
        got = inspect.getmro(D)
        self.assertEqual(expected, got)
    def test_newstyle_mro(self):
        # The same w/ new-class MRO.
        class A(object): pass
        class B(A): pass
        class C(A): pass
        class D(B, C): pass
        # C3 linearization keeps A after both B and C.
        expected = (D, B, C, A, object)
        got = inspect.getmro(D)
        self.assertEqual(expected, got)
    def assertArgSpecEquals(self, routine, args_e, varargs_e = None,
                            varkw_e = None, defaults_e = None,
                            formatted = None):
        # Helper: compare every component of getargspec(), and optionally
        # the formatargspec() rendering.
        args, varargs, varkw, defaults = inspect.getargspec(routine)
        self.assertEqual(args, args_e)
        self.assertEqual(varargs, varargs_e)
        self.assertEqual(varkw, varkw_e)
        self.assertEqual(defaults, defaults_e)
        if formatted is not None:
            self.assertEqual(inspect.formatargspec(args, varargs, varkw, defaults),
                             formatted)
    def test_getargspec(self):
        self.assertArgSpecEquals(mod.eggs, ['x', 'y'], formatted = '(x, y)')
        # mod.spam uses Py2 nested tuple parameters.
        self.assertArgSpecEquals(mod.spam,
                                 ['a', 'b', 'c', 'd', ['e', ['f']]],
                                 'g', 'h', (3, (4, (5,))),
                                 '(a, b, c, d=3, (e, (f,))=(4, (5,)), *g, **h)')
    def test_getargspec_method(self):
        class A(object):
            def m(self):
                pass
        self.assertArgSpecEquals(A.m, ['self'])
    def test_getargspec_sublistofone(self):
        with check_py3k_warnings(
                ("tuple parameter unpacking has been removed", SyntaxWarning),
                ("parenthesized argument names are invalid", SyntaxWarning)):
            # A one-element sublist parameter keeps its list wrapper...
            exec 'def sublistOfOne((foo,)): return 1'
            self.assertArgSpecEquals(sublistOfOne, [['foo']])
            # ...but plain parentheses around a name are just grouping.
            exec 'def fakeSublistOfOne((foo)): return 1'
            self.assertArgSpecEquals(fakeSublistOfOne, ['foo'])
    def _classify_test(self, newstyle):
        """Helper for testing that classify_class_attrs finds a bunch of
        different kinds of attributes on a given class.
        """
        if newstyle:
            base = object
        else:
            class base:
                pass
        class A(base):
            def s(): pass
            s = staticmethod(s)
            def c(cls): pass
            c = classmethod(c)
            def getp(self): pass
            p = property(getp)
            def m(self): pass
            def m1(self): pass
            datablob = '1'
            dd = _BrokenDataDescriptor()
            md = _BrokenMethodDescriptor()
        attrs = attrs_wo_objs(A)
        self.assertIn(('s', 'static method', A), attrs, 'missing static method')
        self.assertIn(('c', 'class method', A), attrs, 'missing class method')
        self.assertIn(('p', 'property', A), attrs, 'missing property')
        self.assertIn(('m', 'method', A), attrs, 'missing plain method')
        self.assertIn(('m1', 'method', A), attrs, 'missing plain method')
        self.assertIn(('datablob', 'data', A), attrs, 'missing data')
        self.assertIn(('md', 'method', A), attrs, 'missing method descriptor')
        self.assertIn(('dd', 'data', A), attrs, 'missing data descriptor')
        # B overrides m: the defining class for 'm' must become B.
        class B(A):
            def m(self): pass
        attrs = attrs_wo_objs(B)
        self.assertIn(('s', 'static method', A), attrs, 'missing static method')
        self.assertIn(('c', 'class method', A), attrs, 'missing class method')
        self.assertIn(('p', 'property', A), attrs, 'missing property')
        self.assertIn(('m', 'method', B), attrs, 'missing plain method')
        self.assertIn(('m1', 'method', A), attrs, 'missing plain method')
        self.assertIn(('datablob', 'data', A), attrs, 'missing data')
        self.assertIn(('md', 'method', A), attrs, 'missing method descriptor')
        self.assertIn(('dd', 'data', A), attrs, 'missing data descriptor')
        # C shadows the class method 'c' with a plain method.
        class C(A):
            def m(self): pass
            def c(self): pass
        attrs = attrs_wo_objs(C)
        self.assertIn(('s', 'static method', A), attrs, 'missing static method')
        self.assertIn(('c', 'method', C), attrs, 'missing plain method')
        self.assertIn(('p', 'property', A), attrs, 'missing property')
        self.assertIn(('m', 'method', C), attrs, 'missing plain method')
        self.assertIn(('m1', 'method', A), attrs, 'missing plain method')
        self.assertIn(('datablob', 'data', A), attrs, 'missing data')
        self.assertIn(('md', 'method', A), attrs, 'missing method descriptor')
        self.assertIn(('dd', 'data', A), attrs, 'missing data descriptor')
        # Diamond D(B, C): which 'c' wins depends on the MRO flavour.
        class D(B, C):
            def m1(self): pass
        attrs = attrs_wo_objs(D)
        self.assertIn(('s', 'static method', A), attrs, 'missing static method')
        if newstyle:
            self.assertIn(('c', 'method', C), attrs, 'missing plain method')
        else:
            self.assertIn(('c', 'class method', A), attrs, 'missing class method')
        self.assertIn(('p', 'property', A), attrs, 'missing property')
        self.assertIn(('m', 'method', B), attrs, 'missing plain method')
        self.assertIn(('m1', 'method', D), attrs, 'missing plain method')
        self.assertIn(('datablob', 'data', A), attrs, 'missing data')
        self.assertIn(('md', 'method', A), attrs, 'missing method descriptor')
        self.assertIn(('dd', 'data', A), attrs, 'missing data descriptor')
    def test_classify_oldstyle(self):
        """classify_class_attrs finds static methods, class methods,
        properties, normal methods, and data attributes on an old-style
        class.
        """
        self._classify_test(False)
    def test_classify_newstyle(self):
        """Just like test_classify_oldstyle, but for a new-style class.
        """
        self._classify_test(True)
    def test_classify_builtin_types(self):
        # Simple sanity check that all built-in types can have their
        # attributes classified.
        for name in dir(__builtin__):
            builtin = getattr(__builtin__, name)
            if isinstance(builtin, type):
                inspect.classify_class_attrs(builtin)
    def test_getmembers_method(self):
        # Old-style classes
        class B:
            def f(self):
                pass
        self.assertIn(('f', B.f), inspect.getmembers(B))
        # contrary to spec, ismethod() is also True for unbound methods
        # (see #1785)
        self.assertIn(('f', B.f), inspect.getmembers(B, inspect.ismethod))
        b = B()
        self.assertIn(('f', b.f), inspect.getmembers(b))
        self.assertIn(('f', b.f), inspect.getmembers(b, inspect.ismethod))
        # New-style classes
        class B(object):
            def f(self):
                pass
        self.assertIn(('f', B.f), inspect.getmembers(B))
        self.assertIn(('f', B.f), inspect.getmembers(B, inspect.ismethod))
        b = B()
        self.assertIn(('f', b.f), inspect.getmembers(b))
        self.assertIn(('f', b.f), inspect.getmembers(b, inspect.ismethod))
class TestGetcallargsFunctions(unittest.TestCase):
    """Check inspect.getcallargs() against real calls: for any call that
    succeeds, getcallargs must produce the same binding; for any call that
    fails, it must raise an equivalent exception."""
    # It's possible to get both the tuple parameters AND the unpacked
    # parameters by using locals(). However, they are named
    # differently in CPython and Jython:
    #
    # * For CPython, such tuple parameters are named '.1', '.2', etc.
    # * For Jython, they are actually the formal parameter, eg '(d, (e, f))'
    #
    # In both cases, we ignore in testing - they are in fact unpacked
    is_tuplename = re.compile(r'(?:^\.\d+$)|(?:^\()').match
    def assertEqualCallArgs(self, func, call_params_string, locs=None):
        # Compare the locals() produced by actually calling func(...) with
        # the binding computed by getcallargs(func, ...).
        locs = dict(locs or {}, func=func)
        r1 = eval('func(%s)' % call_params_string, None, locs)
        r2 = eval('inspect.getcallargs(func, %s)' % call_params_string, None,
                  locs)
        self.assertEqual(r1, r2)
    def assertEqualException(self, func, call_param_string, locs=None):
        # Both the real call and getcallargs() must fail, with the same
        # exception type and message.
        locs = dict(locs or {}, func=func)
        try:
            eval('func(%s)' % call_param_string, None, locs)
        except Exception, ex1:
            pass
        else:
            self.fail('Exception not raised')
        try:
            eval('inspect.getcallargs(func, %s)' % call_param_string, None,
                 locs)
        except Exception, ex2:
            pass
        else:
            self.fail('Exception not raised')
        self.assertIs(type(ex1), type(ex2))
        self.assertEqual(str(ex1), str(ex2))
    def makeCallable(self, signature):
        """Create a function that returns its locals(), excluding the
        autogenerated '.1', '.2', etc. tuple param names (if any)."""
        with check_py3k_warnings(
            ("tuple parameter unpacking has been removed", SyntaxWarning),
            quiet=True):
            code = ("lambda %s: dict(i for i in locals().items() "
                    "if not is_tuplename(i[0]))")
            return eval(code % signature, {'is_tuplename' : self.is_tuplename})
    def test_plain(self):
        f = self.makeCallable('a, b=1')
        self.assertEqualCallArgs(f, '2')
        self.assertEqualCallArgs(f, '2, 3')
        self.assertEqualCallArgs(f, 'a=2')
        self.assertEqualCallArgs(f, 'b=3, a=2')
        self.assertEqualCallArgs(f, '2, b=3')
        # expand *iterable / **mapping
        self.assertEqualCallArgs(f, '*(2,)')
        self.assertEqualCallArgs(f, '*[2]')
        self.assertEqualCallArgs(f, '*(2, 3)')
        self.assertEqualCallArgs(f, '*[2, 3]')
        self.assertEqualCallArgs(f, '**{"a":2}')
        self.assertEqualCallArgs(f, 'b=3, **{"a":2}')
        self.assertEqualCallArgs(f, '2, **{"b":3}')
        self.assertEqualCallArgs(f, '**{"b":3, "a":2}')
        # expand UserList / UserDict
        self.assertEqualCallArgs(f, '*UserList([2])')
        self.assertEqualCallArgs(f, '*UserList([2, 3])')
        self.assertEqualCallArgs(f, '**UserDict(a=2)')
        self.assertEqualCallArgs(f, '2, **UserDict(b=3)')
        self.assertEqualCallArgs(f, 'b=2, **UserDict(a=3)')
        # unicode keyword args
        self.assertEqualCallArgs(f, '**{u"a":2}')
        self.assertEqualCallArgs(f, 'b=3, **{u"a":2}')
        self.assertEqualCallArgs(f, '2, **{u"b":3}')
        self.assertEqualCallArgs(f, '**{u"b":3, u"a":2}')
    def test_varargs(self):
        f = self.makeCallable('a, b=1, *c')
        self.assertEqualCallArgs(f, '2')
        self.assertEqualCallArgs(f, '2, 3')
        self.assertEqualCallArgs(f, '2, 3, 4')
        self.assertEqualCallArgs(f, '*(2,3,4)')
        self.assertEqualCallArgs(f, '2, *[3,4]')
        self.assertEqualCallArgs(f, '2, 3, *UserList([4])')
    def test_varkw(self):
        f = self.makeCallable('a, b=1, **c')
        self.assertEqualCallArgs(f, 'a=2')
        self.assertEqualCallArgs(f, '2, b=3, c=4')
        self.assertEqualCallArgs(f, 'b=3, a=2, c=4')
        self.assertEqualCallArgs(f, 'c=4, **{"a":2, "b":3}')
        self.assertEqualCallArgs(f, '2, c=4, **{"b":3}')
        self.assertEqualCallArgs(f, 'b=2, **{"a":3, "c":4}')
        self.assertEqualCallArgs(f, '**UserDict(a=2, b=3, c=4)')
        self.assertEqualCallArgs(f, '2, c=4, **UserDict(b=3)')
        self.assertEqualCallArgs(f, 'b=2, **UserDict(a=3, c=4)')
        # unicode keyword args
        self.assertEqualCallArgs(f, 'c=4, **{u"a":2, u"b":3}')
        self.assertEqualCallArgs(f, '2, c=4, **{u"b":3}')
        self.assertEqualCallArgs(f, 'b=2, **{u"a":3, u"c":4}')
    def test_varkw_only(self):
        # issue11256:
        f = self.makeCallable('**c')
        self.assertEqualCallArgs(f, '')
        self.assertEqualCallArgs(f, 'a=1')
        self.assertEqualCallArgs(f, 'a=1, b=2')
        self.assertEqualCallArgs(f, 'c=3, **{"a": 1, "b": 2}')
        self.assertEqualCallArgs(f, '**UserDict(a=1, b=2)')
        self.assertEqualCallArgs(f, 'c=3, **UserDict(a=1, b=2)')
    def test_tupleargs(self):
        # Py2 nested tuple parameters must unpack identically.
        f = self.makeCallable('(b,c), (d,(e,f))=(0,[1,2])')
        self.assertEqualCallArgs(f, '(2,3)')
        self.assertEqualCallArgs(f, '[2,3]')
        self.assertEqualCallArgs(f, 'UserList([2,3])')
        self.assertEqualCallArgs(f, '(2,3), (4,(5,6))')
        self.assertEqualCallArgs(f, '(2,3), (4,[5,6])')
        self.assertEqualCallArgs(f, '(2,3), [4,UserList([5,6])]')
    def test_multiple_features(self):
        f = self.makeCallable('a, b=2, (c,(d,e))=(3,[4,5]), *f, **g')
        self.assertEqualCallArgs(f, '2, 3, (4,[5,6]), 7')
        self.assertEqualCallArgs(f, '2, 3, *[(4,[5,6]), 7], x=8')
        self.assertEqualCallArgs(f, '2, 3, x=8, *[(4,[5,6]), 7]')
        self.assertEqualCallArgs(f, '2, x=8, *[3, (4,[5,6]), 7], y=9')
        self.assertEqualCallArgs(f, 'x=8, *[2, 3, (4,[5,6])], y=9')
        self.assertEqualCallArgs(f, 'x=8, *UserList([2, 3, (4,[5,6])]), '
                                 '**{"y":9, "z":10}')
        self.assertEqualCallArgs(f, '2, x=8, *UserList([3, (4,[5,6])]), '
                                 '**UserDict(y=9, z=10)')
    def test_errors(self):
        f0 = self.makeCallable('')
        f1 = self.makeCallable('a, b')
        f2 = self.makeCallable('a, b=1')
        # f0 takes no arguments
        self.assertEqualException(f0, '1')
        self.assertEqualException(f0, 'x=1')
        self.assertEqualException(f0, '1,x=1')
        # f1 takes exactly 2 arguments
        self.assertEqualException(f1, '')
        self.assertEqualException(f1, '1')
        self.assertEqualException(f1, 'a=2')
        self.assertEqualException(f1, 'b=3')
        # f2 takes at least 1 argument
        self.assertEqualException(f2, '')
        self.assertEqualException(f2, 'b=3')
        for f in f1, f2:
            # f1/f2 takes exactly/at most 2 arguments
            self.assertEqualException(f, '2, 3, 4')
            self.assertEqualException(f, '1, 2, 3, a=1')
            self.assertEqualException(f, '2, 3, 4, c=5')
            self.assertEqualException(f, '2, 3, 4, a=1, c=5')
            # f got an unexpected keyword argument
            self.assertEqualException(f, 'c=2')
            self.assertEqualException(f, '2, c=3')
            self.assertEqualException(f, '2, 3, c=4')
            self.assertEqualException(f, '2, c=4, b=3')
            self.assertEqualException(f, '**{u"\u03c0\u03b9": 4}')
            # f got multiple values for keyword argument
            self.assertEqualException(f, '1, a=2')
            self.assertEqualException(f, '1, **{"a":2}')
            self.assertEqualException(f, '1, 2, b=3')
            # XXX: Python inconsistency
            # - for functions and bound methods: unexpected keyword 'c'
            # - for unbound methods: multiple values for keyword 'a'
            #self.assertEqualException(f, '1, c=3, a=2')
        f = self.makeCallable('(a,b)=(0,1)')
        self.assertEqualException(f, '1')
        self.assertEqualException(f, '[1]')
        self.assertEqualException(f, '(1,2,3)')
        # issue11256:
        f3 = self.makeCallable('**c')
        self.assertEqualException(f3, '1, 2')
        self.assertEqualException(f3, '1, 2, a=1, b=2')
class TestGetcallargsMethods(TestGetcallargsFunctions):
    """Re-run the getcallargs suite, but with every callable installed as a
    bound method on a throwaway class."""
    def setUp(self):
        class Foo(object):
            pass
        self.cls = Foo
        self.inst = Foo()
    def makeCallable(self, signature):
        assert 'self' not in signature
        # Build the function via the base helper, attach it as a method,
        # and hand back the *bound* method.
        mk = super(TestGetcallargsMethods, self).makeCallable
        self.cls.method = mk('self, ' + signature)
        return self.inst.method
class TestGetcallargsUnboundMethods(TestGetcallargsMethods):
    """Same again for *unbound* methods: the instance is passed explicitly
    as the first positional argument instead of being bound."""
    def makeCallable(self, signature):
        super(TestGetcallargsUnboundMethods, self).makeCallable(signature)
        return self.cls.method
    def assertEqualCallArgs(self, func, call_params_string, locs=None):
        return super(TestGetcallargsUnboundMethods, self).assertEqualCallArgs(
            *self._getAssertEqualParams(func, call_params_string, locs))
    def assertEqualException(self, func, call_params_string, locs=None):
        return super(TestGetcallargsUnboundMethods, self).assertEqualException(
            *self._getAssertEqualParams(func, call_params_string, locs))
    def _getAssertEqualParams(self, func, call_params_string, locs=None):
        # Prepend the instance as the explicit first argument of the call.
        assert 'inst' not in call_params_string
        locs = dict(locs or {}, inst=self.inst)
        return (func, 'inst,' + call_params_string, locs)
def test_main():
    """Aggregate entry point used by the regrtest harness."""
    run_unittest(
        TestDecorators, TestRetrievingSourceCode, TestOneliners, TestBuggyCases,
        TestInterpreterStack, TestClassesAndFunctions, TestPredicates,
        TestGetcallargsFunctions, TestGetcallargsMethods,
        TestGetcallargsUnboundMethods)
if __name__ == "__main__":
    test_main()
| apache-2.0 |
sahutd/youtube-dl | youtube_dl/extractor/common.py | 3 | 47316 | from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import re
import socket
import sys
import time
import xml.etree.ElementTree
from ..compat import (
compat_cookiejar,
compat_HTTPError,
compat_http_client,
compat_urllib_error,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_str,
)
from ..utils import (
age_restricted,
bug_reports_message,
clean_html,
compiled_regex_type,
ExtractorError,
float_or_none,
int_or_none,
RegexNotFoundError,
sanitize_filename,
unescapeHTML,
)
_NO_DEFAULT = object()  # sentinel: distinguishes "no default supplied" from an explicit None
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url Mandatory. The URL of the video file
* ext Will be calculated from url if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height.
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", or "m3u8_native".
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language_preference Is this in the correct requested
language?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height"},
deprecated)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
creator: The main artist who created the video.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{language: subformats}. "subformats" is a list sorted from
lower to higher preference, each element is a dictionary
with the "ext" entry and one of:
* "data": The subtitles file contents
* "url": A url pointing to the subtitles file
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
average_rating: Average rating give by users, the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The url to the video webpage, if given to youtube-dl it
should allow to get the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "title" and "id" attributes with the same
semantics as videos (see above).
_type "multi_video" indicates that there are multiple videos that
form a single show, for examples multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return m.group('id')
@classmethod
def working(cls):
"""Getter method for _WORKING."""
return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
if not self._ready:
self._real_initialize()
self._ready = True
def extract(self, url):
    """Extracts URL information and returns it in list of dicts.

    ExtractorError is propagated as-is; network and common programming
    errors are wrapped in ExtractorError so callers see one error type.
    """
    try:
        self.initialize()
        return self._real_extract(url)
    except ExtractorError:
        raise
    except compat_http_client.IncompleteRead as e:
        # A truncated read is a network problem, not an extractor bug,
        # so mark it "expected" (no bug-report plea in the message).
        raise ExtractorError('A network error has occurred.', cause=e, expected=True)
    except (KeyError, StopIteration) as e:
        raise ExtractorError('An extractor error has occurred.', cause=e)
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
return cls.__name__[:-2]
@property
def IE_NAME(self):
return type(self).__name__[:-2]
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
""" Returns the response handle """
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
try:
return self._downloader.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if errnote is False:
return False
if errnote is None:
errnote = 'Unable to download webpage'
errmsg = '%s: %s' % (errnote, compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None):
""" Returns a tuple (page content as string, URL handle) """
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal)
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
if prefix is not None:
webpage_bytes = prefix + webpage_bytes
if not encoding:
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
if self._downloader.params.get('dump_intermediate_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
self.to_screen('Dumping request to ' + url)
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self._downloader.params.get('write_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
basen = '%s_%s' % (video_id, url)
if len(basen) > 240:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:240 - len(h)] + h
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
self.to_screen('Saving request to ' + filename)
# Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if os.name == 'nt':
absfilepath = os.path.abspath(filename)
if len(absfilepath) > 259:
filename = '\\\\?\\' + absfilepath
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
try:
content = webpage_bytes.decode(encoding, 'replace')
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')
if ('<title>Access to this site is blocked</title>' in content and
'Websense' in content[:512]):
msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
'Websense information URL', default=None)
if blocked_iframe:
msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
if '<title>The URL you requested has been blocked</title>' in content[:512]:
msg = (
'Access to this webpage has been blocked by Indian censorship. '
'Use a VPN or proxy server (with --proxy) to route around it.')
block_msg = self._html_search_regex(
r'</h1><p>(.*?)</p>',
content, 'block message', default=None)
if block_msg:
msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
raise ExtractorError(msg, expected=True)
return content
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None):
""" Returns the data of the page as a string """
success = False
try_count = 0
while success is False:
try:
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if try_count >= tries:
raise e
self._sleep(timeout, video_id)
if res is False:
return res
else:
content, _ = res
return content
def _download_xml(self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None):
"""Return the xml as an xml.etree.ElementTree.Element"""
xml_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding)
if xml_string is False:
return xml_string
if transform_source:
xml_string = transform_source(xml_string)
return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8'))
def _download_json(self, url_or_request, video_id,
note='Downloading JSON metadata',
errnote='Unable to download JSON metadata',
transform_source=None,
fatal=True, encoding=None):
json_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding)
if (not fatal) and json_string is False:
return None
return self._parse_json(
json_string, video_id, transform_source=transform_source, fatal=fatal)
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
    """Returns a url that points to a page that should be processed"""
    # TODO: ie should be the class used for getting the info
    info = {'_type': 'url', 'url': url, 'ie_key': ie}
    # Optional metadata is attached only when actually known.
    if video_id is not None:
        info['id'] = video_id
    if video_title is not None:
        info['title'] = video_title
    return info
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
    """Returns a playlist"""
    result = {'_type': 'playlist', 'entries': entries}
    # Truthiness checks: empty-string ids/titles are treated as absent.
    for key, value in (('id', playlist_id),
                       ('title', playlist_title),
                       ('description', playlist_description)):
        if value:
            result[key] = value
    return result
def _search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None):
    """
    Perform a regex search on the given string, using a single or a list of
    patterns returning the first matching group.
    In case of failure return a default value or raise a WARNING or a
    RegexNotFoundError, depending on fatal, specifying the field name.
    """
    # Start from None so an empty pattern list cannot leave mobj unbound
    # (previously an empty list raised UnboundLocalError below).
    mobj = None
    if isinstance(pattern, (str, compat_str, compiled_regex_type)):
        mobj = re.search(pattern, string, flags)
    else:
        # Try each pattern in turn; the first match wins.
        for p in pattern:
            mobj = re.search(p, string, flags)
            if mobj:
                break

    # Colorize the field name in error output on capable terminals.
    if not self._downloader.params.get('no_color') and os.name != 'nt' and sys.stderr.isatty():
        _name = '\033[0;34m%s\033[0m' % name
    else:
        _name = name

    if mobj:
        if group is None:
            # return the first matching group
            return next(g for g in mobj.groups() if g is not None)
        else:
            return mobj.group(group)
    elif default is not _NO_DEFAULT:
        return default
    elif fatal:
        raise RegexNotFoundError('Unable to extract %s' % _name)
    else:
        self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
        return None
def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
def _get_login_info(self):
"""
Get the login info as (username, password)
It will look in the netrc file using the _NETRC_MACHINE value
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
username = None
password = None
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get('username', None) is not None:
username = downloader_params['username']
password = downloader_params['password']
elif downloader_params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(self._NETRC_MACHINE)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning('parsing .netrc: %s' % compat_str(err))
return (username, password)
def _get_tfa_info(self):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
currently just uses the command line option
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor', None) is not None:
return downloader_params['twofactor']
return None
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^>]+?)"|\'([^>]+?)\')'
property_re = r'(?:name|property)=[\'"]og:%s[\'"]' % re.escape(prop)
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
def _og_search_property(self, prop, html, name=None, **kargs):
if name is None:
name = 'OpenGraph %s' % prop
escaped = self._search_regex(self._og_regexes(prop), html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
return self._og_search_property('image', html, 'thumbnail url', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
def _og_search_title(self, html, **kargs):
return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video') + self._og_regexes('video:url')
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _og_search_url(self, html, **kargs):
return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
if display_name is None:
display_name = name
return self._html_search_regex(
r'''(?isx)<meta
(?=[^>]+(?:itemprop|name|property)=(["\']?)%s\1)
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(name),
html, display_name, fatal=fatal, group='content', **kwargs)
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower(), None)
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta('isFamilyFriendly', html)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower(), None)
def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')
def _sort_formats(self, formats, field_preference=None):
if not formats:
raise ExtractorError('No video formats found')
def _formats_key(f):
# TODO remove the following workaround
from ..utils import determine_ext
if not f.get('ext') and 'url' in f:
f['ext'] = determine_ext(f['url'])
if isinstance(field_preference, (list, tuple)):
return tuple(f.get(field) if f.get(field) is not None else -1 for field in field_preference)
preference = f.get('preference')
if preference is None:
proto = f.get('protocol')
if proto is None:
proto = compat_urllib_parse_urlparse(f.get('url', '')).scheme
preference = 0 if proto in ['http', 'https'] else -0.1
if f.get('ext') in ['f4f', 'f4m']: # Not yet supported
preference -= 0.5
if f.get('vcodec') == 'none': # audio only
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
ext_preference = 0
try:
audio_ext_preference = ORDER.index(f['ext'])
except ValueError:
audio_ext_preference = -1
else:
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['flv', 'mp4', 'webm']
else:
ORDER = ['webm', 'flv', 'mp4']
try:
ext_preference = ORDER.index(f['ext'])
except ValueError:
ext_preference = -1
audio_ext_preference = 0
return (
preference,
f.get('language_preference') if f.get('language_preference') is not None else -1,
f.get('quality') if f.get('quality') is not None else -1,
f.get('tbr') if f.get('tbr') is not None else -1,
f.get('filesize') if f.get('filesize') is not None else -1,
f.get('vbr') if f.get('vbr') is not None else -1,
f.get('height') if f.get('height') is not None else -1,
f.get('width') if f.get('width') is not None else -1,
ext_preference,
f.get('abr') if f.get('abr') is not None else -1,
audio_ext_preference,
f.get('fps') if f.get('fps') is not None else -1,
f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
f.get('source_preference') if f.get('source_preference') is not None else -1,
f.get('format_id') if f.get('format_id') is not None else '',
)
formats.sort(key=_formats_key)
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
def _is_valid_url(self, url, video_id, item='video'):
url = self._proto_relative_url(url, scheme='http:')
# For now assume non HTTP(S) URLs always valid
if not (url.startswith('http://') or url.startswith('https://')):
return True
try:
self._request_webpage(url, video_id, 'Checking %s URL' % item)
return True
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError):
self.to_screen(
'%s: %s URL is invalid, skipping' % (video_id, item))
return False
raise
def http_scheme(self):
    """ Either "http:" or "https:", depending on the user's preferences """
    if self._downloader.params.get('prefer_insecure', False):
        return 'http:'
    return 'https:'
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None):
    """Parse an Adobe HDS (f4m) manifest into a list of format dicts.

    Supports both 1.0 manifests (the media URL is the manifest URL
    itself) and 2.0 manifests (per-media href/url attributes resolved
    against the manifest's base URL).
    """
    manifest = self._download_xml(
        manifest_url, video_id, 'Downloading f4m manifest',
        'Unable to download f4m manifest')

    formats = []
    manifest_version = '1.0'
    media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
    if not media_nodes:
        manifest_version = '2.0'
        media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
    # Base for resolving relative href/url attributes of 2.0 manifests.
    # Computed once: previously manifest_url itself was overwritten in the
    # loop, so the second and later media nodes were joined against an
    # already-joined URL.
    base_url = '/'.join(manifest_url.split('/')[:-1])
    for i, media_el in enumerate(media_nodes):
        media_url = manifest_url
        if manifest_version == '2.0':
            media_url = (base_url + '/' +
                         (media_el.attrib.get('href') or media_el.attrib.get('url')))
        tbr = int_or_none(media_el.attrib.get('bitrate'))
        formats.append({
            'format_id': '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)])),
            'url': media_url,
            'ext': 'flv',
            'tbr': tbr,
            'width': int_or_none(media_el.attrib.get('width')),
            'height': int_or_none(media_el.attrib.get('height')),
            'preference': preference,
        })
    self._sort_formats(formats)

    return formats
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, note=None, errnote=None):
formats = [{
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 1 if preference else -1,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}]
format_url = lambda u: (
u
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
m3u8_doc = self._download_webpage(
m3u8_url, video_id,
note=note or 'Downloading m3u8 information',
errnote=errnote or 'Failed to download m3u8 information')
last_info = None
last_media = None
kv_rex = re.compile(
r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)')
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-STREAM-INF:'):
last_info = {}
for m in kv_rex.finditer(line):
v = m.group('val')
if v.startswith('"'):
v = v[1:-1]
last_info[m.group('key')] = v
elif line.startswith('#EXT-X-MEDIA:'):
last_media = {}
for m in kv_rex.finditer(line):
v = m.group('val')
if v.startswith('"'):
v = v[1:-1]
last_media[m.group('key')] = v
elif line.startswith('#') or not line.strip():
continue
else:
if last_info is None:
formats.append({'url': format_url(line)})
continue
tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000)
format_id = []
if m3u8_id:
format_id.append(m3u8_id)
last_media_name = last_media.get('NAME') if last_media and last_media.get('TYPE') != 'SUBTITLES' else None
format_id.append(last_media_name if last_media_name else '%d' % (tbr if tbr else len(formats)))
f = {
'format_id': '-'.join(format_id),
'url': format_url(line.strip()),
'tbr': tbr,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}
codecs = last_info.get('CODECS')
if codecs:
# TODO: looks like video codec is not always necessarily goes first
va_codecs = codecs.split(',')
if va_codecs[0]:
f['vcodec'] = va_codecs[0].partition('.')[0]
if len(va_codecs) > 1 and va_codecs[1]:
f['acodec'] = va_codecs[1].partition('.')[0]
resolution = last_info.get('RESOLUTION')
if resolution:
width_str, height_str = resolution.split('x')
f['width'] = int(width_str)
f['height'] = int(height_str)
if last_media is not None:
f['m3u8_media'] = last_media
last_media = None
formats.append(f)
last_info = {}
self._sort_formats(formats)
return formats
# TODO: improve extraction
def _extract_smil_formats(self, smil_url, video_id, fatal=True):
smil = self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal)
if smil is False:
assert not fatal
return []
base = smil.find('./head/meta').get('base')
formats = []
rtmp_count = 0
if smil.findall('./body/seq/video'):
video = smil.findall('./body/seq/video')[0]
fmts, rtmp_count = self._parse_smil_video(video, video_id, base, rtmp_count)
formats.extend(fmts)
else:
for video in smil.findall('./body/switch/video'):
fmts, rtmp_count = self._parse_smil_video(video, video_id, base, rtmp_count)
formats.extend(fmts)
self._sort_formats(formats)
return formats
def _parse_smil_video(self, video, video_id, base, rtmp_count):
    """Turn one SMIL <video> node into a (formats, rtmp_count) tuple.

    Returns ([], rtmp_count) for nodes without a usable src or whose
    protocol cannot be determined.
    """
    src = video.get('src')
    if not src:
        return ([], rtmp_count)
    bitrate = int_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
    width = int_or_none(video.get('width'))
    height = int_or_none(video.get('height'))
    proto = video.get('proto')
    if not proto:
        # Fall back to guessing the protocol from the stream base URL.
        if base:
            if base.startswith('rtmp'):
                proto = 'rtmp'
            elif base.startswith('http'):
                proto = 'http'
    ext = video.get('ext')
    if proto == 'm3u8':
        return (self._extract_m3u8_formats(src, video_id, ext), rtmp_count)
    elif proto == 'rtmp':
        rtmp_count += 1
        streamer = video.get('streamer') or base
        return ([{
            'url': streamer,
            'play_path': src,
            'ext': 'flv',
            'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
            'tbr': bitrate,
            'width': width,
            'height': height,
        }], rtmp_count)
    # Guard: proto may still be None here, which previously raised
    # AttributeError on .startswith().
    elif proto and proto.startswith('http'):
        return ([{
            'url': base + src,
            'ext': ext or 'flv',
            'tbr': bitrate,
            'width': width,
            'height': height,
        }], rtmp_count)
    # Unknown protocol: previously this fell through and implicitly
    # returned None, crashing callers that unpack the result tuple.
    return ([], rtmp_count)
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime("%Y-%m-%d %H:%M")
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
    """Parse *v* as an int via int_or_none; warn or raise on failure.

    Extra keyword arguments are forwarded to int_or_none.
    """
    res = int_or_none(v, **kwargs)
    # Removed a leftover debug statement that printed
    # getattr(v, kwargs['get_attr']) straight to stdout whenever a
    # get_attr kwarg was passed through to int_or_none.
    if res is None:
        msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
        if fatal:
            raise ExtractorError(msg)
        else:
            self._downloader.report_warning(msg)
    return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None):
cookie = compat_cookiejar.Cookie(
0, name, value, None, None, domain, None,
None, '/', True, False, expire_time, '', None, None, None)
self._downloader.cookiejar.set_cookie(cookie)
def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
assert not hasattr(self, '_TESTS'), \
'%s has _TEST and _TESTS' % type(self).__name__
tests = [t]
else:
tests = getattr(self, '_TESTS', [])
for t in tests:
if not include_onlymatching and t.get('only_matching', False):
continue
t['name'] = type(self).__name__[:-len('IE')]
yield t
def is_suitable(self, age_limit):
""" Test whether the extractor is generally suitable for the given
age limit (i.e. pornographic sites are not, all others usually are) """
any_restricted = False
for tc in self.get_testcases(include_onlymatching=False):
if 'playlist' in tc:
tc = tc['playlist'][0]
is_restricted = age_restricted(
tc.get('info_dict', {}).get('age_limit'), age_limit)
if not is_restricted:
return True
any_restricted = any_restricted or is_restricted
return not any_restricted
def extract_subtitles(self, *args, **kwargs):
if (self._downloader.params.get('writesubtitles', False) or
self._downloader.params.get('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError("This method must be implemented by subclasses")
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False) or
self._downloader.params.get('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError("This method must be implemented by subclasses")
class SearchInfoExtractor(InfoExtractor):
"""
Base class for paged search queries extractors.
They accept urls in the format _SEARCH_KEY(|all|[0-9]):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
"""
@classmethod
def _make_valid_url(cls):
return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
@classmethod
def suitable(cls, url):
return re.match(cls._make_valid_url(), url) is not None
def _real_extract(self, query):
mobj = re.match(self._make_valid_url(), query)
if mobj is None:
raise ExtractorError('Invalid search query "%s"' % query)
prefix = mobj.group('prefix')
query = mobj.group('query')
if prefix == '':
return self._get_n_results(query, 1)
elif prefix == 'all':
return self._get_n_results(query, self._MAX_RESULTS)
else:
n = int(prefix)
if n <= 0:
raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
raise NotImplementedError("This method must be implemented by subclasses")
@property
def SEARCH_KEY(self):
return self._SEARCH_KEY
| unlicense |
marespiaut/ndifloss | python-twitter/tests/test_trend.py | 16 | 1648 | import twitter
import unittest
import json
class TrendTest(unittest.TestCase):
SAMPLE_JSON = '''{"name": "Kesuke Miyagi", "query": "Kesuke Miyagi"}'''
def _GetSampleTrend(self):
return twitter.Trend(name='Kesuke Miyagi',
query='Kesuke Miyagi',
timestamp='Fri Jan 26 23:17:14 +0000 2007')
def testInit(self):
'''Test the twitter.Trend constructor'''
trend = twitter.Trend(name='Kesuke Miyagi',
query='Kesuke Miyagi',
timestamp='Fri Jan 26 23:17:14 +0000 2007')
def testProperties(self):
'''Test all of the twitter.Trend properties'''
trend = twitter.Trend()
trend.name = 'Kesuke Miyagi'
self.assertEqual('Kesuke Miyagi', trend.name)
trend.query = 'Kesuke Miyagi'
self.assertEqual('Kesuke Miyagi', trend.query)
trend.timestamp = 'Fri Jan 26 23:17:14 +0000 2007'
self.assertEqual('Fri Jan 26 23:17:14 +0000 2007', trend.timestamp)
def testNewFromJsonDict(self):
'''Test the twitter.Trend NewFromJsonDict method'''
data = json.loads(TrendTest.SAMPLE_JSON)
trend = twitter.Trend.NewFromJsonDict(data, timestamp='Fri Jan 26 23:17:14 +0000 2007')
self.assertEqual(self._GetSampleTrend(), trend)
def testEq(self):
'''Test the twitter.Trend __eq__ method'''
trend = twitter.Trend()
trend.name = 'Kesuke Miyagi'
trend.query = 'Kesuke Miyagi'
trend.timestamp = 'Fri Jan 26 23:17:14 +0000 2007'
self.assertEqual(trend, self._GetSampleTrend())
| mit |
indrajitr/ansible-modules-extras | cloud/rackspace/rax_mon_check.py | 153 | 11151 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_mon_check
short_description: Create or delete a Rackspace Cloud Monitoring check for an
existing entity.
description:
- Create or delete a Rackspace Cloud Monitoring check associated with an
existing rax_mon_entity. A check is a specific test or measurement that is
performed, possibly from different monitoring zones, on the systems you
monitor. Rackspace monitoring module flow | rax_mon_entity ->
*rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan ->
rax_mon_alarm
version_added: "2.0"
options:
state:
description:
- Ensure that a check with this C(label) exists or does not exist.
choices: ["present", "absent"]
entity_id:
description:
- ID of the rax_mon_entity to target with this check.
required: true
label:
description:
- Defines a label for this check, between 1 and 64 characters long.
required: true
check_type:
description:
- The type of check to create. C(remote.) checks may be created on any
rax_mon_entity. C(agent.) checks may only be created on rax_mon_entities
that have a non-null C(agent_id).
choices:
- remote.dns
- remote.ftp-banner
- remote.http
- remote.imap-banner
- remote.mssql-banner
- remote.mysql-banner
- remote.ping
- remote.pop3-banner
- remote.postgresql-banner
- remote.smtp-banner
- remote.smtp
- remote.ssh
- remote.tcp
- remote.telnet-banner
- agent.filesystem
- agent.memory
- agent.load_average
- agent.cpu
- agent.disk
- agent.network
- agent.plugin
required: true
monitoring_zones_poll:
description:
- Comma-separated list of the names of the monitoring zones the check should
run from. Available monitoring zones include mzdfw, mzhkg, mziad, mzlon,
mzord and mzsyd. Required for remote.* checks; prohibited for agent.* checks.
target_hostname:
description:
- One of `target_hostname` and `target_alias` is required for remote.* checks,
but prohibited for agent.* checks. The hostname this check should target.
Must be a valid IPv4, IPv6, or FQDN.
target_alias:
description:
- One of `target_alias` and `target_hostname` is required for remote.* checks,
but prohibited for agent.* checks. Use the corresponding key in the entity's
`ip_addresses` hash to resolve an IP address to target.
details:
description:
- Additional details specific to the check type. Must be a hash of strings
between 1 and 255 characters long, or an array or object containing 0 to
256 items.
disabled:
description:
- If "yes", ensure the check is created, but don't actually use it yet.
choices: [ "yes", "no" ]
metadata:
description:
- Hash of arbitrary key-value pairs to accompany this check if it fires.
Keys and values must be strings between 1 and 255 characters long.
period:
description:
- The number of seconds between each time the check is performed. Must be
greater than the minimum period set on your account.
timeout:
description:
- The number of seconds this check will wait when attempting to collect
results. Must be less than the period.
author: Ash Wilson
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Create a monitoring check
gather_facts: False
hosts: local
connection: local
tasks:
- name: Associate a check with an existing entity.
rax_mon_check:
credentials: ~/.rax_pub
state: present
entity_id: "{{ the_entity['entity']['id'] }}"
label: the_check
check_type: remote.ping
monitoring_zones_poll: mziad,mzord,mzdfw
details:
count: 10
meta:
hurf: durf
register: the_check
'''
# pyrax is an optional dependency; record availability so main() can emit a
# clean fail_json message instead of an ImportError traceback.
try:
    import pyrax
    HAS_PYRAX = True
except ImportError:
    HAS_PYRAX = False
def cloud_check(module, state, entity_id, label, check_type,
                monitoring_zones_poll, target_hostname, target_alias, details,
                disabled, metadata, period, timeout):
    """Ensure a Rackspace Cloud Monitoring check is present or absent.

    Looks up the check by ``label`` on the entity identified by
    ``entity_id`` and creates, updates, recreates (delete + create) or
    deletes it as required, then exits the module through
    ``module.exit_json`` / ``module.fail_json``.
    """
    # Coerce attributes: the zone list may arrive as a single string, and
    # numeric parameters may arrive as strings from the playbook.
    if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list):
        monitoring_zones_poll = [monitoring_zones_poll]
    if period:
        period = int(period)
    if timeout:
        timeout = int(timeout)
    changed = False
    check = None
    cm = pyrax.cloud_monitoring
    if not cm:
        module.fail_json(msg='Failed to instantiate client. This typically '
                             'indicates an invalid region or an incorrectly '
                             'capitalized region name.')
    entity = cm.get_entity(entity_id)
    if not entity:
        module.fail_json(msg='Failed to instantiate entity. "%s" may not be'
                             ' a valid entity id.' % entity_id)
    # Checks are matched by label; duplicates are an error we surface below.
    existing = [e for e in entity.list_checks() if e.label == label]
    if existing:
        check = existing[0]
    if state == 'present':
        if len(existing) > 1:
            module.fail_json(msg='%s existing checks have a label of %s.' %
                             (len(existing), label))
        should_delete = False
        should_create = False
        should_update = False
        if check:
            # Details may include keys set to default values that are not
            # included in the initial creation.
            #
            # Only force a recreation of the check if one of the *specified*
            # keys is missing or has a different value.
            if details:
                # NOTE: .items() (not the Python-2-only .iteritems()) keeps
                # this working on both Python 2 and Python 3.
                for (key, value) in details.items():
                    if key not in check.details:
                        should_delete = should_create = True
                    elif value != check.details[key]:
                        should_delete = should_create = True
            # Everything other than `details` can be changed in place.
            should_update = label != check.label or \
                (target_hostname and target_hostname != check.target_hostname) or \
                (target_alias and target_alias != check.target_alias) or \
                (disabled != check.disabled) or \
                (metadata and metadata != check.metadata) or \
                (period and period != check.period) or \
                (timeout and timeout != check.timeout) or \
                (monitoring_zones_poll and monitoring_zones_poll != check.monitoring_zones_poll)
            if should_update and not should_delete:
                check.update(label=label,
                             disabled=disabled,
                             metadata=metadata,
                             monitoring_zones_poll=monitoring_zones_poll,
                             timeout=timeout,
                             period=period,
                             target_alias=target_alias,
                             target_hostname=target_hostname)
                changed = True
        else:
            # The check doesn't exist yet.
            should_create = True
        if should_delete:
            check.delete()
        if should_create:
            check = cm.create_check(entity,
                                    label=label,
                                    check_type=check_type,
                                    target_hostname=target_hostname,
                                    target_alias=target_alias,
                                    monitoring_zones_poll=monitoring_zones_poll,
                                    details=details,
                                    disabled=disabled,
                                    metadata=metadata,
                                    period=period,
                                    timeout=timeout)
            changed = True
    elif state == 'absent':
        if check:
            check.delete()
            changed = True
    else:
        module.fail_json(msg='state must be either present or absent.')
    # Report the final state of the check (if any) back to Ansible.
    if check:
        check_dict = {
            "id": check.id,
            "label": check.label,
            "type": check.type,
            "target_hostname": check.target_hostname,
            "target_alias": check.target_alias,
            "monitoring_zones_poll": check.monitoring_zones_poll,
            "details": check.details,
            "disabled": check.disabled,
            "metadata": check.metadata,
            "period": check.period,
            "timeout": check.timeout
        }
        module.exit_json(changed=changed, check=check_dict)
    else:
        module.exit_json(changed=changed)
def main():
    """Ansible entry point: build the argument spec, validate the
    environment, extract parameters and delegate to cloud_check()."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            entity_id=dict(required=True),
            label=dict(required=True),
            check_type=dict(required=True),
            monitoring_zones_poll=dict(),
            target_hostname=dict(),
            target_alias=dict(),
            details=dict(type='dict', default={}),
            disabled=dict(type='bool', default=False),
            metadata=dict(type='dict', default={}),
            period=dict(type='int'),
            timeout=dict(type='int'),
            state=dict(default='present', choices=['present', 'absent'])
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together()
    )
    # Fail fast if the optional pyrax dependency is not importable.
    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')
    entity_id = module.params.get('entity_id')
    label = module.params.get('label')
    check_type = module.params.get('check_type')
    monitoring_zones_poll = module.params.get('monitoring_zones_poll')
    target_hostname = module.params.get('target_hostname')
    target_alias = module.params.get('target_alias')
    details = module.params.get('details')
    # module.boolean() normalises Ansible's many truthy/falsy spellings.
    disabled = module.boolean(module.params.get('disabled'))
    metadata = module.params.get('metadata')
    period = module.params.get('period')
    timeout = module.params.get('timeout')
    state = module.params.get('state')
    # Authenticate the pyrax client from the module's credentials options.
    setup_rax_module(module, pyrax)
    cloud_check(module, state, entity_id, label, check_type,
                monitoring_zones_poll, target_hostname, target_alias, details,
                disabled, metadata, period, timeout)
# Import module snippets. Ansible substitutes the real utility code for these
# wildcard imports at module build time, so they must remain at this position.
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# Invoke the module only when executed directly (Ansible runs the module file
# as __main__); this keeps imports of the file (e.g. by tests) side-effect free.
if __name__ == '__main__':
    main()
| gpl-3.0 |
beni55/edx-platform | common/djangoapps/track/views/segmentio.py | 17 | 10452 | """Handle events that were forwarded from the segment.io webhook integration"""
import datetime
import json
import logging
from django.conf import settings
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.views.decorators.http import require_POST
from django_future.csrf import csrf_exempt
from eventtracking import tracker
from opaque_keys.edx.keys import CourseKey
from opaque_keys import InvalidKeyError
from util.json_request import expect_json
log = logging.getLogger(__name__)
# Diagnostic messages: each is used as the payload of an EventValidationError
# raised by track_segmentio_event() and ends up in the warning log.
ERROR_UNAUTHORIZED = 'Unauthorized'
WARNING_IGNORED_SOURCE = 'Source ignored'
WARNING_IGNORED_TYPE = 'Type ignored'
ERROR_MISSING_USER_ID = 'Required user_id missing from context'
ERROR_USER_NOT_EXIST = 'Specified user does not exist'
ERROR_INVALID_USER_ID = 'Unable to parse userId as an integer'
ERROR_MISSING_DATA = 'The data field must be specified in the properties dictionary'
ERROR_MISSING_NAME = 'The name field must be specified in the properties dictionary'
ERROR_MISSING_TIMESTAMP = 'Required timestamp field not found'
ERROR_MISSING_RECEIVED_AT = 'Required receivedAt field not found'
@require_POST
@expect_json
@csrf_exempt
def segmentio_event(request):
    """
    An endpoint for logging events using segment.io's webhook integration.
    segment.io provides a custom integration mechanism that initiates a request to a configurable URL every time an
    event is received by their system. This endpoint is designed to receive those requests and convert the events into
    standard tracking log entries.
    For now we limit the scope of handled events to track and screen events from mobile devices. In the future we could
    enable logging of other types of events, however, there is significant overlap with our non-segment.io based event
    tracking. Given that segment.io is closed third party solution we are limiting its required usage to just
    collecting events from mobile devices for the time being.
    Many of the root fields of a standard edX tracking event are read out of the "properties" dictionary provided by the
    segment.io event, which is, in turn, provided by the client that emitted the event.
    In order for an event to be accepted and logged the "key" query string parameter must exactly match the django
    setting TRACKING_SEGMENTIO_WEBHOOK_SECRET. While the endpoint is public, we want to limit access to it to the
    segment.io servers only.
    """
    # Validate the security token. We must use a query string parameter for this since we cannot customize the POST body
    # in the segment.io webhook configuration, we can only change the URL that they call, so we force this token to be
    # included in the URL and reject any requests that do not include it. This also assumes HTTPS is used to make the
    # connection between their server and ours.
    expected_secret = getattr(settings, 'TRACKING_SEGMENTIO_WEBHOOK_SECRET', None)
    provided_secret = request.GET.get('key')
    if not expected_secret or provided_secret != expected_secret:
        # Missing or mismatched shared secret: reject without detail.
        return HttpResponse(status=401)
    try:
        track_segmentio_event(request)
    except EventValidationError as err:
        # Log and swallow: a 4xx/5xx here would make segment.io retry the
        # same (invalid) event indefinitely.
        log.warning(
            'Unable to process event received from segment.io: message="%s" event="%s"',
            str(err),
            request.body
        )
    # Do not let the requestor know why the event wasn't saved. If the secret key is compromised this diagnostic
    # information could be used to scrape useful information from the system.
    return HttpResponse(status=200)
class EventValidationError(Exception):
    """Signal that a received segment.io event failed validation."""
def track_segmentio_event(request):  # pylint: disable=too-many-statements
    """
    Record an event received from segment.io to the tracking logs.
    This method assumes that the event has come from a trusted source.
    The received event must meet the following conditions in order to be logged:
    * The value of the "type" field of the event must be included in the list specified by the django setting
      TRACKING_SEGMENTIO_ALLOWED_TYPES. In order to make use of *all* of the features segment.io offers we would have
      to implement some sort of persistent storage of information contained in some actions (like identify). For now,
      we defer support of those actions and just support a limited set that can be handled without storing information
      in external state.
    * The value of the standard "userId" field of the event must be an integer that can be used to look up the user
      using the primary key of the User model.
    * Include a "name" field in the properties dictionary that indicates the edX event name. Note this can differ
      from the "event" field found in the root of a segment.io event. The "event" field at the root of the structure is
      intended to be human readable, the "name" field is expected to conform to the standard for naming events
      found in the edX data documentation.
    * Have originated from a known and trusted segment.io client library. The django setting
      TRACKING_SEGMENTIO_SOURCE_MAP maps the known library names to internal "event_source" strings. In order to be
      logged the event must have a library name that is a valid key in that map.
    Additionally the event can optionally:
    * Provide a "context" dictionary in the properties dictionary. This dictionary will be applied to the
      existing context on the server overriding any existing keys. This context dictionary should include a "course_id"
      field when the event is scoped to a particular course. The value of this field should be a valid course key. The
      context may contain other arbitrary data that will be logged with the event, for example: identification
      information for the device that emitted the event.

    Raises EventValidationError when any of the above conditions fails.
    """
    # The POST body will contain the JSON encoded event
    full_segment_event = request.json
    # We mostly care about the properties
    segment_properties = full_segment_event.get('properties', {})
    # Start with the context provided by segment.io in the "client" field if it exists
    # We should tightly control which fields actually get included in the event emitted.
    segment_context = full_segment_event.get('context')
    # Build up the event context by parsing fields out of the event received from segment.io
    context = {}
    # NOTE(review): segment_context may be None here (no default above); a
    # payload without a "context" key would raise AttributeError on the next
    # line rather than EventValidationError — confirm upstream always sends it.
    library_name = segment_context.get('library', {}).get('name')
    source_map = getattr(settings, 'TRACKING_SEGMENTIO_SOURCE_MAP', {})
    event_source = source_map.get(library_name)
    if not event_source:
        raise EventValidationError(WARNING_IGNORED_SOURCE)
    else:
        context['event_source'] = event_source
    if 'name' not in segment_properties:
        raise EventValidationError(ERROR_MISSING_NAME)
    if 'data' not in segment_properties:
        raise EventValidationError(ERROR_MISSING_DATA)
    # Ignore event types and names that are unsupported
    segment_event_type = full_segment_event.get('type')
    segment_event_name = segment_properties['name']
    allowed_types = [a.lower() for a in getattr(settings, 'TRACKING_SEGMENTIO_ALLOWED_TYPES', [])]
    disallowed_substring_names = [
        a.lower() for a in getattr(settings, 'TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES', [])
    ]
    if (
        not segment_event_type or
        (segment_event_type.lower() not in allowed_types) or
        any(disallowed_subs_name in segment_event_name.lower() for disallowed_subs_name in disallowed_substring_names)
    ):
        raise EventValidationError(WARNING_IGNORED_TYPE)
    # create and populate application field if it doesn't exist
    app_context = segment_properties.get('context', {})
    if 'application' not in app_context:
        context['application'] = {
            'name': app_context.get('app_name', ''),
            'version': '' if not segment_context else segment_context.get('app', {}).get('version', '')
        }
        app_context.pop('app_name', None)
    if segment_context:
        # copy the entire segment's context dict as a sub-field of our custom context dict
        context['client'] = dict(segment_context)
        context['agent'] = segment_context.get('userAgent', '')
        # remove duplicate and unnecessary fields from our copy
        for field in ('traits', 'integrations', 'userAgent'):
            if field in context['client']:
                del context['client'][field]
    # Overlay any context provided in the properties
    context.update(app_context)
    user_id = full_segment_event.get('userId')
    if not user_id:
        raise EventValidationError(ERROR_MISSING_USER_ID)
    # userId is assumed to be the primary key of the django User model
    try:
        user = User.objects.get(pk=user_id)
    except User.DoesNotExist:
        raise EventValidationError(ERROR_USER_NOT_EXIST)
    except ValueError:
        # pk lookup raises ValueError when userId isn't integer-like.
        raise EventValidationError(ERROR_INVALID_USER_ID)
    else:
        context['user_id'] = user.id
        context['username'] = user.username
    # course_id is expected to be provided in the context when applicable
    course_id = context.get('course_id')
    if course_id:
        try:
            course_key = CourseKey.from_string(course_id)
            context['org_id'] = course_key.org
        except InvalidKeyError:
            # A malformed course_id is logged but does not reject the event.
            log.warning(
                'unable to parse course_id "{course_id}" from event: {event}'.format(
                    course_id=course_id,
                    event=json.dumps(full_segment_event),
                ),
                exc_info=True
            )
    if 'timestamp' in full_segment_event:
        context['timestamp'] = parse_iso8601_timestamp(full_segment_event['timestamp'])
    else:
        raise EventValidationError(ERROR_MISSING_TIMESTAMP)
    if 'receivedAt' in full_segment_event:
        context['received_at'] = parse_iso8601_timestamp(full_segment_event['receivedAt'])
    else:
        raise EventValidationError(ERROR_MISSING_RECEIVED_AT)
    context['ip'] = segment_properties.get('context', {}).get('ip', '')
    # Emit the event with the assembled context scoped to 'edx.segmentio'.
    with tracker.get_tracker().context('edx.segmentio', context):
        tracker.emit(segment_event_name, segment_properties.get('data', {}))
def parse_iso8601_timestamp(timestamp):
    """Convert an ISO8601 string ('YYYY-MM-DDTHH:MM:SS.ffffffZ') to a naive datetime."""
    iso8601_with_microseconds = "%Y-%m-%dT%H:%M:%S.%fZ"
    return datetime.datetime.strptime(timestamp, iso8601_with_microseconds)
| agpl-3.0 |
huntxu/neutron | neutron/tests/unit/agent/linux/openvswitch_firewall/test_rules.py | 2 | 21027 | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib import constants
from neutron.agent.linux.openvswitch_firewall import constants as ovsfw_consts
from neutron.agent.linux.openvswitch_firewall import firewall as ovsfw
from neutron.agent.linux.openvswitch_firewall import rules
from neutron.common import constants as n_const
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \
as ovs_consts
from neutron.tests import base
# VLAN tag applied to every OFPort test fixture built in the cases below.
TESTING_VLAN_TAG = 1
class TestIsValidPrefix(base.BaseTestCase):
    """rules.is_valid_prefix: a prefix is 'valid' when it actually restricts
    traffic; any spelling of the all-zero/any-address prefix is not valid."""
    def test_valid_prefix_ipv4(self):
        is_valid = rules.is_valid_prefix('10.0.0.0/0')
        self.assertTrue(is_valid)
    def test_invalid_prefix_ipv4(self):
        is_valid = rules.is_valid_prefix('0.0.0.0/0')
        self.assertFalse(is_valid)
    def test_valid_prefix_ipv6(self):
        is_valid = rules.is_valid_prefix('ffff::0/0')
        self.assertTrue(is_valid)
    def test_invalid_prefix_ipv6(self):
        # All three are textual variants of the IPv6 any-address prefix.
        is_valid = rules.is_valid_prefix('0000:0::0/0')
        self.assertFalse(is_valid)
        is_valid = rules.is_valid_prefix('::0/0')
        self.assertFalse(is_valid)
        is_valid = rules.is_valid_prefix('::/0')
        self.assertFalse(is_valid)
class TestCreateFlowsFromRuleAndPort(base.BaseTestCase):
    """rules.create_flows_from_rule_and_port: verifies the flow template that
    is handed to create_protocol_flows (patched out in setUp), in particular
    that 'any address' prefixes are dropped from the match."""
    def setUp(self):
        super(TestCreateFlowsFromRuleAndPort, self).setUp()
        ovs_port = mock.Mock(vif_mac='00:00:00:00:00:00')
        ovs_port.ofport = 1
        port_dict = {'device': 'port_id'}
        self.port = ovsfw.OFPort(
            port_dict, ovs_port, vlan_tag=TESTING_VLAN_TAG)
        # Intercept the downstream call so each test can inspect the template.
        self.create_flows_mock = mock.patch.object(
            rules, 'create_protocol_flows').start()
    @property
    def passed_flow_template(self):
        # Second positional argument of the (single) intercepted call.
        return self.create_flows_mock.call_args[0][1]
    def _test_create_flows_from_rule_and_port_helper(
            self, rule, expected_template):
        rules.create_flows_from_rule_and_port(rule, self.port)
        self.assertEqual(expected_template, self.passed_flow_template)
    def test_create_flows_from_rule_and_port_no_ip_ipv4(self):
        rule = {
            'ethertype': constants.IPv4,
            'direction': constants.INGRESS_DIRECTION,
        }
        expected_template = {
            'priority': 74,
            'dl_type': n_const.ETHERTYPE_IP,
            'reg_port': self.port.ofport,
        }
        self._test_create_flows_from_rule_and_port_helper(rule,
                                                          expected_template)
    def test_create_flows_from_rule_and_port_src_and_dst_ipv4(self):
        rule = {
            'ethertype': constants.IPv4,
            'direction': constants.INGRESS_DIRECTION,
            'source_ip_prefix': '192.168.0.0/24',
            'dest_ip_prefix': '10.0.0.1/32',
        }
        expected_template = {
            'priority': 74,
            'dl_type': n_const.ETHERTYPE_IP,
            'reg_port': self.port.ofport,
            'nw_src': '192.168.0.0/24',
            'nw_dst': '10.0.0.1/32',
        }
        self._test_create_flows_from_rule_and_port_helper(rule,
                                                          expected_template)
    def test_create_flows_from_rule_and_port_src_and_dst_with_zero_ipv4(self):
        # 0.0.0.0/0 matches everything, so no nw_dst match is expected.
        rule = {
            'ethertype': constants.IPv4,
            'direction': constants.INGRESS_DIRECTION,
            'source_ip_prefix': '192.168.0.0/24',
            'dest_ip_prefix': '0.0.0.0/0',
        }
        expected_template = {
            'priority': 74,
            'dl_type': n_const.ETHERTYPE_IP,
            'reg_port': self.port.ofport,
            'nw_src': '192.168.0.0/24',
        }
        self._test_create_flows_from_rule_and_port_helper(rule,
                                                          expected_template)
    def test_create_flows_from_rule_and_port_no_ip_ipv6(self):
        rule = {
            'ethertype': constants.IPv6,
            'direction': constants.INGRESS_DIRECTION,
        }
        expected_template = {
            'priority': 74,
            'dl_type': n_const.ETHERTYPE_IPV6,
            'reg_port': self.port.ofport,
        }
        self._test_create_flows_from_rule_and_port_helper(rule,
                                                          expected_template)
    def test_create_flows_from_rule_and_port_src_and_dst_ipv6(self):
        rule = {
            'ethertype': constants.IPv6,
            'direction': constants.INGRESS_DIRECTION,
            'source_ip_prefix': '2001:db8:bbbb::1/64',
            'dest_ip_prefix': '2001:db8:aaaa::1/64',
        }
        expected_template = {
            'priority': 74,
            'dl_type': n_const.ETHERTYPE_IPV6,
            'reg_port': self.port.ofport,
            'ipv6_src': '2001:db8:bbbb::1/64',
            'ipv6_dst': '2001:db8:aaaa::1/64',
        }
        self._test_create_flows_from_rule_and_port_helper(rule,
                                                          expected_template)
    def test_create_flows_from_rule_and_port_src_and_dst_with_zero_ipv6(self):
        # ::/0 matches everything, so no ipv6_dst match is expected.
        rule = {
            'ethertype': constants.IPv6,
            'direction': constants.INGRESS_DIRECTION,
            'source_ip_prefix': '2001:db8:bbbb::1/64',
            'dest_ip_prefix': '::/0',
        }
        expected_template = {
            'priority': 74,
            'dl_type': n_const.ETHERTYPE_IPV6,
            'reg_port': self.port.ofport,
            'ipv6_src': '2001:db8:bbbb::1/64',
        }
        self._test_create_flows_from_rule_and_port_helper(rule,
                                                          expected_template)
class TestCreateProtocolFlows(base.BaseTestCase):
    """rules.create_protocol_flows: verifies the per-protocol flow entries
    (table, actions, nw_proto, ICMP type/code, port-range match) that are
    merged with a given flow template for each direction."""
    def setUp(self):
        super(TestCreateProtocolFlows, self).setUp()
        ovs_port = mock.Mock(vif_mac='00:00:00:00:00:00')
        ovs_port.ofport = 1
        port_dict = {'device': 'port_id'}
        self.port = ovsfw.OFPort(
            port_dict, ovs_port, vlan_tag=TESTING_VLAN_TAG)
    def _test_create_protocol_flows_helper(self, direction, rule,
                                           expected_flows):
        # Each expected flow must also carry the caller-supplied template.
        flow_template = {'some_settings': 'foo'}
        for flow in expected_flows:
            flow.update(flow_template)
        flows = rules.create_protocol_flows(
            direction, flow_template, self.port, rule)
        self.assertEqual(expected_flows, flows)
    def test_create_protocol_flows_ingress(self):
        rule = {'protocol': constants.PROTO_NUM_TCP}
        expected_flows = [{
            'table': ovs_consts.RULES_INGRESS_TABLE,
            'actions': 'output:1',
            'nw_proto': constants.PROTO_NUM_TCP,
        }]
        self._test_create_protocol_flows_helper(
            constants.INGRESS_DIRECTION, rule, expected_flows)
    def test_create_protocol_flows_egress(self):
        rule = {'protocol': constants.PROTO_NUM_TCP}
        expected_flows = [{
            'table': ovs_consts.RULES_EGRESS_TABLE,
            'actions': 'resubmit(,{:d})'.format(
                ovs_consts.ACCEPT_OR_INGRESS_TABLE),
            'nw_proto': constants.PROTO_NUM_TCP,
        }]
        self._test_create_protocol_flows_helper(
            constants.EGRESS_DIRECTION, rule, expected_flows)
    def test_create_protocol_flows_no_protocol(self):
        # Without a protocol there is no nw_proto match at all.
        rule = {}
        expected_flows = [{
            'table': ovs_consts.RULES_EGRESS_TABLE,
            'actions': 'resubmit(,{:d})'.format(
                ovs_consts.ACCEPT_OR_INGRESS_TABLE),
        }]
        self._test_create_protocol_flows_helper(
            constants.EGRESS_DIRECTION, rule, expected_flows)
    def test_create_protocol_flows_icmp6(self):
        rule = {'ethertype': constants.IPv6,
                'protocol': constants.PROTO_NUM_IPV6_ICMP}
        expected_flows = [{
            'table': ovs_consts.RULES_EGRESS_TABLE,
            'actions': 'resubmit(,{:d})'.format(
                ovs_consts.ACCEPT_OR_INGRESS_TABLE),
            'nw_proto': constants.PROTO_NUM_IPV6_ICMP,
        }]
        self._test_create_protocol_flows_helper(
            constants.EGRESS_DIRECTION, rule, expected_flows)
    def test_create_protocol_flows_port_range(self):
        # 22-23 collapses into a single masked tcp_dst match.
        rule = {'ethertype': constants.IPv4,
                'protocol': constants.PROTO_NUM_TCP,
                'port_range_min': 22,
                'port_range_max': 23}
        expected_flows = [{
            'table': ovs_consts.RULES_EGRESS_TABLE,
            'actions': 'resubmit(,{:d})'.format(
                ovs_consts.ACCEPT_OR_INGRESS_TABLE),
            'nw_proto': constants.PROTO_NUM_TCP,
            'tcp_dst': '0x0016/0xfffe'
        }]
        self._test_create_protocol_flows_helper(
            constants.EGRESS_DIRECTION, rule, expected_flows)
    def test_create_protocol_flows_icmp(self):
        # For ICMP the rule's port_range_min is reinterpreted as icmp_type.
        rule = {'ethertype': constants.IPv4,
                'protocol': constants.PROTO_NUM_ICMP,
                'port_range_min': 0}
        expected_flows = [{
            'table': ovs_consts.RULES_EGRESS_TABLE,
            'actions': 'resubmit(,{:d})'.format(
                ovs_consts.ACCEPT_OR_INGRESS_TABLE),
            'nw_proto': constants.PROTO_NUM_ICMP,
            'icmp_type': 0
        }]
        self._test_create_protocol_flows_helper(
            constants.EGRESS_DIRECTION, rule, expected_flows)
    def test_create_protocol_flows_ipv6_icmp(self):
        # port_range_min/max map to icmp_type/icmp_code respectively.
        rule = {'ethertype': constants.IPv6,
                'protocol': constants.PROTO_NUM_IPV6_ICMP,
                'port_range_min': 5,
                'port_range_max': 0}
        expected_flows = [{
            'table': ovs_consts.RULES_EGRESS_TABLE,
            'actions': 'resubmit(,{:d})'.format(
                ovs_consts.ACCEPT_OR_INGRESS_TABLE),
            'nw_proto': constants.PROTO_NUM_IPV6_ICMP,
            'icmp_type': 5,
            'icmp_code': 0,
        }]
        self._test_create_protocol_flows_helper(
            constants.EGRESS_DIRECTION, rule, expected_flows)
class TestCreatePortRangeFlows(base.BaseTestCase):
    """rules.create_port_range_flows: verifies how source/destination port
    ranges are expanded into masked tcp_src/tcp_dst match combinations."""
    def _test_create_port_range_flows_helper(self, expected_flows, rule):
        flow_template = {'some_settings': 'foo'}
        for flow in expected_flows:
            flow.update(flow_template)
        port_range_flows = rules.create_port_range_flows(flow_template, rule)
        self.assertEqual(expected_flows, port_range_flows)
    def test_create_port_range_flows_with_source_and_destination(self):
        # Cartesian product: one flow per source port, masked destination.
        rule = {
            'protocol': constants.PROTO_NUM_TCP,
            'source_port_range_min': 123,
            'source_port_range_max': 124,
            'port_range_min': 10,
            'port_range_max': 11,
        }
        expected_flows = [
            {'tcp_src': '0x007b', 'tcp_dst': '0x000a/0xfffe'},
            {'tcp_src': '0x007c', 'tcp_dst': '0x000a/0xfffe'},
        ]
        self._test_create_port_range_flows_helper(expected_flows, rule)
    def test_create_port_range_flows_with_source(self):
        rule = {
            'protocol': constants.PROTO_NUM_TCP,
            'source_port_range_min': 123,
            'source_port_range_max': 124,
        }
        expected_flows = [
            {'tcp_src': '0x007b'},
            {'tcp_src': '0x007c'},
        ]
        self._test_create_port_range_flows_helper(expected_flows, rule)
    def test_create_port_range_flows_with_destination(self):
        rule = {
            'protocol': constants.PROTO_NUM_TCP,
            'port_range_min': 10,
            'port_range_max': 11,
        }
        expected_flows = [
            {'tcp_dst': '0x000a/0xfffe'},
        ]
        self._test_create_port_range_flows_helper(expected_flows, rule)
    def test_create_port_range_flows_without_port_range(self):
        rule = {
            'protocol': constants.PROTO_NUM_TCP,
        }
        expected_flows = []
        self._test_create_port_range_flows_helper(expected_flows, rule)
    def test_create_port_range_with_icmp_protocol(self):
        # NOTE: such call is prevented by create_protocols_flows
        rule = {
            'protocol': constants.PROTO_NUM_ICMP,
            'port_range_min': 10,
            'port_range_max': 11,
        }
        expected_flows = []
        self._test_create_port_range_flows_helper(expected_flows, rule)
class TestCreateFlowsForIpAddress(base.BaseTestCase):
    """rules.create_flows_for_ip_address: one flow per conntrack state with
    conjunction actions for each supplied conjunction id."""
    def _generate_conjuncion_actions(self, conj_ids, offset):
        # Flow i (0 or 1) references conj_id + i, dimension 1 of 2.
        return ','.join(
            ["conjunction(%d,1/2)" % (c + offset)
             for c in conj_ids])
    def test_create_flows_for_ip_address_egress(self):
        expected_template = {
            'table': ovs_consts.RULES_EGRESS_TABLE,
            'priority': 72,
            'dl_type': n_const.ETHERTYPE_IP,
            'reg_net': 0x123,
            'nw_dst': '192.168.0.1/32'
        }
        conj_ids = [12, 20]
        flows = rules.create_flows_for_ip_address(
            '192.168.0.1', constants.EGRESS_DIRECTION, constants.IPv4,
            0x123, conj_ids)
        self.assertEqual(2, len(flows))
        self.assertEqual(ovsfw_consts.OF_STATE_ESTABLISHED_NOT_REPLY,
                         flows[0]['ct_state'])
        self.assertEqual(ovsfw_consts.OF_STATE_NEW_NOT_ESTABLISHED,
                         flows[1]['ct_state'])
        for i in range(2):
            self.assertEqual(self._generate_conjuncion_actions(conj_ids, i),
                             flows[i]['actions'])
        # Everything except actions/ct_state must match the template exactly.
        for f in flows:
            del f['actions']
            del f['ct_state']
            self.assertEqual(expected_template, f)
class TestCreateConjFlows(base.BaseTestCase):
    """rules.create_conj_flows: verifies the pair of conjunction result flows
    (established vs. new traffic) and their actions for ingress IPv6."""
    def test_create_conj_flows(self):
        ovs_port = mock.Mock(ofport=1, vif_mac='00:00:00:00:00:00')
        port_dict = {'device': 'port_id'}
        port = ovsfw.OFPort(
            port_dict, ovs_port, vlan_tag=TESTING_VLAN_TAG)
        conj_id = 1234
        expected_template = {
            'table': ovs_consts.RULES_INGRESS_TABLE,
            'dl_type': n_const.ETHERTYPE_IPV6,
            'priority': 71,
            'conj_id': conj_id,
            'reg_port': port.ofport
        }
        flows = rules.create_conj_flows(port, conj_id,
                                        constants.INGRESS_DIRECTION,
                                        constants.IPv6)
        self.assertEqual(ovsfw_consts.OF_STATE_ESTABLISHED_NOT_REPLY,
                         flows[0]['ct_state'])
        self.assertEqual(ovsfw_consts.OF_STATE_NEW_NOT_ESTABLISHED,
                         flows[1]['ct_state'])
        self.assertEqual("output:{:d}".format(port.ofport),
                         flows[0]['actions'])
        # New traffic is additionally committed to conntrack before output.
        self.assertEqual("ct(commit,zone=NXM_NX_REG{:d}[0..15]),{:s},"
                         "resubmit(,{:d})".format(
                             ovsfw_consts.REG_NET, flows[0]['actions'],
                             ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE),
                         flows[1]['actions'])
        for f in flows:
            del f['actions']
            del f['ct_state']
            self.assertEqual(expected_template, f)
            # The second flow uses the next conjunction id.
            expected_template['conj_id'] += 1
class TestMergeRules(base.BaseTestCase):
    """rules.merge_common_rules / rules.merge_port_ranges: verifies merging
    of identical rules' conjunction ids and splitting of overlapping port
    ranges into disjoint intervals."""
    def setUp(self):
        super(TestMergeRules, self).setUp()
        # Common (direction, ethertype, protocol) fields shared by the
        # port-range test rules below.
        self.rule_tmpl = [('direction', 'ingress'), ('ethertype', 'IPv4'),
                          ('protocol', 6)]
    def _test_merge_port_ranges_helper(self, expected, result):
        """Take a list of (port_range_min, port_range_max, conj_ids)
        and an output from rules.merge_port_ranges and check if they
        are identical, ignoring the other rule fields.
        """
        self.assertEqual(len(expected), len(result))
        for (range_min, range_max, conj_ids), result1 in zip(
                expected, result):
            self.assertEqual(range_min, result1[0]['port_range_min'])
            self.assertEqual(range_max, result1[0]['port_range_max'])
            self.assertEqual(conj_ids, set(result1[1]))
    def test__assert_mergeable_rules(self):
        # Mixed ethertypes in one batch must be rejected.
        self.assertRaises(RuntimeError,
                          rules._assert_mergeable_rules,
                          [({'direction': 'ingress', 'ethertype': 'IPv4',
                             'protocol': 1}, 8),
                           ({'direction': 'ingress', 'ethertype': 'IPv6'},
                            16)])
    def test_merge_common_rules_single(self):
        rule_conj_tuple = ({'direction': 'egress', 'ethertype': 'IPv4',
                            'protocol': 1}, 8)
        result = rules.merge_common_rules([rule_conj_tuple])
        self.assertEqual([(rule_conj_tuple[0], [rule_conj_tuple[1]])],
                         result)
    def test_merge_common_rules(self):
        rule_conj_list = [({'direction': 'ingress', 'ethertype': 'IPv4',
                            'protocol': 1}, 8),
                          ({'direction': 'ingress', 'ethertype': 'IPv4',
                            'protocol': 1, 'port_range_min': 3}, 16),
                          ({'direction': 'ingress', 'ethertype': 'IPv4',
                            'protocol': 1, 'port_range_min': 3,
                            'port_range_max': 0}, 40),
                          ({'direction': 'ingress', 'ethertype': 'IPv4',
                            'protocol': 1}, 24)]
        result = rules.merge_common_rules(rule_conj_list)
        self.assertItemsEqual(
            [({'direction': 'ingress', 'ethertype': 'IPv4',
               'protocol': 1}, [8, 24]),
             ({'direction': 'ingress', 'ethertype': 'IPv4',
               'protocol': 1, 'port_range_min': 3}, [16]),
             ({'direction': 'ingress', 'ethertype': 'IPv4',
               'protocol': 1, 'port_range_min': 3, 'port_range_max': 0},
              [40])],
            result)
    def test_merge_port_ranges_overlapping(self):
        result = rules.merge_port_ranges(
            [(dict([('port_range_min', 20), ('port_range_max', 30)] +
                   self.rule_tmpl), 6),
             (dict([('port_range_min', 30), ('port_range_max', 40)] +
                   self.rule_tmpl), 14),
             (dict([('port_range_min', 35), ('port_range_max', 40)] +
                   self.rule_tmpl), 22),
             (dict([('port_range_min', 20), ('port_range_max', 20)] +
                   self.rule_tmpl), 30)])
        self._test_merge_port_ranges_helper([
            # port_range_min, port_range_max, conj_ids
            (20, 20, {6, 30}),
            (21, 29, {6}),
            (30, 30, {6, 14}),
            (31, 34, {14}),
            (35, 40, {14, 22})], result)
    def test_merge_port_ranges_no_port_ranges(self):
        # Rules without a range cover the whole 1-65535 port space.
        result = rules.merge_port_ranges(
            [(dict(self.rule_tmpl), 10),
             (dict(self.rule_tmpl), 12),
             (dict([('port_range_min', 30), ('port_range_max', 40)] +
                   self.rule_tmpl), 4)])
        self._test_merge_port_ranges_helper([
            (1, 29, {10, 12}),
            (30, 40, {10, 12, 4}),
            (41, 65535, {10, 12})], result)
    def test_merge_port_ranges_nonoverlapping(self):
        result = rules.merge_port_ranges(
            [(dict([('port_range_min', 30), ('port_range_max', 40)] +
                   self.rule_tmpl), 32),
             (dict([('port_range_min', 100), ('port_range_max', 140)] +
                   self.rule_tmpl), 40)])
        self._test_merge_port_ranges_helper(
            [(30, 40, {32}), (100, 140, {40})], result)
class TestFlowPriority(base.BaseTestCase):
    """rules.flow_priority_offset: more specific rules get a larger priority
    offset (remote-group < plain < ICMP < ICMP+type < TCP/typed-ICMPv6)."""
    def test_flow_priority_offset(self):
        self.assertEqual(0,
                         rules.flow_priority_offset(
                             {'foo': 'bar',
                              'remote_group_id': 'hoge'}))
        self.assertEqual(4,
                         rules.flow_priority_offset({'foo': 'bar'}))
        self.assertEqual(5,
                         rules.flow_priority_offset(
                             {'protocol': constants.PROTO_NUM_ICMP}))
        self.assertEqual(7,
                         rules.flow_priority_offset(
                             {'protocol': constants.PROTO_NUM_TCP}))
        self.assertEqual(6,
                         rules.flow_priority_offset(
                             {'protocol': constants.PROTO_NUM_ICMP,
                              'port_range_min': 0}))
        self.assertEqual(7,
                         rules.flow_priority_offset(
                             {'protocol': constants.PROTO_NUM_IPV6_ICMP,
                              'port_range_min': 0, 'port_range_max': 0}))
| apache-2.0 |
liangazhou/django-rdp | packages/PyDev/plugins/org.python.pydev.jython_4.4.0.201510052309/Lib/encodings/cp037.py | 593 | 13377 | """ Python Character Mapping Codec cp037 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP037.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless codec: both directions are single-pass charmap translations
    # driven by the module-level encoding_table/decoding_table.
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is context-free, so no state is kept between calls;
    # [0] drops the consumed-length part of charmap_encode's result.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Charmap decoding is context-free, so no state is kept between calls;
    # [0] drops the consumed-length part of charmap_decode's result.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits encode() from Codec; no stream-specific behaviour needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits decode() from Codec; no stream-specific behaviour needed.
    pass
### encodings module API
def getregentry():
    """Returns the CodecInfo record the codecs registry uses for 'cp037'."""
    codec = Codec()
    return codecs.CodecInfo(
        name='cp037',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
# 256-entry mapping from each cp037 byte value (the tuple index into this
# implicitly-concatenated string) to its Unicode character. Generated by
# gencodec.py from the vendor mapping file named in the module docstring;
# do not edit by hand.
decoding_table = (
    u'\x00' # 0x00 -> NULL
    u'\x01' # 0x01 -> START OF HEADING
    u'\x02' # 0x02 -> START OF TEXT
    u'\x03' # 0x03 -> END OF TEXT
    u'\x9c' # 0x04 -> CONTROL
    u'\t' # 0x05 -> HORIZONTAL TABULATION
    u'\x86' # 0x06 -> CONTROL
    u'\x7f' # 0x07 -> DELETE
    u'\x97' # 0x08 -> CONTROL
    u'\x8d' # 0x09 -> CONTROL
    u'\x8e' # 0x0A -> CONTROL
    u'\x0b' # 0x0B -> VERTICAL TABULATION
    u'\x0c' # 0x0C -> FORM FEED
    u'\r' # 0x0D -> CARRIAGE RETURN
    u'\x0e' # 0x0E -> SHIFT OUT
    u'\x0f' # 0x0F -> SHIFT IN
    u'\x10' # 0x10 -> DATA LINK ESCAPE
    u'\x11' # 0x11 -> DEVICE CONTROL ONE
    u'\x12' # 0x12 -> DEVICE CONTROL TWO
    u'\x13' # 0x13 -> DEVICE CONTROL THREE
    u'\x9d' # 0x14 -> CONTROL
    u'\x85' # 0x15 -> CONTROL
    u'\x08' # 0x16 -> BACKSPACE
    u'\x87' # 0x17 -> CONTROL
    u'\x18' # 0x18 -> CANCEL
    u'\x19' # 0x19 -> END OF MEDIUM
    u'\x92' # 0x1A -> CONTROL
    u'\x8f' # 0x1B -> CONTROL
    u'\x1c' # 0x1C -> FILE SEPARATOR
    u'\x1d' # 0x1D -> GROUP SEPARATOR
    u'\x1e' # 0x1E -> RECORD SEPARATOR
    u'\x1f' # 0x1F -> UNIT SEPARATOR
    u'\x80' # 0x20 -> CONTROL
    u'\x81' # 0x21 -> CONTROL
    u'\x82' # 0x22 -> CONTROL
    u'\x83' # 0x23 -> CONTROL
    u'\x84' # 0x24 -> CONTROL
    u'\n' # 0x25 -> LINE FEED
    u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
    u'\x1b' # 0x27 -> ESCAPE
    u'\x88' # 0x28 -> CONTROL
    u'\x89' # 0x29 -> CONTROL
    u'\x8a' # 0x2A -> CONTROL
    u'\x8b' # 0x2B -> CONTROL
    u'\x8c' # 0x2C -> CONTROL
    u'\x05' # 0x2D -> ENQUIRY
    u'\x06' # 0x2E -> ACKNOWLEDGE
    u'\x07' # 0x2F -> BELL
    u'\x90' # 0x30 -> CONTROL
    u'\x91' # 0x31 -> CONTROL
    u'\x16' # 0x32 -> SYNCHRONOUS IDLE
    u'\x93' # 0x33 -> CONTROL
    u'\x94' # 0x34 -> CONTROL
    u'\x95' # 0x35 -> CONTROL
    u'\x96' # 0x36 -> CONTROL
    u'\x04' # 0x37 -> END OF TRANSMISSION
    u'\x98' # 0x38 -> CONTROL
    u'\x99' # 0x39 -> CONTROL
    u'\x9a' # 0x3A -> CONTROL
    u'\x9b' # 0x3B -> CONTROL
    u'\x14' # 0x3C -> DEVICE CONTROL FOUR
    u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
    u'\x9e' # 0x3E -> CONTROL
    u'\x1a' # 0x3F -> SUBSTITUTE
    u' ' # 0x40 -> SPACE
    u'\xa0' # 0x41 -> NO-BREAK SPACE
    u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
    u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
    u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
    u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
    u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
    u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
    u'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
    u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
    u'\xa2' # 0x4A -> CENT SIGN
    u'.' # 0x4B -> FULL STOP
    u'<' # 0x4C -> LESS-THAN SIGN
    u'(' # 0x4D -> LEFT PARENTHESIS
    u'+' # 0x4E -> PLUS SIGN
    u'|' # 0x4F -> VERTICAL LINE
    u'&' # 0x50 -> AMPERSAND
    u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
    u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
    u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
    u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
    u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
    u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
    u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
    u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
    u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
    u'!' # 0x5A -> EXCLAMATION MARK
    u'$' # 0x5B -> DOLLAR SIGN
    u'*' # 0x5C -> ASTERISK
    u')' # 0x5D -> RIGHT PARENTHESIS
    u';' # 0x5E -> SEMICOLON
    u'\xac' # 0x5F -> NOT SIGN
    u'-' # 0x60 -> HYPHEN-MINUS
    u'/' # 0x61 -> SOLIDUS
    u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
    u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
    u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
    u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
    u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
    u'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
    u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
    u'\xa6' # 0x6A -> BROKEN BAR
    u',' # 0x6B -> COMMA
    u'%' # 0x6C -> PERCENT SIGN
    u'_' # 0x6D -> LOW LINE
    u'>' # 0x6E -> GREATER-THAN SIGN
    u'?' # 0x6F -> QUESTION MARK
    u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
    u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
    u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
    u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
    u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
    u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
    u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
    u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
    u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
    u'`' # 0x79 -> GRAVE ACCENT
    u':' # 0x7A -> COLON
    u'#' # 0x7B -> NUMBER SIGN
    u'@' # 0x7C -> COMMERCIAL AT
    u"'" # 0x7D -> APOSTROPHE
    u'=' # 0x7E -> EQUALS SIGN
    u'"' # 0x7F -> QUOTATION MARK
    u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
    u'a' # 0x81 -> LATIN SMALL LETTER A
    u'b' # 0x82 -> LATIN SMALL LETTER B
    u'c' # 0x83 -> LATIN SMALL LETTER C
    u'd' # 0x84 -> LATIN SMALL LETTER D
    u'e' # 0x85 -> LATIN SMALL LETTER E
    u'f' # 0x86 -> LATIN SMALL LETTER F
    u'g' # 0x87 -> LATIN SMALL LETTER G
    u'h' # 0x88 -> LATIN SMALL LETTER H
    u'i' # 0x89 -> LATIN SMALL LETTER I
    u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
    u'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
    u'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
    u'\xb1' # 0x8F -> PLUS-MINUS SIGN
    u'\xb0' # 0x90 -> DEGREE SIGN
    u'j' # 0x91 -> LATIN SMALL LETTER J
    u'k' # 0x92 -> LATIN SMALL LETTER K
    u'l' # 0x93 -> LATIN SMALL LETTER L
    u'm' # 0x94 -> LATIN SMALL LETTER M
    u'n' # 0x95 -> LATIN SMALL LETTER N
    u'o' # 0x96 -> LATIN SMALL LETTER O
    u'p' # 0x97 -> LATIN SMALL LETTER P
    u'q' # 0x98 -> LATIN SMALL LETTER Q
    u'r' # 0x99 -> LATIN SMALL LETTER R
    u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
    u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
    u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
    u'\xb8' # 0x9D -> CEDILLA
    u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
    u'\xa4' # 0x9F -> CURRENCY SIGN
    u'\xb5' # 0xA0 -> MICRO SIGN
    u'~' # 0xA1 -> TILDE
    u's' # 0xA2 -> LATIN SMALL LETTER S
    u't' # 0xA3 -> LATIN SMALL LETTER T
    u'u' # 0xA4 -> LATIN SMALL LETTER U
    u'v' # 0xA5 -> LATIN SMALL LETTER V
    u'w' # 0xA6 -> LATIN SMALL LETTER W
    u'x' # 0xA7 -> LATIN SMALL LETTER X
    u'y' # 0xA8 -> LATIN SMALL LETTER Y
    u'z' # 0xA9 -> LATIN SMALL LETTER Z
    u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
    u'\xbf' # 0xAB -> INVERTED QUESTION MARK
    u'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
    u'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
    u'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
    u'\xae' # 0xAF -> REGISTERED SIGN
    u'^' # 0xB0 -> CIRCUMFLEX ACCENT
    u'\xa3' # 0xB1 -> POUND SIGN
    u'\xa5' # 0xB2 -> YEN SIGN
    u'\xb7' # 0xB3 -> MIDDLE DOT
    u'\xa9' # 0xB4 -> COPYRIGHT SIGN
    u'\xa7' # 0xB5 -> SECTION SIGN
    u'\xb6' # 0xB6 -> PILCROW SIGN
    u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
    u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
    u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
    u'[' # 0xBA -> LEFT SQUARE BRACKET
    u']' # 0xBB -> RIGHT SQUARE BRACKET
    u'\xaf' # 0xBC -> MACRON
    u'\xa8' # 0xBD -> DIAERESIS
    u'\xb4' # 0xBE -> ACUTE ACCENT
    u'\xd7' # 0xBF -> MULTIPLICATION SIGN
    u'{' # 0xC0 -> LEFT CURLY BRACKET
    u'A' # 0xC1 -> LATIN CAPITAL LETTER A
    u'B' # 0xC2 -> LATIN CAPITAL LETTER B
    u'C' # 0xC3 -> LATIN CAPITAL LETTER C
    u'D' # 0xC4 -> LATIN CAPITAL LETTER D
    u'E' # 0xC5 -> LATIN CAPITAL LETTER E
    u'F' # 0xC6 -> LATIN CAPITAL LETTER F
    u'G' # 0xC7 -> LATIN CAPITAL LETTER G
    u'H' # 0xC8 -> LATIN CAPITAL LETTER H
    u'I' # 0xC9 -> LATIN CAPITAL LETTER I
    u'\xad' # 0xCA -> SOFT HYPHEN
    u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
    u'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
    u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
    u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
    u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
    u'}' # 0xD0 -> RIGHT CURLY BRACKET
    u'J' # 0xD1 -> LATIN CAPITAL LETTER J
    u'K' # 0xD2 -> LATIN CAPITAL LETTER K
    u'L' # 0xD3 -> LATIN CAPITAL LETTER L
    u'M' # 0xD4 -> LATIN CAPITAL LETTER M
    u'N' # 0xD5 -> LATIN CAPITAL LETTER N
    u'O' # 0xD6 -> LATIN CAPITAL LETTER O
    u'P' # 0xD7 -> LATIN CAPITAL LETTER P
    u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
    u'R' # 0xD9 -> LATIN CAPITAL LETTER R
    u'\xb9' # 0xDA -> SUPERSCRIPT ONE
    u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
    u'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
    u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
    u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
    u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
    u'\\' # 0xE0 -> REVERSE SOLIDUS
    u'\xf7' # 0xE1 -> DIVISION SIGN
    u'S' # 0xE2 -> LATIN CAPITAL LETTER S
    u'T' # 0xE3 -> LATIN CAPITAL LETTER T
    u'U' # 0xE4 -> LATIN CAPITAL LETTER U
    u'V' # 0xE5 -> LATIN CAPITAL LETTER V
    u'W' # 0xE6 -> LATIN CAPITAL LETTER W
    u'X' # 0xE7 -> LATIN CAPITAL LETTER X
    u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
    u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
    u'\xb2' # 0xEA -> SUPERSCRIPT TWO
    u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    u'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
    u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
    u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
    u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
    u'0' # 0xF0 -> DIGIT ZERO
    u'1' # 0xF1 -> DIGIT ONE
    u'2' # 0xF2 -> DIGIT TWO
    u'3' # 0xF3 -> DIGIT THREE
    u'4' # 0xF4 -> DIGIT FOUR
    u'5' # 0xF5 -> DIGIT FIVE
    u'6' # 0xF6 -> DIGIT SIX
    u'7' # 0xF7 -> DIGIT SEVEN
    u'8' # 0xF8 -> DIGIT EIGHT
    u'9' # 0xF9 -> DIGIT NINE
    u'\xb3' # 0xFA -> SUPERSCRIPT THREE
    u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
    u'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
    u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
    u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
    u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
# Inverse mapping (character -> byte) derived from decoding_table.
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
google/deepvariant | deepvariant/make_examples_test.py | 1 | 90136 | # Copyright 2020 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for deepvariant.make_examples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
if 'google' in sys.modules and 'google.protobuf' not in sys.modules:
del sys.modules['google']
import copy
import enum
import errno
import platform
import sys
from absl import flags
from absl import logging
from absl.testing import absltest
from absl.testing import flagsaver
from absl.testing import parameterized
import mock
import six
from tensorflow.python.platform import gfile
from third_party.nucleus.io import fasta
from third_party.nucleus.io import tfrecord
from third_party.nucleus.io import vcf
from third_party.nucleus.protos import reads_pb2
from third_party.nucleus.protos import reference_pb2
from third_party.nucleus.protos import variants_pb2
from third_party.nucleus.testing import test_utils
from third_party.nucleus.util import ranges
from third_party.nucleus.util import variant_utils
from third_party.nucleus.util import variantcall_utils
from third_party.nucleus.util import vcf_constants
from deepvariant import dv_constants
from deepvariant import make_examples
from deepvariant import testdata
from deepvariant import tf_utils
from deepvariant.labeler import variant_labeler
from deepvariant.protos import deepvariant_pb2
from deepvariant.protos import realigner_pb2
FLAGS = flags.FLAGS
# Dictionary mapping keys to decoders for decode_example function.
# Each entry maps a tf.Example feature name to the tf_utils helper that
# converts that feature into a Python value (proto, list, int, ...).
_EXAMPLE_DECODERS = {
    'locus': tf_utils.example_locus,
    'alt_allele_indices/encoded': tf_utils.example_alt_alleles_indices,
    'image/encoded': tf_utils.example_encoded_image,
    'variant/encoded': tf_utils.example_variant,
    'variant_type': tf_utils.example_variant_type,
    'label': tf_utils.example_label,
    'image/format': tf_utils.example_image_format,
    'image/shape': tf_utils.example_image_shape,
    'sequencing_type': tf_utils.example_sequencing_type,
}
def decode_example(example):
  """Decodes a DeepVariant tf.Example into a dict of Pythonic structures.

  Args:
    example: tf.Example proto to turn into a dictionary.

  Returns:
    A dict with one entry per feature in `example`, each value decoded by the
    matching _EXAMPLE_DECODERS helper into a Python structure (proto, list,
    etc.).

  Raises:
    KeyError: If example contains a feature without a known decoder.
  """
  decoded = {}
  for feature_name in example.features.feature:
    decoder = _EXAMPLE_DECODERS.get(feature_name)
    if decoder is None:
      raise KeyError('Unexpected example key', feature_name)
    decoded[feature_name] = decoder(example)
  return decoded
def setUpModule():
  # Resolve the shared testdata paths once before any test in this module runs.
  testdata.init()
def _make_contigs(specs):
  """Makes ContigInfo protos from specs.

  Args:
    specs: A list of 2- or 3-tuples, all of the same length. 2-tuples hold the
      name and length in basepairs of each contig, and pos_in_fasta is set to
      the tuple's index in the list; 3-tuples hold name, length, and
      pos_in_fasta explicitly.

  Returns:
    A list of ContigInfo protos, one for each spec in specs.
  """

  def _contig(name, length, pos):
    return reference_pb2.ContigInfo(
        name=name, n_bases=length, pos_in_fasta=pos)

  if specs and len(specs[0]) == 3:
    return [_contig(name, length, pos) for name, length, pos in specs]
  return [
      _contig(name, length, pos)
      for pos, (name, length) in enumerate(specs)
  ]
def _from_literals_list(literals, contig_map=None):
  """Makes a list of Range objects from region literals like 'chr20:1-100'."""
  return ranges.parse_literals(literals, contig_map)
def _from_literals(literals, contig_map=None):
  """Makes a RangeSet of intervals from region literals like 'chr20:1-100'."""
  return ranges.RangeSet.from_regions(literals, contig_map)
def _sharded(basename, num_shards=None):
if num_shards:
return basename + '@' + str(num_shards)
else:
return basename
class TestConditions(enum.Enum):
  """Enum capturing which input-file condition a test case exercises."""

  USE_BAM = 1  # single BAM passed via FLAGS.reads
  USE_CRAM = 2  # single CRAM passed via FLAGS.reads
  USE_MULTI_BAMS = 3  # comma-separated list of BAMs passed via FLAGS.reads
class MakeExamplesEnd2EndTest(parameterized.TestCase):
# Golden sets are created with learning/genomics/internal/create_golden.sh
  @parameterized.parameters(
      # All tests are run with fast_pass_aligner enabled. There are no
      # golden sets version for ssw realigner.
      dict(mode='calling', num_shards=0),
      dict(mode='calling', num_shards=3),
      dict(
          mode='training', num_shards=0, labeler_algorithm='haplotype_labeler'),
      dict(
          mode='training', num_shards=3, labeler_algorithm='haplotype_labeler'),
      dict(
          mode='training', num_shards=0,
          labeler_algorithm='positional_labeler'),
      dict(
          mode='training', num_shards=3,
          labeler_algorithm='positional_labeler'),
      # The following tests are for CRAM input:
      dict(
          mode='calling', num_shards=0, test_condition=TestConditions.USE_CRAM),
      dict(
          mode='training',
          num_shards=0,
          test_condition=TestConditions.USE_CRAM,
          labeler_algorithm='haplotype_labeler'),
      # The following tests are for multiple BAM inputs:
      dict(
          mode='calling',
          num_shards=0,
          test_condition=TestConditions.USE_MULTI_BAMS),
      dict(
          mode='training',
          num_shards=0,
          test_condition=TestConditions.USE_MULTI_BAMS,
          labeler_algorithm='haplotype_labeler'),
  )
  @flagsaver.flagsaver
  def test_make_examples_end2end(self,
                                 mode,
                                 num_shards,
                                 test_condition=TestConditions.USE_BAM,
                                 labeler_algorithm=None,
                                 use_fast_pass_aligner=True):
    """Runs make_examples end-to-end and checks outputs against golden sets."""
    self.assertIn(mode, {'calling', 'training'})
    region = ranges.parse_literal('chr20:10,000,000-10,010,000')
    FLAGS.write_run_info = True
    FLAGS.ref = testdata.CHR20_FASTA
    # Select the reads input according to the test condition.
    if test_condition == TestConditions.USE_BAM:
      FLAGS.reads = testdata.CHR20_BAM
    elif test_condition == TestConditions.USE_CRAM:
      FLAGS.reads = testdata.CHR20_CRAM
    elif test_condition == TestConditions.USE_MULTI_BAMS:
      FLAGS.reads = ','.join(
          [testdata.CHR20_BAM_FIRST_HALF, testdata.CHR20_BAM_SECOND_HALF])
    FLAGS.candidates = test_utils.test_tmpfile(
        _sharded('vsc.tfrecord', num_shards))
    FLAGS.examples = test_utils.test_tmpfile(
        _sharded('examples.tfrecord', num_shards))
    FLAGS.regions = [ranges.to_literal(region)]
    FLAGS.partition_size = 1000
    FLAGS.mode = mode
    FLAGS.gvcf_gq_binsize = 5
    FLAGS.use_fast_pass_aligner = use_fast_pass_aligner
    if labeler_algorithm is not None:
      FLAGS.labeler_algorithm = labeler_algorithm
    if mode == 'calling':
      FLAGS.gvcf = test_utils.test_tmpfile(
          _sharded('gvcf.tfrecord', num_shards))
    else:
      FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
      FLAGS.confident_regions = testdata.CONFIDENT_REGIONS_BED
    # Run one make_examples task per shard (at least one).
    for task_id in range(max(num_shards, 1)):
      FLAGS.task = task_id
      options = make_examples.default_options(add_flags=True)
      make_examples.make_examples_runner(options)
      # Check that our run_info proto contains the basic fields we'd expect:
      # (a) our options are written to the run_info.options field.
      run_info = make_examples.read_make_examples_run_info(
          options.run_info_filename)
      self.assertEqual(run_info.options, options)
      # (b) run_info.resource_metrics is present and contains our hostname.
      self.assertTrue(run_info.HasField('resource_metrics'))
      self.assertEqual(run_info.resource_metrics.host_name, platform.node())
    # Test that our candidates are reasonable, calling specific helper functions
    # to check lots of properties of the output.
    candidates = sorted(
        tfrecord.read_tfrecords(
            FLAGS.candidates, proto=deepvariant_pb2.DeepVariantCall),
        key=lambda c: variant_utils.variant_range_tuple(c.variant))
    self.verify_deepvariant_calls(candidates, options)
    self.verify_variants([call.variant for call in candidates],
                         region,
                         options,
                         is_gvcf=False)
    # Verify that the variants in the examples are all good.
    examples = self.verify_examples(
        FLAGS.examples, region, options, verify_labels=mode == 'training')
    example_variants = [tf_utils.example_variant(ex) for ex in examples]
    self.verify_variants(example_variants, region, options, is_gvcf=False)
    # Verify the integrity of the examples and then check that they match our
    # golden labeled examples. Note we expect the order for both training and
    # calling modes to produce deterministic order because we fix the random
    # seed.
    if mode == 'calling':
      golden_file = _sharded(testdata.GOLDEN_CALLING_EXAMPLES, num_shards)
    else:
      golden_file = _sharded(testdata.GOLDEN_TRAINING_EXAMPLES, num_shards)
    self.assertDeepVariantExamplesEqual(
        examples, list(tfrecord.read_tfrecords(golden_file)))
    if mode == 'calling':
      nist_reader = vcf.VcfReader(testdata.TRUTH_VARIANTS_VCF)
      nist_variants = list(nist_reader.query(region))
      self.verify_nist_concordance(example_variants, nist_variants)
      # Check the quality of our generated gvcf file.
      gvcfs = variant_utils.sorted_variants(
          tfrecord.read_tfrecords(FLAGS.gvcf, proto=variants_pb2.Variant))
      self.verify_variants(gvcfs, region, options, is_gvcf=True)
      self.verify_contiguity(gvcfs, region)
      gvcf_golden_file = _sharded(testdata.GOLDEN_POSTPROCESS_GVCF_INPUT,
                                  num_shards)
      expected_gvcfs = list(
          tfrecord.read_tfrecords(gvcf_golden_file, proto=variants_pb2.Variant))
      # Despite the name, assertCountEqual checks that all elements match.
      self.assertCountEqual(gvcfs, expected_gvcfs)
    if (mode == 'training' and num_shards == 0 and
        labeler_algorithm != 'positional_labeler'):
      # The positional labeler doesn't track metrics, so don't try to read them
      # in when that's the mode.
      self.assertEqual(
          make_examples.read_make_examples_run_info(
              testdata.GOLDEN_MAKE_EXAMPLES_RUN_INFO).labeling_metrics,
          run_info.labeling_metrics)
  @flagsaver.flagsaver
  def test_make_examples_end2end_failed_on_mismatched_multi_bam(self):
    """make_examples must fail when one of multiple BAMs lacks the region."""
    region = ranges.parse_literal('chr20:10,000,000-10,010,000')
    FLAGS.write_run_info = True
    FLAGS.ref = testdata.CHR20_FASTA
    # NOCHR20_BAM has no chr20 contig, so querying the region must fail.
    FLAGS.reads = ','.join([testdata.CHR20_BAM, testdata.NOCHR20_BAM])
    FLAGS.candidates = test_utils.test_tmpfile(
        _sharded('mismatched_multi_bam.vsc.tfrecord'))
    FLAGS.examples = test_utils.test_tmpfile(
        _sharded('mismatched_multi_bam.examples.tfrecord'))
    FLAGS.regions = [ranges.to_literal(region)]
    FLAGS.partition_size = 1000
    FLAGS.mode = 'calling'
    FLAGS.gvcf_gq_binsize = 5
    options = make_examples.default_options(add_flags=True)
    # This shows an example of what the error message looks like:
    with six.assertRaisesRegex(
        self, ValueError, 'Not found: Unknown reference_name '
        'reference_name: "chr20" start: 9999999 end: 10000999\n'
        'The region chr20:10000000-10000999 does not exist in '
        '.*HG002_NIST_150bp_downsampled_30x.chr20.10_10p1mb.bam.'):
      make_examples.make_examples_runner(options)
  @flagsaver.flagsaver
  def test_make_examples_end2end_failed_on_cram(self):
    """CRAM parsing must fail when --use_ref_for_cram is disabled."""
    region = ranges.parse_literal('chr20:10,000,000-10,010,000')
    FLAGS.use_ref_for_cram = False
    FLAGS.write_run_info = True
    FLAGS.ref = testdata.CHR20_FASTA
    FLAGS.reads = testdata.CHR20_CRAM
    FLAGS.candidates = test_utils.test_tmpfile(_sharded('failed.vsc.tfrecord'))
    FLAGS.examples = test_utils.test_tmpfile(
        _sharded('failed.examples.tfrecord'))
    FLAGS.regions = [ranges.to_literal(region)]
    FLAGS.partition_size = 1000
    FLAGS.mode = 'calling'
    FLAGS.gvcf_gq_binsize = 5
    options = make_examples.default_options(add_flags=True)
    with six.assertRaisesRegex(self, ValueError,
                               'Failed to parse BAM/CRAM file.'):
      make_examples.make_examples_runner(options)
  # Golden sets are created with learning/genomics/internal/create_golden.sh
  @flagsaver.flagsaver
  def test_make_examples_training_end2end_with_customized_classes_labeler(self):
    """Training with the customized_classes labeler matches its golden set."""
    FLAGS.labeler_algorithm = 'customized_classes_labeler'
    FLAGS.customized_classes_labeler_classes_list = 'ref,class1,class2'
    FLAGS.customized_classes_labeler_info_field_name = 'type'
    region = ranges.parse_literal('chr20:10,000,000-10,004,000')
    FLAGS.regions = [ranges.to_literal(region)]
    FLAGS.ref = testdata.CHR20_FASTA
    FLAGS.reads = testdata.CHR20_BAM
    FLAGS.candidates = test_utils.test_tmpfile(_sharded('vsc.tfrecord'))
    FLAGS.examples = test_utils.test_tmpfile(_sharded('examples.tfrecord'))
    FLAGS.partition_size = 1000
    FLAGS.mode = 'training'
    FLAGS.gvcf_gq_binsize = 5
    # Truth VCF annotated with the 'type' INFO field read by the labeler.
    FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF_WITH_TYPES
    FLAGS.confident_regions = testdata.CONFIDENT_REGIONS_BED
    options = make_examples.default_options(add_flags=True)
    make_examples.make_examples_runner(options)
    golden_file = _sharded(testdata.CUSTOMIZED_CLASSES_GOLDEN_TRAINING_EXAMPLES)
    # Verify that the variants in the examples are all good.
    examples = self.verify_examples(
        FLAGS.examples, region, options, verify_labels=True)
    self.assertDeepVariantExamplesEqual(
        examples, list(tfrecord.read_tfrecords(golden_file)))
  # Golden sets are created with learning/genomics/internal/create_golden.sh
  @parameterized.parameters(
      dict(mode='calling'),
      dict(mode='training'),
  )
  @flagsaver.flagsaver
  def test_make_examples_end2end_vcf_candidate_importer(self, mode):
    """vcf_candidate_importer runs match the mode-specific golden sets."""
    FLAGS.variant_caller = 'vcf_candidate_importer'
    FLAGS.ref = testdata.CHR20_FASTA
    FLAGS.reads = testdata.CHR20_BAM
    FLAGS.candidates = test_utils.test_tmpfile(
        _sharded('vcf_candidate_importer.{}.tfrecord'.format(mode)))
    FLAGS.examples = test_utils.test_tmpfile(
        _sharded('vcf_candidate_importer.examples.{}.tfrecord'.format(mode)))
    FLAGS.mode = mode
    if mode == 'calling':
      golden_file = _sharded(
          testdata.GOLDEN_VCF_CANDIDATE_IMPORTER_CALLING_EXAMPLES)
      FLAGS.proposed_variants = testdata.VCF_CANDIDATE_IMPORTER_VARIANTS
      # Adding the following flags to match how the testdata was created.
      FLAGS.regions = 'chr20:59,777,000-60,000,000'
      FLAGS.realign_reads = False
    else:
      golden_file = _sharded(
          testdata.GOLDEN_VCF_CANDIDATE_IMPORTER_TRAINING_EXAMPLES)
      FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
    options = make_examples.default_options(add_flags=True)
    make_examples.make_examples_runner(options)
    # Verify that the variants in the examples are all good.
    examples = self.verify_examples(
        FLAGS.examples, None, options, verify_labels=mode == 'training')
    self.assertDeepVariantExamplesEqual(
        examples, list(tfrecord.read_tfrecords(golden_file)))
    self.assertEqual(decode_example(examples[0])['image/shape'], [100, 221, 6])
  @flagsaver.flagsaver
  def test_make_examples_training_vcf_candidate_importer_regions(self):
    """Confirms confident_regions is used in vcf_candidate_importer training."""

    def _get_examples(use_confident_regions=False):
      # `flag_name` can be either 'confident_regions' or 'regions'. Both should
      # be used to constrain the set of candidates generated, and as a result
      # generating the same examples.
      bed_path = test_utils.test_tmpfile('vcf_candidate_importer.bed')
      with gfile.Open(bed_path, 'w') as fout:
        fout.write('\t'.join(['chr20', '10000000', '10001000']) + '\n')
      if use_confident_regions:
        FLAGS.confident_regions = bed_path
        FLAGS.regions = ''
      else:
        FLAGS.confident_regions = ''
        FLAGS.regions = bed_path
      FLAGS.examples = test_utils.test_tmpfile(
          _sharded('vcf_candidate_importer.tfrecord'))
      FLAGS.mode = 'training'
      FLAGS.reads = testdata.CHR20_BAM
      FLAGS.ref = testdata.CHR20_FASTA
      FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
      FLAGS.variant_caller = 'vcf_candidate_importer'
      options = make_examples.default_options(add_flags=True)
      make_examples.make_examples_runner(options)
      # Verify that the variants in the examples are all good.
      examples = self.verify_examples(
          FLAGS.examples, None, options, verify_labels=False)
      return examples

    # Both flag choices should constrain candidates to the same window, so
    # the resulting examples must match.
    examples_with_regions = _get_examples(use_confident_regions=False)
    examples_with_confident_regions = _get_examples(use_confident_regions=True)
    self.assertNotEmpty(examples_with_regions)
    self.assertDeepVariantExamplesEqual(examples_with_regions,
                                        examples_with_confident_regions)
  # Golden sets are created with learning/genomics/internal/create_golden.sh
  @parameterized.parameters(
      dict(alt_align='rows', expected_shape=[300, 221, 6]),
      dict(alt_align='diff_channels', expected_shape=[100, 221, 8]),
  )
  @flagsaver.flagsaver
  def test_make_examples_training_end2end_with_alt_aligned_pileup(
      self, alt_align, expected_shape):
    """--alt_aligned_pileup changes pileup shape and matches its golden set."""
    region = ranges.parse_literal('chr20:10,000,000-10,010,000')
    FLAGS.regions = [ranges.to_literal(region)]
    FLAGS.ref = testdata.CHR20_FASTA
    FLAGS.reads = testdata.CHR20_BAM
    FLAGS.candidates = test_utils.test_tmpfile(_sharded('vsc.tfrecord'))
    FLAGS.examples = test_utils.test_tmpfile(_sharded('examples.tfrecord'))
    FLAGS.partition_size = 1000
    FLAGS.mode = 'training'
    FLAGS.gvcf_gq_binsize = 5
    FLAGS.alt_aligned_pileup = alt_align  # This is the only input change.
    FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
    FLAGS.confident_regions = testdata.CONFIDENT_REGIONS_BED
    options = make_examples.default_options(add_flags=True)
    # Run make_examples with the flags above.
    make_examples.make_examples_runner(options)
    # Check the output for shape and against the golden file.
    if alt_align == 'rows':
      golden_file = _sharded(testdata.ALT_ALIGNED_ROWS_EXAMPLES)
    elif alt_align == 'diff_channels':
      golden_file = _sharded(testdata.ALT_ALIGNED_DIFF_CHANNELS_EXAMPLES)
    else:
      raise ValueError("Golden data doesn't exist for this alt_align option: "
                       '{}'.format(alt_align))
    # Verify that the variants in the examples are all good.
    examples = self.verify_examples(
        FLAGS.examples, region, options, verify_labels=True)
    self.assertDeepVariantExamplesEqual(
        examples, list(tfrecord.read_tfrecords(golden_file)))
    # Pileup image should have 3 rows of height 100, so resulting height is 300.
    self.assertEqual(decode_example(examples[0])['image/shape'], expected_shape)
  @flagsaver.flagsaver
  def test_make_examples_runtime_runtime_by_region(self):
    """--runtime_by_region writes a sharded per-region TSV runtime profile."""
    region = ranges.parse_literal('chr20:10,000,000-10,010,000')
    FLAGS.ref = testdata.CHR20_FASTA
    FLAGS.reads = testdata.CHR20_BAM
    FLAGS.regions = [ranges.to_literal(region)]
    FLAGS.mode = 'calling'
    num_shards = 4
    FLAGS.examples = test_utils.test_tmpfile(
        _sharded('examples.tfrecord', num_shards))
    # Use same number of shards for profiling files as examples.
    output_prefix = test_utils.test_tmpfile('runtime_profile')
    FLAGS.runtime_by_region = output_prefix + '@{}'.format(num_shards)
    FLAGS.task = 2
    # Run make_examples with those FLAGS.
    options = make_examples.default_options(add_flags=True)
    make_examples.make_examples_runner(options)
    # Sharded output ending in @4 becomes -00002-of-00004 for task 2.
    expected_output_path = output_prefix + '-0000{}-of-00004'.format(FLAGS.task)
    expected_columns = [
        'region', 'get reads', 'find candidates', 'make pileup images',
        'write outputs', 'num reads', 'num candidates', 'num examples'
    ]
    with gfile.Open(expected_output_path, 'r') as fin:
      # First line is the tab-separated header; the rest are data rows.
      header = fin.readline()
      column_names = header.strip().split('\t')
      self.assertEqual(expected_columns, column_names)
      non_header_lines = fin.readlines()
      self.assertLen(non_header_lines, 3)
      one_row = non_header_lines[0].strip().split('\t')
      self.assertEqual(len(one_row), len(column_names))
      self.assertGreater(int(one_row[5]), 0, msg='num reads > 0')
      self.assertGreater(int(one_row[6]), 0, msg='num candidates > 0')
      self.assertGreater(int(one_row[7]), 0, msg='num examples > 0')
  @parameterized.parameters(
      dict(select_types=None, expected_count=78),
      dict(select_types='all', expected_count=78),
      dict(select_types='snps', expected_count=62),
      dict(select_types='indels', expected_count=12),
      dict(select_types='snps indels', expected_count=74),
      dict(select_types='multi-allelics', expected_count=4),
  )
  @flagsaver.flagsaver
  def test_make_examples_with_variant_selection(self, select_types,
                                                expected_count):
    """--select_variant_types restricts candidates to the selected classes."""
    if select_types is not None:
      FLAGS.select_variant_types = select_types
    region = ranges.parse_literal('chr20:10,000,000-10,010,000')
    FLAGS.regions = [ranges.to_literal(region)]
    FLAGS.ref = testdata.CHR20_FASTA
    FLAGS.reads = testdata.CHR20_BAM
    FLAGS.candidates = test_utils.test_tmpfile(_sharded('vsc.tfrecord'))
    FLAGS.examples = test_utils.test_tmpfile(_sharded('examples.tfrecord'))
    FLAGS.partition_size = 1000
    FLAGS.mode = 'calling'
    options = make_examples.default_options(add_flags=True)
    make_examples.make_examples_runner(options)
    candidates = list(tfrecord.read_tfrecords(FLAGS.candidates))
    self.assertLen(candidates, expected_count)
def verify_nist_concordance(self, candidates, nist_variants):
# Tests that we call almost all of the real variants (according to NIST's
# Genome in a Bottle callset for NA12878) in our candidate callset.
# Tests that we don't have an enormous number of FP calls. We should have
# no more than 5x (arbitrary) more candidate calls than real calls. If we
# have more it's likely due to some major pipeline problem.
self.assertLess(len(candidates), 5 * len(nist_variants))
tp_count = 0
for nist_variant in nist_variants:
if self.assertVariantIsPresent(nist_variant, candidates):
tp_count = tp_count + 1
self.assertGreater(
tp_count / len(nist_variants), 0.983,
'Recall must be greater than 0.983. TP={}, Truth variants={}'.format(
tp_count, len(nist_variants)))
def assertDeepVariantExamplesEqual(self, actual, expected):
"""Asserts that actual and expected tf.Examples from DeepVariant are equal.
Args:
actual: iterable of tf.Examples from DeepVariant. DeepVariant examples
that we want to check.
expected: iterable of tf.Examples. Expected results for actual.
"""
self.assertEqual(len(actual), len(expected))
for i in range(len(actual)):
actual_example = decode_example(actual[i])
expected_example = decode_example(expected[i])
self.assertEqual(actual_example.keys(), expected_example.keys())
for key in actual_example:
self.assertEqual(actual_example[key], expected_example[key],
'Failed on %s' % key)
def assertVariantIsPresent(self, to_find, variants):
  """Returns True iff to_find is present among variants.

  A variant matches when it shares to_find's (reference_bases, start, end)
  key and its alternate_bases contain every alt allele of to_find; the
  matched call may carry additional alt alleles beyond those.
  """

  def _key(variant):
    return (variant.reference_bases, variant.start, variant.end)

  # Collect the calls in our actual call set at to_find's site.
  same_site = [
      variant for variant in variants if _key(variant) == _key(to_find)
  ]
  if not same_site:
    return False
  # Every alt allele we are looking for must appear in the first match,
  # though that call might have more than just those alts.
  return all(
      alt in same_site[0].alternate_bases for alt in to_find.alternate_bases)
def verify_variants(self, variants, region, options, is_gvcf):
  """Checks structural invariants of Variant protos emitted by make_examples.

  Args:
    variants: iterable of Variant protos to check.
    region: Range proto the variants should fall in; falsy to skip the
      positional checks.
    options: DeepVariantOptions; each call's call_set_name must match
      options.variant_caller_options.sample_name.
    is_gvcf: bool. If True, additionally enforce gVCF expectations on each
      VariantCall (0/0 or ./. genotype, 3 genotype likelihoods, GQ >= 0).
  """
  # Verifies simple properties of the Variant protos in variants. For example,
  # checks that the reference_name() is our expected chromosome. The flag
  # is_gvcf determines how we check the VariantCall field of each variant,
  # enforcing expectations for gVCF records if true or variant calls if false.
  for variant in variants:
    if region:
      self.assertEqual(variant.reference_name, region.reference_name)
      self.assertGreaterEqual(variant.start, region.start)
      self.assertLessEqual(variant.start, region.end)
    self.assertNotEqual(variant.reference_bases, '')
    self.assertNotEmpty(variant.alternate_bases)
    # Each emitted variant carries exactly one VariantCall.
    self.assertLen(variant.calls, 1)

    call = variant_utils.only_call(variant)
    self.assertEqual(call.call_set_name,
                     options.variant_caller_options.sample_name)
    if is_gvcf:
      # GVCF records should have 0/0 or ./. (un-called) genotypes as they are
      # reference sites, have genotype likelihoods and a GQ value.
      self.assertIn(list(call.genotype), [[0, 0], [-1, -1]])
      self.assertLen(call.genotype_likelihood, 3)
      self.assertGreaterEqual(variantcall_utils.get_gq(call), 0)
def verify_contiguity(self, contiguous_variants, region):
  """Verifies region is fully covered by gvcf records.

  Args:
    contiguous_variants: position-sorted list of Variant protos expected to
      tile `region` without gaps.
    region: Range proto covering the calling interval.
  """
  # We expect that the intervals cover every base, so the first variant should
  # be at our interval start and the last one should end at our interval end.
  self.assertNotEmpty(contiguous_variants)
  self.assertEqual(region.start, contiguous_variants[0].start)
  self.assertEqual(region.end, contiguous_variants[-1].end)

  # After this loop completes successfully we know that together the GVCF and
  # Variants form a fully contiguous cover of our calling interval, as
  # expected.
  for v1, v2 in zip(contiguous_variants, contiguous_variants[1:]):
    # Sequential variants should be contiguous, meaning that v2.start should
    # be v1's end, as the end is exclusive and the start is inclusive.
    if v1.start == v2.start and v1.end == v2.end:
      # Skip duplicates here as we may have multi-allelic variants turning
      # into multiple bi-allelic variants at the same site.
      continue
    # We expect to immediately follow the end of a gvcf record but to occur
    # at the base immediately after a variant, since the variant's end can
    # span over a larger interval when it's a deletion and we still produce
    # gvcf records under the deletion.
    expected_start = v1.end if v1.alternate_bases == ['<*>'] else v1.start + 1
    self.assertEqual(v2.start, expected_start)
def verify_deepvariant_calls(self, dv_calls, options):
  """Checks structural properties of DeepVariantCall protos.

  Each non-reference alt allele must appear in the call's allele_support
  map and be backed by at least min_count_snps supporting read names.
  """
  # Verifies simple structural properties of the DeepVariantCall objects
  # emitted by the VerySensitiveCaller, such as that the AlleleCount and
  # Variant both have the same position.
  for call in dv_calls:
    for alt_allele in call.variant.alternate_bases:
      # Skip ref calls.
      if alt_allele == vcf_constants.NO_ALT_ALLELE:
        continue

      # Make sure allele appears in our allele_support field and that at
      # least our min number of reads to call an alt allele are present in
      # the supporting reads list for that allele.
      self.assertIn(alt_allele, list(call.allele_support))
      self.assertGreaterEqual(
          len(call.allele_support[alt_allele].read_names),
          options.variant_caller_options.min_count_snps)
def verify_examples(self, examples_filename, region, options, verify_labels):
  """Reads tf.Examples from examples_filename and sanity-checks them.

  Checks that each example has the expected features ('label' too when
  verify_labels is True), a non-empty alt-allele index list, and that the
  encoded variants pass verify_variants.

  Returns:
    The list of tf.Examples read from examples_filename.
  """
  # Do some simple structural checks on the tf.Examples in the file.
  expected_features = [
      'variant/encoded', 'locus', 'image/format', 'image/encoded',
      'alt_allele_indices/encoded'
  ]
  if verify_labels:
    expected_features += ['label']

  examples = list(tfrecord.read_tfrecords(examples_filename))
  for example in examples:
    for label_feature in expected_features:
      self.assertIn(label_feature, example.features.feature)
    # pylint: disable=g-explicit-length-test
    self.assertNotEmpty(tf_utils.example_alt_alleles_indices(example))

  # Check that the variants in the examples are good.
  variants = [tf_utils.example_variant(x) for x in examples]
  self.verify_variants(variants, region, options, is_gvcf=False)
  return examples
class MakeExamplesUnitTest(parameterized.TestCase):
def test_read_write_run_info(self):
  """Round-trips the golden MakeExamplesRunInfo through read/write."""

  def _read_lines(path):
    # Returns the file's lines so we can compare outputs textually.
    with open(path) as fin:
      return list(fin.readlines())

  golden_actual = make_examples.read_make_examples_run_info(
      testdata.GOLDEN_MAKE_EXAMPLES_RUN_INFO)
  # We don't really want to inject too much knowledge about the golden right
  # here, so we only use a minimal test that (a) the run_info_filename is
  # a non-empty string and (b) the number of candidates sites in the labeling
  # metrics field is greater than 0. Any reasonable golden output will have at
  # least one candidate variant, and the reader should have filled in the
  # value.
  self.assertNotEmpty(golden_actual.options.run_info_filename)
  self.assertEqual(golden_actual.labeling_metrics.n_candidate_variant_sites,
                   testdata.N_GOLDEN_TRAINING_EXAMPLES)

  # Check that reading + writing the data produces the same lines:
  tmp_output = test_utils.test_tmpfile('written_run_info.pbtxt')
  make_examples.write_make_examples_run_info(golden_actual, tmp_output)
  self.assertEqual(
      _read_lines(testdata.GOLDEN_MAKE_EXAMPLES_RUN_INFO),
      _read_lines(tmp_output))
@parameterized.parameters(
    dict(
        flag_value='CALLING',
        expected=deepvariant_pb2.DeepVariantOptions.CALLING,
    ),
    dict(
        flag_value='TRAINING',
        expected=deepvariant_pb2.DeepVariantOptions.TRAINING,
    ),
)
def test_parse_proto_enum_flag(self, flag_value, expected):
  """Each Mode enum name string parses to its corresponding enum value."""
  enum_pb2 = deepvariant_pb2.DeepVariantOptions.Mode
  self.assertEqual(
      make_examples.parse_proto_enum_flag(enum_pb2, flag_value), expected)
def test_parse_proto_enum_flag_error_handling(self):
  """An unknown enum name raises ValueError listing the allowed options."""
  with six.assertRaisesRegex(
      self, ValueError,
      'Unknown enum option "foo". Allowed options are CALLING,TRAINING'):
    make_examples.parse_proto_enum_flag(
        deepvariant_pb2.DeepVariantOptions.Mode, 'foo')
@flagsaver.flagsaver
def test_keep_duplicates(self):
  """--keep_duplicates propagates into pic_options.read_requirements."""
  FLAGS.keep_duplicates = True
  FLAGS.ref = testdata.CHR20_FASTA
  FLAGS.reads = testdata.CHR20_BAM
  FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
  FLAGS.confident_regions = testdata.CONFIDENT_REGIONS_BED
  FLAGS.mode = 'training'
  FLAGS.examples = ''
  options = make_examples.default_options(add_flags=True)
  self.assertEqual(options.pic_options.read_requirements.keep_duplicates,
                   True)
@flagsaver.flagsaver
def test_keep_supplementary_alignments(self):
  """--keep_supplementary_alignments propagates into read_requirements."""
  FLAGS.keep_supplementary_alignments = True
  FLAGS.ref = testdata.CHR20_FASTA
  FLAGS.reads = testdata.CHR20_BAM
  FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
  FLAGS.confident_regions = testdata.CONFIDENT_REGIONS_BED
  FLAGS.mode = 'training'
  FLAGS.examples = ''
  options = make_examples.default_options(add_flags=True)
  self.assertEqual(
      options.pic_options.read_requirements.keep_supplementary_alignments,
      True)
@flagsaver.flagsaver
def test_keep_secondary_alignments(self):
  """--keep_secondary_alignments propagates into read_requirements."""
  FLAGS.keep_secondary_alignments = True
  FLAGS.ref = testdata.CHR20_FASTA
  FLAGS.reads = testdata.CHR20_BAM
  FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
  FLAGS.confident_regions = testdata.CONFIDENT_REGIONS_BED
  FLAGS.mode = 'training'
  FLAGS.examples = ''
  options = make_examples.default_options(add_flags=True)
  self.assertEqual(
      options.pic_options.read_requirements.keep_secondary_alignments, True)
@flagsaver.flagsaver
def test_min_base_quality(self):
  """--min_base_quality propagates into read_requirements."""
  FLAGS.min_base_quality = 5
  FLAGS.ref = testdata.CHR20_FASTA
  FLAGS.reads = testdata.CHR20_BAM
  FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
  FLAGS.confident_regions = testdata.CONFIDENT_REGIONS_BED
  FLAGS.mode = 'training'
  FLAGS.examples = ''
  options = make_examples.default_options(add_flags=True)
  self.assertEqual(options.pic_options.read_requirements.min_base_quality, 5)
@flagsaver.flagsaver
def test_min_mapping_quality(self):
  """--min_mapping_quality propagates into read_requirements."""
  FLAGS.min_mapping_quality = 15
  FLAGS.ref = testdata.CHR20_FASTA
  FLAGS.reads = testdata.CHR20_BAM
  FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
  FLAGS.confident_regions = testdata.CONFIDENT_REGIONS_BED
  FLAGS.mode = 'training'
  FLAGS.examples = ''
  options = make_examples.default_options(add_flags=True)
  self.assertEqual(options.pic_options.read_requirements.min_mapping_quality,
                   15)
@flagsaver.flagsaver
def test_default_options_with_training_random_emit_ref_sites(self):
  """--training_random_emit_ref_sites sets the reference-site emit fraction."""
  FLAGS.ref = testdata.CHR20_FASTA
  FLAGS.reads = testdata.CHR20_BAM
  FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
  FLAGS.confident_regions = testdata.CONFIDENT_REGIONS_BED
  FLAGS.mode = 'training'
  FLAGS.examples = ''
  FLAGS.training_random_emit_ref_sites = 0.3
  options = make_examples.default_options(add_flags=True)
  self.assertAlmostEqual(
      options.variant_caller_options.fraction_reference_sites_to_emit, 0.3)
@flagsaver.flagsaver
def test_default_options_without_training_random_emit_ref_sites(self):
  """Without the flag, the emit fraction is proto3's zero default."""
  FLAGS.ref = testdata.CHR20_FASTA
  FLAGS.reads = testdata.CHR20_BAM
  FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
  FLAGS.confident_regions = testdata.CONFIDENT_REGIONS_BED
  FLAGS.mode = 'training'
  FLAGS.examples = ''
  options = make_examples.default_options(add_flags=True)
  # In proto3, there is no way to check presence of scalar field:
  # As an approximation, we directly check that the value should be exactly 0.
  self.assertEqual(
      options.variant_caller_options.fraction_reference_sites_to_emit, 0.0)
@flagsaver.flagsaver
def test_invalid_sequencing_type(self):
  """An unrecognized --sequencing_type value ('wGs') raises ValueError."""
  FLAGS.mode = 'training'
  FLAGS.sequencing_type = 'wGs'
  with self.assertRaises(ValueError):
    make_examples.default_options(add_flags=True)
def test_extract_sample_name_from_reads_single_sample(self):
  """A single read group yields its sample_id as the sample name."""
  mock_sample_reader = mock.Mock()
  mock_sample_reader.header = reads_pb2.SamHeader(
      read_groups=[reads_pb2.ReadGroup(sample_id='sample_name')])
  self.assertEqual(
      make_examples.extract_sample_name_from_sam_reader(mock_sample_reader),
      'sample_name')
@parameterized.parameters(
    # No samples could be found in the reads.
    dict(samples=[], expected_sample_name=dv_constants.DEFAULT_SAMPLE_NAME),
    # Check that we detect an empty sample name and use default instead.
    dict(samples=[''], expected_sample_name=dv_constants.DEFAULT_SAMPLE_NAME),
    # We have more than one sample in the reads.
    dict(samples=['sample1', 'sample2'], expected_sample_name='sample1'),
)
def test_extract_sample_name_from_reads_uses_default_when_necessary(
    self, samples, expected_sample_name):
  """Missing/empty sample names fall back to the default; first sample wins."""
  mock_sample_reader = mock.Mock()
  mock_sample_reader.header = reads_pb2.SamHeader(read_groups=[
      reads_pb2.ReadGroup(sample_id=sample) for sample in samples
  ])
  self.assertEqual(
      expected_sample_name,
      make_examples.extract_sample_name_from_sam_reader(mock_sample_reader))
@flagsaver.flagsaver
def test_confident_regions(self):
  """read_confident_regions returns exactly the intervals in the BED file."""
  FLAGS.ref = testdata.CHR20_FASTA
  FLAGS.reads = testdata.CHR20_BAM
  FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
  FLAGS.confident_regions = testdata.CONFIDENT_REGIONS_BED
  FLAGS.mode = 'training'
  FLAGS.examples = ''

  options = make_examples.default_options(add_flags=True)
  confident_regions = make_examples.read_confident_regions(options)
  # Our expected intervals, inlined from CONFIDENT_REGIONS_BED.
  expected = _from_literals_list([
      'chr20:10000847-10002407', 'chr20:10002521-10004171',
      'chr20:10004274-10004964', 'chr20:10004995-10006386',
      'chr20:10006410-10007800', 'chr20:10007825-10008018',
      'chr20:10008044-10008079', 'chr20:10008101-10008707',
      'chr20:10008809-10008897', 'chr20:10009003-10009791',
      'chr20:10009934-10010531'
  ])
  # Our confident regions should be exactly those found in the BED file.
  six.assertCountEqual(self, expected, list(confident_regions))
@parameterized.parameters(
    # Each settings dict maps a flag name to a pair of
    # (value to set on FLAGS, expected resolved filename in options).
    ({
        'examples': ('foo', 'foo')
    },),
    ({
        'examples': ('foo', 'foo'),
        'gvcf': ('bar', 'bar')
    },),
    ({
        'examples': ('foo@10', 'foo-00000-of-00010')
    },),
    ({
        'task': (0, 0),
        'examples': ('foo@10', 'foo-00000-of-00010')
    },),
    ({
        'task': (1, 1),
        'examples': ('foo@10', 'foo-00001-of-00010')
    },),
    ({
        'task': (1, 1),
        'examples': ('foo@10', 'foo-00001-of-00010'),
        'gvcf': ('bar@10', 'bar-00001-of-00010')
    },),
    ({
        'task': (1, 1),
        'examples': ('foo@10', 'foo-00001-of-00010'),
        'gvcf': ('bar@10', 'bar-00001-of-00010'),
        'candidates': ('baz@10', 'baz-00001-of-00010')
    },),
)
@flagsaver.flagsaver
def test_sharded_outputs1(self, settings):
  """Sharded output specs (name@N) resolve to per-task shard filenames."""
  # Set all of the requested flag values.
  for name, (flag_val, _) in settings.items():
    setattr(FLAGS, name, flag_val)
  FLAGS.mode = 'training'
  FLAGS.reads = ''
  FLAGS.ref = ''

  options = make_examples.default_options(add_flags=True)

  # Check all of the flags.
  for name, option_val in [('examples', options.examples_filename),
                           ('candidates', options.candidates_filename),
                           ('gvcf', options.gvcf_filename)]:
    expected = settings[name][1] if name in settings else ''
    self.assertEqual(expected, option_val)
@flagsaver.flagsaver
def test_gvcf_output_enabled_is_false_without_gvcf_flag(self):
  """gvcf_output_enabled is False when --gvcf is empty."""
  FLAGS.mode = 'training'
  FLAGS.gvcf = ''
  FLAGS.reads = ''
  FLAGS.ref = ''
  FLAGS.examples = ''
  options = make_examples.default_options(add_flags=True)
  self.assertFalse(make_examples.gvcf_output_enabled(options))
@flagsaver.flagsaver
def test_gvcf_output_enabled_is_true_with_gvcf_flag(self):
  """gvcf_output_enabled is True when --gvcf names an output path."""
  FLAGS.mode = 'training'
  FLAGS.gvcf = '/tmp/foo.vcf'
  FLAGS.reads = ''
  FLAGS.ref = ''
  FLAGS.examples = ''
  options = make_examples.default_options(add_flags=True)
  self.assertTrue(make_examples.gvcf_output_enabled(options))
@flagsaver.flagsaver
def test_add_supporting_other_alt_color(self):
  """--add_supporting_other_alt_color sets the pileup read-alpha options."""
  FLAGS.mode = 'training'
  FLAGS.gvcf = ''
  FLAGS.reads = ''
  FLAGS.ref = ''
  FLAGS.examples = ''
  FLAGS.add_supporting_other_alt_color = True
  options = make_examples.default_options(add_flags=True)
  self.assertAlmostEqual(
      options.pic_options.other_allele_supporting_read_alpha, 0.3)
  self.assertAlmostEqual(options.pic_options.allele_unsupporting_read_alpha,
                         0.6)
def test_validate_ref_contig_coverage(self):
  """Validation passes iff the other contigs span enough of the reference."""
  ref_contigs = _make_contigs([('1', 100), ('2', 100)])

  # Fully covered reference contigs don't trigger an error.
  for threshold in [0.5, 0.9, 1.0]:
    self.assertIsNone(
        make_examples.validate_reference_contig_coverage(
            ref_contigs, ref_contigs, threshold))

  # No common contigs always blows up.
  for threshold in [0.0, 0.1, 0.5, 0.9, 1.0]:
    with six.assertRaisesRegex(self, ValueError, 'span 200'):
      make_examples.validate_reference_contig_coverage(
          ref_contigs, [], threshold)

  # Dropping either contig brings up below our 0.9 threshold.
  with six.assertRaisesRegex(self, ValueError, 'span 200'):
    make_examples.validate_reference_contig_coverage(
        ref_contigs, _make_contigs([('1', 100)]), 0.9)
  with six.assertRaisesRegex(self, ValueError, 'span 200'):
    make_examples.validate_reference_contig_coverage(
        ref_contigs, _make_contigs([('2', 100)]), 0.9)

  # Our actual overlap is 50%, so check that we raise when appropriate.
  with six.assertRaisesRegex(self, ValueError, 'span 200'):
    make_examples.validate_reference_contig_coverage(
        ref_contigs, _make_contigs([('2', 100)]), 0.6)
  self.assertIsNone(
      make_examples.validate_reference_contig_coverage(
          ref_contigs, _make_contigs([('2', 100)]), 0.4))
@parameterized.parameters(
    # all intervals are shared.
    ([[('chrM', 10)], [('chrM', 10)]], [('chrM', 10)]),
    # No common intervals.
    ([[('chrM', 10)], [('chr1', 10)]], []),
    # The names are the same but sizes are different, so not common.
    ([[('chrM', 10)], [('chrM', 20)]], []),
    # One common interval and one not.
    ([[('chrM', 10), ('chr1', 20)], [('chrM', 10),
                                     ('chr2', 30)]], [('chrM', 10)]),
    # Check that the order doesn't matter.
    # NOTE(review): this expected entry carries a third element (1) unlike
    # the 2-tuples elsewhere — presumably an extra positional argument to
    # _make_contigs (e.g. pos_in_fasta); confirm against its signature.
    ([[('chr1', 20), ('chrM', 10)], [('chrM', 10),
                                     ('chr2', 30)]], [('chrM', 10, 1)]),
    # Three-way merges.
    ([
        [('chr1', 20), ('chrM', 10)],
        [('chrM', 10), ('chr2', 30)],
        [('chr2', 30), ('chr3', 30)],
    ], []),
)
def test_common_contigs(self, contigs_list, expected):
  """common_contigs keeps only contigs shared by name and size by all inputs."""
  self.assertEqual(
      _make_contigs(expected),
      make_examples.common_contigs(
          [_make_contigs(contigs) for contigs in contigs_list]))
@parameterized.parameters(
    # Note that these tests aren't so comprehensive as we are trusting that
    # the intersection code logic itself is good and well-tested elsewhere.
    # Here we are focusing on some basic tests and handling of missing
    # calling_region and confident_region data.
    (['1:1-10'], ['1:1-10']),
    (['1:1-100'], ['1:1-100']),
    (['1:50-150'], ['1:50-100']),
    (None, ['1:1-100', '2:1-200']),
    (['1:20-50'], ['1:20-50']),
    # Chr3 isn't part of our contigs; make sure we tolerate it.
    (['1:20-30', '1:40-60', '3:10-50'], ['1:20-30', '1:40-60']),
    # Check that we handle overlapping calling or confident regions.
    (['1:25-30', '1:20-40'], ['1:20-40']),
)
def test_regions_to_process(self, calling_regions, expected):
  """Calling regions are clipped/merged against the known contigs."""
  contigs = _make_contigs([('1', 100), ('2', 200)])
  six.assertCountEqual(
      self, _from_literals_list(expected),
      make_examples.regions_to_process(
          contigs, 1000, calling_regions=_from_literals(calling_regions)))
@parameterized.parameters(
    (50, None, [
        '1:1-50', '1:51-100', '2:1-50', '2:51-76', '3:1-50', '3:51-100',
        '3:101-121'
    ]),
    (120, None, ['1:1-100', '2:1-76', '3:1-120', '3:121']),
    (500, None, ['1:1-100', '2:1-76', '3:1-121']),
    (10, ['1:1-20', '1:30-35'], ['1:1-10', '1:11-20', '1:30-35']),
    (8, ['1:1-20', '1:30-35'], ['1:1-8', '1:9-16', '1:17-20', '1:30-35']),
)
def test_regions_to_process_partition(self, max_size, calling_regions,
                                      expected):
  """Every emitted region is at most max_size bases long."""
  contigs = _make_contigs([('1', 100), ('2', 76), ('3', 121)])
  six.assertCountEqual(
      self, _from_literals_list(expected),
      make_examples.regions_to_process(
          contigs, max_size, calling_regions=_from_literals(calling_regions)))
@parameterized.parameters(
    dict(includes=[], excludes=[], expected=['1:1-100', '2:1-200']),
    dict(includes=['1'], excludes=[], expected=['1:1-100']),
    # Check that excludes work as expected.
    dict(includes=[], excludes=['1'], expected=['2:1-200']),
    dict(includes=[], excludes=['2'], expected=['1:1-100']),
    dict(includes=[], excludes=['1', '2'], expected=[]),
    # Check that excluding pieces works. The main checks on taking the
    # difference between two RangeSets live in ranges.py so here we are just
    # making sure some basic logic works.
    dict(includes=['1'], excludes=['1:1-10'], expected=['1:11-100']),
    # Check that includes and excludes work together.
    dict(
        includes=['1', '2'],
        excludes=['1:5-10', '1:20-50', '2:10-20'],
        expected=['1:1-4', '1:11-19', '1:51-100', '2:1-9', '2:21-200']),
    dict(
        includes=['1'],
        excludes=['1:5-10', '1:20-50', '2:10-20'],
        expected=['1:1-4', '1:11-19', '1:51-100']),
    dict(
        includes=['2'],
        excludes=['1:5-10', '1:20-50', '2:10-20'],
        expected=['2:1-9', '2:21-200']),
    # A complex example of including and excluding.
    dict(
        includes=['1:10-20', '2:50-60', '2:70-80'],
        excludes=['1:1-13', '1:19-50', '2:10-65'],
        expected=['1:14-18', '2:70-80']),
)
def test_build_calling_regions(self, includes, excludes, expected):
  """Result is (includes, or all contigs when empty) minus excludes."""
  contigs = _make_contigs([('1', 100), ('2', 200)])
  actual = make_examples.build_calling_regions(contigs, includes, excludes)
  six.assertCountEqual(self, actual, _from_literals_list(expected))
def test_regions_to_process_sorted_within_contig(self):
  """Regions come back position-sorted within a single contig."""
  # These regions are out of order but within a single contig.
  contigs = _make_contigs([('z', 100)])
  in_regions = _from_literals(['z:15', 'z:20', 'z:6', 'z:25-30', 'z:3-4'])
  sorted_regions = _from_literals_list(
      ['z:3-4', 'z:6', 'z:15', 'z:20', 'z:25-30'])
  actual_regions = list(
      make_examples.regions_to_process(
          contigs, 100, calling_regions=in_regions))
  # The assertEqual here is checking the order is exactly what we expect.
  self.assertEqual(sorted_regions, actual_regions)
def test_regions_to_process_sorted_contigs(self):
  """Regions follow the contig declaration order, not lexicographic order."""
  # These contig names are out of order lexicographically.
  contigs = _make_contigs([('z', 100), ('a', 100), ('n', 100)])
  in_regions = _from_literals(['a:10', 'n:1', 'z:20', 'z:5'])
  sorted_regions = _from_literals_list(['z:5', 'z:20', 'a:10', 'n:1'])
  actual_regions = list(
      make_examples.regions_to_process(
          contigs, 100, calling_regions=in_regions))
  # The assertEqual here is checking the order is exactly what we expect.
  self.assertEqual(sorted_regions, actual_regions)
@parameterized.parameters([2, 3, 4, 5, 50])
def test_regions_to_process_sharding(self, num_shards):
  """Makes sure we deterministically split up regions."""

  def get_regions(task_id, num_shards):
    # Deterministic: repeated calls with the same args give the same split.
    return make_examples.regions_to_process(
        contigs=_make_contigs([('z', 100), ('a', 100), ('n', 100)]),
        partition_size=5,
        task_id=task_id,
        num_shards=num_shards)

  # Check that the regions are the same unsharded vs. sharded.
  unsharded_regions = get_regions(0, 0)
  # Flatten every shard's regions into one list (replaces the manual
  # append/extend loop with the idiomatic nested comprehension).
  sharded_regions = [
      region for task_id in range(num_shards)
      for region in get_regions(task_id, num_shards)
  ]
  six.assertCountEqual(self, unsharded_regions, sharded_regions)
@parameterized.parameters(
    # Providing one of task id and num_shards but not the other is bad.
    (None, 0),
    (None, 2),
    (2, None),
    (0, None),
    # Negative values are illegal.
    (-1, 2),
    (0, -2),
    # task_id >= num_shards is bad.
    (2, 2),
    (3, 2),
)
def test_regions_to_process_fails_with_bad_shard_args(self, task, num_shards):
  """Inconsistent or out-of-range sharding arguments raise ValueError."""
  with self.assertRaises(ValueError):
    make_examples.regions_to_process(
        contigs=_make_contigs([('z', 100), ('a', 100), ('n', 100)]),
        partition_size=10,
        task_id=task,
        num_shards=num_shards)
@parameterized.parameters(
    # One variant in region.
    (['x:100-200'], ['x:150-151'], [0]),
    # Different chromosomes.
    (['x:100-200'], ['y:150-151'], []),
    # A variant at the beginning of a region.
    (['x:100-200', 'x:201-300'], ['x:100-101'], [0]),
    (['x:1-10', 'x:11-20', 'x:21-30'], ['x:11-12'], [1]),
    # A variant before all the regions.
    (['x:11-20', 'x:20-30'], ['x:1-2'], []),
    # A variant after all the regions.
    (['x:1-10', 'x:11-20', 'x:21-30'], ['x:40-50'], []),
    # Multiple variants in the same region.
    (['x:11-20', 'x:21-30'
     ], ['x:1-2', 'x:25-26', 'x:25-26', 'x:26-27', 'x:40-50'], [1]),
    # A variant spanning multiple regions belongs where it starts.
    (['x:1-10', 'x:11-20', 'x:21-30', 'x:31-40', 'x:41-50', 'x:51-60'
     ], ['x:15-66'], [1]),
)
def test_filter_regions_by_vcf(self, region_literals, variant_literals,
                               regions_to_keep):
  """Only regions containing at least one variant start are kept, in order."""
  regions = [ranges.parse_literal(l) for l in region_literals]
  variant_positions = [ranges.parse_literal(l) for l in variant_literals]
  output = make_examples.filter_regions_by_vcf(regions, variant_positions)
  list_output = list(output)
  list_expected = [regions[i] for i in regions_to_keep]
  self.assertEqual(list_output, list_expected)
def test_catches_bad_argv(self):
  """Extra positional argv entries log an error and exit with ENOENT."""
  with mock.patch.object(logging, 'error') as mock_logging,\
      mock.patch.object(sys, 'exit') as mock_exit:
    make_examples.main(['make_examples.py', 'extra_arg'])
  mock_logging.assert_called_once_with(
      'Command line parsing failure: make_examples does not accept '
      'positional arguments but some are present on the command line: '
      '"[\'make_examples.py\', \'extra_arg\']".')
  mock_exit.assert_called_once_with(errno.ENOENT)
@flagsaver.flagsaver
def test_catches_bad_flags(self):
  """Training mode without --confident_regions logs an error and exits."""
  # Set all of the requested flag values.
  region = ranges.parse_literal('chr20:10,000,000-10,010,000')
  FLAGS.ref = testdata.CHR20_FASTA
  FLAGS.reads = testdata.CHR20_BAM
  FLAGS.candidates = test_utils.test_tmpfile('vsc.tfrecord')
  FLAGS.examples = test_utils.test_tmpfile('examples.tfrecord')
  FLAGS.regions = [ranges.to_literal(region)]
  FLAGS.partition_size = 1000
  FLAGS.mode = 'training'
  FLAGS.truth_variants = testdata.TRUTH_VARIANTS_VCF
  # This is the bad flag.
  FLAGS.confident_regions = ''

  with mock.patch.object(logging, 'error') as mock_logging,\
      mock.patch.object(sys, 'exit') as mock_exit:
    make_examples.main(['make_examples.py'])
  mock_logging.assert_called_once_with(
      'confident_regions is required when in training mode.')
  mock_exit.assert_called_once_with(errno.ENOENT)
@parameterized.parameters(
    dict(
        ref_names=['1', '2', '3'],
        sam_names=['1', '2', '3'],
        vcf_names=None,
        names_to_exclude=[],
        min_coverage_fraction=1.0,
        expected_names=['1', '2', '3']),
    dict(
        ref_names=['1', '2', '3'],
        sam_names=['1', '2'],
        vcf_names=None,
        names_to_exclude=[],
        min_coverage_fraction=0.66,
        expected_names=['1', '2']),
    dict(
        ref_names=['1', '2', '3'],
        sam_names=['1', '2'],
        vcf_names=['1', '3'],
        names_to_exclude=[],
        min_coverage_fraction=0.33,
        expected_names=['1']),
    dict(
        ref_names=['1', '2', '3', '4', '5'],
        sam_names=['1', '2', '3'],
        vcf_names=None,
        names_to_exclude=['4', '5'],
        min_coverage_fraction=1.0,
        expected_names=['1', '2', '3']),
)
def test_ensure_consistent_contigs(self, ref_names, sam_names, vcf_names,
                                   names_to_exclude, min_coverage_fraction,
                                   expected_names):
  """_ensure_consistent_contigs returns the expected shared contig names."""
  ref_contigs = _make_contigs([(name, 100) for name in ref_names])
  sam_contigs = _make_contigs([(name, 100) for name in sam_names])
  # The VCF is optional; "no VCF" is represented as None, not an empty list.
  vcf_contigs = (
      _make_contigs([(name, 100) for name in vcf_names])
      if vcf_names is not None else None)
  shared = make_examples._ensure_consistent_contigs(ref_contigs, sam_contigs,
                                                    vcf_contigs,
                                                    names_to_exclude,
                                                    min_coverage_fraction)
  self.assertEqual([contig.name for contig in shared], expected_names)
@parameterized.parameters(
    dict(
        ref_names=['1', '2', '3'],
        sam_names=['1', '2'],
        vcf_names=None,
        names_to_exclude=[],
        min_coverage_fraction=0.67),
    dict(
        ref_names=['1', '2', '3'],
        sam_names=['1', '2'],
        vcf_names=['1', '3'],
        names_to_exclude=[],
        min_coverage_fraction=0.34),
)
def test_ensure_inconsistent_contigs(self, ref_names, sam_names, vcf_names,
                                     names_to_exclude, min_coverage_fraction):
  """Insufficient contig coverage raises a 'Reference contigs span' error."""
  ref_contigs = _make_contigs([(name, 100) for name in ref_names])
  sam_contigs = _make_contigs([(name, 100) for name in sam_names])
  if vcf_names is not None:
    vcf_contigs = _make_contigs([(name, 100) for name in vcf_names])
  else:
    vcf_contigs = None
  with six.assertRaisesRegex(self, ValueError, 'Reference contigs span'):
    make_examples._ensure_consistent_contigs(ref_contigs, sam_contigs,
                                             vcf_contigs, names_to_exclude,
                                             min_coverage_fraction)
@flagsaver.flagsaver
def test_regions_and_exclude_regions_flags(self):
  """--exclude_regions carves its span out of the --regions interval."""
  FLAGS.mode = 'calling'
  FLAGS.ref = testdata.CHR20_FASTA
  FLAGS.reads = testdata.CHR20_BAM
  FLAGS.regions = 'chr20:10,000,000-11,000,000'
  FLAGS.examples = 'examples.tfrecord'
  FLAGS.exclude_regions = 'chr20:10,010,000-10,100,000'

  options = make_examples.default_options(add_flags=True)
  six.assertCountEqual(
      self,
      list(
          ranges.RangeSet(
              make_examples.processing_regions_from_options(options))),
      _from_literals_list(
          ['chr20:10,000,000-10,009,999', 'chr20:10,100,001-11,000,000']))
@flagsaver.flagsaver
def test_incorrect_empty_regions(self):
  """A region on an unknown contig leaves nothing to call and raises."""
  FLAGS.mode = 'calling'
  FLAGS.ref = testdata.CHR20_FASTA
  FLAGS.reads = testdata.CHR20_BAM
  # Deliberately incorrect contig name.
  FLAGS.regions = '20:10,000,000-11,000,000'
  FLAGS.examples = 'examples.tfrecord'

  options = make_examples.default_options(add_flags=True)
  with six.assertRaisesRegex(self, ValueError,
                             'The regions to call is empty.'):
    make_examples.processing_regions_from_options(options)
@parameterized.parameters(
    # A SNP.
    dict(
        variant=variants_pb2.Variant(
            reference_name='chr20',
            start=60168,
            end=60169,
            reference_bases='C',
            alternate_bases=['T']),
        reference_haplotype='GCACCT',
        reference_offset=60165,
        expected_return=[{
            'haplotype': 'GCATCT',
            'alt': 'T',
            'variant': variants_pb2.Variant(
                reference_name='chr20',
                start=60168,
                end=60169,
                reference_bases='C',
                alternate_bases=['T'])
        }]),
    # A deletion.
    dict(
        variant=variants_pb2.Variant(
            reference_name='chr20',
            start=60284,
            end=60291,
            reference_bases='ATTCCAG',
            alternate_bases=['AT']),
        reference_haplotype='TTTCCATTCCAGTCCAT',
        reference_offset=60279,
        expected_return=[{
            'haplotype': 'TTTCCATTCCAT',
            'alt': 'AT',
            'variant': variants_pb2.Variant(
                reference_name='chr20',
                start=60284,
                end=60291,
                reference_bases='ATTCCAG',
                alternate_bases=['AT'])
        }]),
    # An insertion.
    dict(
        variant=variants_pb2.Variant(
            reference_name='chr20',
            start=60279,
            end=60285,
            reference_bases='TTTCCA',
            alternate_bases=['TTTCCATTCCA']),
        reference_haplotype='TTTCCATTCCAGTCCAT',
        reference_offset=60279,
        expected_return=[{
            'haplotype': 'TTTCCATTCCATTCCAGTCCAT',
            'alt': 'TTTCCATTCCA',
            'variant': variants_pb2.Variant(
                reference_name='chr20',
                start=60279,
                end=60285,
                reference_bases='TTTCCA',
                alternate_bases=['TTTCCATTCCA'])
        }]),
    # A deletion.
    dict(
        variant=variants_pb2.Variant(
            reference_name='chr20',
            start=60284,
            end=60291,
            reference_bases='ATTCCAG',
            alternate_bases=['AT']),
        reference_haplotype='TTTCCATTCCAG',
        reference_offset=60279,
        expected_return=[{
            'haplotype': 'TTTCCAT',
            'alt': 'AT',
            'variant': variants_pb2.Variant(
                reference_name='chr20',
                start=60284,
                end=60291,
                reference_bases='ATTCCAG',
                alternate_bases=['AT'])
        }]),
    # An insertion.
    dict(
        variant=variants_pb2.Variant(
            reference_name='chr20',
            start=60279,
            end=60285,
            reference_bases='TTTCCA',
            alternate_bases=['TTTCCATTCCA']),
        reference_haplotype='TTTCCATTCCAG',
        reference_offset=60279,
        expected_return=[{
            'haplotype': 'TTTCCATTCCATTCCAG',
            'alt': 'TTTCCATTCCA',
            'variant': variants_pb2.Variant(
                reference_name='chr20',
                start=60279,
                end=60285,
                reference_bases='TTTCCA',
                alternate_bases=['TTTCCATTCCA'])
        }]))
def test_update_haplotype(self, variant, reference_haplotype,
                          reference_offset, expected_return):
  """update_haplotype splices each alt of `variant` into the ref haplotype.

  The returned list pairs each produced haplotype string with the alt that
  generated it and the originating variant.
  """
  list_hap_obj = make_examples.update_haplotype(variant, reference_haplotype,
                                                reference_offset)
  self.assertListEqual(list_hap_obj, expected_return)
@parameterized.parameters(
    dict(
        dv_variant=variants_pb2.Variant(
            reference_name='chr20',
            start=60284,
            end=60291,
            reference_bases='ATTCCAG',
            alternate_bases=['AT']),
        cohort_variants=[
            variants_pb2.Variant(
                reference_name='chr20',
                start=60279,
                end=60285,
                reference_bases='TTTCCA',
                alternate_bases=['T', 'TTTCCATTCCA']),
            variants_pb2.Variant(
                reference_name='chr20',
                start=60285,
                end=60291,
                reference_bases='TTTCCA',
                alternate_bases=['T']),
        ],
        expected_ref_haplotype='TTTCCATTCCAG',
        expected_ref_offset=60279))
def test_get_ref_haplotype_and_offset(self, dv_variant, cohort_variants,
                                      expected_ref_haplotype,
                                      expected_ref_offset):
  """The returned haplotype/offset span the DV variant and cohort variants."""
  ref_reader = fasta.IndexedFastaReader(testdata.CHR20_GRCH38_FASTA)
  ref_haplotype, ref_offset = make_examples.get_ref_haplotype_and_offset(
      dv_variant, cohort_variants, ref_reader)
  self.assertEqual(ref_haplotype, expected_ref_haplotype)
  self.assertEqual(ref_offset, expected_ref_offset)
# pylint: disable=unused-argument
@parameterized.parameters(
    # A matched SNP.
    dict(
        variant=variants_pb2.Variant(
            reference_name='chr20',
            start=60168,
            end=60169,
            reference_bases='C',
            alternate_bases=['T']),
        expected_return=dict(C=0.9998, T=0.0002),
        label='matched_snp_1'),
    # A matched deletion.
    dict(
        variant=variants_pb2.Variant(
            reference_name='chr20',
            start=60285,
            end=60291,
            reference_bases='TTCCAG',
            alternate_bases=['T']),
        expected_return=dict(T=0.001198, TTCCAG=0.998802),
        label='matched_del_1'),
    # A unmatched deletion.
    dict(
        variant=variants_pb2.Variant(
            reference_name='chr20',
            start=60284,
            end=60291,
            reference_bases='ATTCCAG',
            alternate_bases=['A']),
        expected_return=dict(A=0, ATTCCAG=1),
        label='unmatched_del_1'),
    # A matched deletion, where the candidate is formatted differently.
    dict(
        variant=variants_pb2.Variant(
            reference_name='chr20',
            start=60284,
            end=60291,
            reference_bases='ATTCCAG',
            alternate_bases=['AT']),
        expected_return=dict(AT=0.001198, ATTCCAG=0.998802),
        label='matched_del_2: diff representation'),
    # An unmatched SNP.
    dict(
        variant=variants_pb2.Variant(
            reference_name='chr20',
            start=60150,
            end=60151,
            reference_bases='C',
            alternate_bases=['T']),
        expected_return=dict(C=1, T=0),
        label='unmatched_snp_1'),
    # A matched SNP and an unmatched SNP.
    dict(
        variant=variants_pb2.Variant(
            reference_name='chr20',
            start=60168,
            end=60169,
            reference_bases='C',
            alternate_bases=['T', 'A']),
        expected_return=dict(C=0.9998, T=0.0002, A=0),
        label='mixed_snp_1'),
    # An unmatched SNP, where the REF allele frequency is not 1.
    dict(
        variant=variants_pb2.Variant(
            reference_name='chr20',
            start=60168,
            end=60169,
            reference_bases='C',
            alternate_bases=['A']),
        expected_return=dict(C=0.9998, A=0),
        label='unmatched_snp_2: non-1 ref allele'),
    # A multi-allelic candidate at a multi-allelic locus.
    dict(
        variant=variants_pb2.Variant(
            reference_name='chr20',
            start=60279,
            end=60285,
            reference_bases='TTTCCA',
            alternate_bases=['T', 'TTTCCATTCCA']),
        expected_return=dict(TTTCCA=0.999401, T=0.000399, TTTCCATTCCA=0.0002),
        label='matched_mult_1'),
    # A multi-allelic candidate at a multi-allelic locus.
    dict(
        variant=variants_pb2.Variant(
            reference_name='chr20',
            start=60279,
            end=60285,
            reference_bases='TTTCCA',
            alternate_bases=['T', 'TATCCATTCCA']),
        expected_return=dict(TTTCCA=0.999401, T=0.000399, TATCCATTCCA=0),
        label='unmatched_mult_1'),
    # [Different representation]
    # A deletion where the cohort variant is represented differently.
    # In this case, REF frequency is calculated by going over all cohort ALTs.
    # Thus, the sum of all dict values is not equal to 1.
    dict(
        variant=variants_pb2.Variant(
            reference_name='chr20',
            start=60295,
            end=60301,
            reference_bases='TTCCAT',
            alternate_bases=['T']),
        expected_return=dict(T=0.000399, TTCCAT=0.923922),
        label='matched_del_3: diff representation'),
    # [Non-candidate allele]
    # One allele of a multi-allelic cohort variant is not in candidate.
    # The non-candidate allele should be ignored.
    dict(
        variant=variants_pb2.Variant(
            reference_name='chr20',
            start=60279,
            end=60285,
            reference_bases='TTTCCA',
            alternate_bases=['T']),
        expected_return=dict(TTTCCA=0.999401, T=0.000399),
        label='matched_del_4: multi-allelic cohort'),
    # A left-align example.
    dict(
        variant=variants_pb2.Variant(
            reference_name='chr20',
            start=9074790,
            end=9074794,
            reference_bases='CT',
            alternate_bases=['C', 'CTTT']),
        expected_return=dict(C=0.167732, CTTT=0.215256, CT=0.442092),
        label='matched_mult_2: left align'),
    # A left-align example.
    dict(
        variant=variants_pb2.Variant(
            reference_name='chr20',
            start=9074790,
            end=9074794,
            reference_bases='C',
            alternate_bases=['CTTT']),
        expected_return=dict(CTTT=0.145367, C=0.442092),
        label='matched_ins_1: left align'),
    # A left-align example.
    dict(
        variant=variants_pb2.Variant(
            reference_name='chr20',
            start=9074790,
            end=9074793,
            reference_bases='CTT',
            alternate_bases=['CTTA']),
        expected_return=dict(CTTA=0, CTT=0.442092),
        label='unmatched_ins_1: left align'),
    # A matched mnps.
    dict(
        variant=variants_pb2.Variant(
            reference_name='chr20',
            start=61065,
            end=61066,
            reference_bases='T',
            alternate_bases=['C']),
        expected_return=dict(C=0.079872, T=0.919729),
        label='matched_mnps_1'),
    # A matched SNP.
    dict(
        variant=variants_pb2.Variant(
            reference_name='chr20',
            start=62022,
            end=62023,
            reference_bases='G',
            alternate_bases=['C', 'T']),
        expected_return=dict(G=0.996206, C=0.003594, T=0),
        label='matched_snp_2'))
def test_find_matching_allele_frequency(self, variant, expected_return,
                                        label):
  """Looks up per-allele frequencies for `variant` in the population VCF.

  `label` only names the parameterized case (hence the pylint
  unused-argument pragma around this method).
  """
  ref_reader = fasta.IndexedFastaReader(testdata.CHR20_GRCH38_FASTA)
  vcf_reader = vcf.VcfReader(testdata.VCF_WITH_ALLELE_FREQUENCIES)
  allele_frequencies = make_examples.find_matching_allele_frequency(
      variant, vcf_reader, ref_reader)
  # Compare keys.
  self.assertSetEqual(
      set(allele_frequencies.keys()), set(expected_return.keys()))
  # Compare values (almost equal).
  for key in allele_frequencies.keys():
    self.assertAlmostEqual(allele_frequencies[key], expected_return[key])
# pylint: enable=unused-argument
class RegionProcessorTest(parameterized.TestCase):
  """Unit tests for make_examples.RegionProcessor."""

  def setUp(self):
    """Builds a TRAINING-mode RegionProcessor over a small chr20 window.

    `initialize` is mocked out (self.mock_init) so on-demand initialization
    can be asserted without doing real I/O.
    """
    super(RegionProcessorTest, self).setUp()
    self.region = ranges.parse_literal('chr20:10,000,000-10,000,100')
    FLAGS.reads = ''
    self.options = make_examples.default_options(add_flags=False)
    self.options.reference_filename = testdata.CHR20_FASTA
    if not self.options.reads_filenames:
      self.options.reads_filenames.extend(testdata.CHR20_BAM)
    self.options.truth_variants_filename = testdata.TRUTH_VARIANTS_VCF
    self.options.mode = deepvariant_pb2.DeepVariantOptions.TRAINING
    self.options.variant_caller_options.sample_name = 'sample_id'
    self.processor = make_examples.RegionProcessor(self.options)
    self.ref_reader = fasta.IndexedFastaReader(self.options.reference_filename)
    self.mock_init = self.add_mock('initialize')
    self.default_shape = [5, 5, 7]
    self.default_format = 'raw'
def add_mock(self, name, retval='dontadd', side_effect='dontadd'):
  """Patches attribute `name` on self.processor and returns the mock.

  The string 'dontadd' is a sentinel meaning "leave unset", so that None
  (or any other value) can still be installed as a real return value or
  side effect. The patch is undone automatically at test teardown.
  """
  sentinel = 'dontadd'
  patcher = mock.patch.object(self.processor, name, autospec=True)
  self.addCleanup(patcher.stop)
  mocked_method = patcher.start()
  if retval != sentinel:
    mocked_method.return_value = retval
  if side_effect != sentinel:
    mocked_method.side_effect = side_effect
  return mocked_method
def test_on_demand_initialization_called_if_not_initialized(self):
  """process() must call initialize() when the processor isn't initialized."""
  candidates = ['Candidates']
  self.assertFalse(self.processor.initialized)
  self.processor.in_memory_sam_reader = mock.Mock()
  mock_rr = self.add_mock('region_reads', retval=[])
  mock_cir = self.add_mock('candidates_in_region', retval=(candidates, []))
  mock_lc = self.add_mock('label_candidates', retval=[])
  self.processor.process(self.region)
  # initialize() was mocked in setUp; it must have been invoked exactly once.
  test_utils.assert_called_once_workaround(self.mock_init)
  mock_rr.assert_called_once_with(self.region)
  self.processor.in_memory_sam_reader.replace_reads.assert_called_once_with(
      [])
  mock_cir.assert_called_once_with(self.region)
  mock_lc.assert_called_once_with(candidates, self.region)
def test_on_demand_initialization_not_called_if_initialized(self):
  """process() must NOT call initialize() again once already initialized."""
  self.processor.initialized = True
  self.assertTrue(self.processor.initialized)
  self.processor.in_memory_sam_reader = mock.Mock()
  mock_rr = self.add_mock('region_reads', retval=[])
  mock_cir = self.add_mock('candidates_in_region', retval=([], []))
  mock_lc = self.add_mock('label_candidates', retval=[])
  self.processor.process(self.region)
  test_utils.assert_not_called_workaround(self.mock_init)
  mock_rr.assert_called_once_with(self.region)
  self.processor.in_memory_sam_reader.replace_reads.assert_called_once_with(
      [])
  mock_cir.assert_called_once_with(self.region)
  test_utils.assert_called_once_workaround(mock_lc)
def test_process_calls_no_candidates(self):
  """With no reads and no candidates, process() returns empty results."""
  self.processor.in_memory_sam_reader = mock.Mock()
  mock_rr = self.add_mock('region_reads', retval=[])
  mock_cir = self.add_mock('candidates_in_region', retval=([], []))
  mock_cpe = self.add_mock('create_pileup_examples', retval=[])
  mock_lc = self.add_mock('label_candidates')
  candidates, examples, gvcfs, runtimes = self.processor.process(self.region)
  self.assertEmpty(candidates)
  self.assertEmpty(examples)
  self.assertEmpty(gvcfs)
  self.assertIsInstance(runtimes, dict)
  mock_rr.assert_called_once_with(self.region)
  self.processor.in_memory_sam_reader.replace_reads.assert_called_once_with(
      [])
  mock_cir.assert_called_once_with(self.region)
  # No candidates means no pileup images are ever generated.
  test_utils.assert_not_called_workaround(mock_cpe)
  mock_lc.assert_called_once_with([], self.region)
@parameterized.parameters([
    deepvariant_pb2.DeepVariantOptions.TRAINING,
    deepvariant_pb2.DeepVariantOptions.CALLING
])
def test_process_calls_with_candidates(self, mode):
  """Full process() pipeline with one candidate; labeling only in TRAINING."""
  self.processor.options.mode = mode
  self.processor.in_memory_sam_reader = mock.Mock()
  mock_read = mock.MagicMock()
  mock_candidate = mock.MagicMock()
  mock_example = mock.MagicMock()
  mock_label = mock.MagicMock()
  mock_rr = self.add_mock('region_reads', retval=[mock_read])
  mock_cir = self.add_mock(
      'candidates_in_region', retval=([mock_candidate], []))
  mock_cpe = self.add_mock('create_pileup_examples', retval=[mock_example])
  mock_lc = self.add_mock(
      'label_candidates', retval=[(mock_candidate, mock_label)])
  mock_alte = self.add_mock('add_label_to_example', retval=mock_example)
  candidates, examples, gvcfs, runtimes = self.processor.process(self.region)
  self.assertEqual(candidates, [mock_candidate])
  self.assertEqual(examples, [mock_example])
  self.assertEmpty(gvcfs)
  self.assertIsInstance(runtimes, dict)
  mock_rr.assert_called_once_with(self.region)
  self.processor.in_memory_sam_reader.replace_reads.assert_called_once_with(
      [mock_read])
  mock_cir.assert_called_once_with(self.region)
  mock_cpe.assert_called_once_with(mock_candidate)
  if mode == deepvariant_pb2.DeepVariantOptions.TRAINING:
    mock_lc.assert_called_once_with([mock_candidate], self.region)
    mock_alte.assert_called_once_with(mock_example, mock_label)
  else:
    # In calling mode we don't label our candidates.
    test_utils.assert_not_called_workaround(mock_lc)
    test_utils.assert_not_called_workaround(mock_alte)
@parameterized.parameters([
    deepvariant_pb2.DeepVariantOptions.TRAINING,
    deepvariant_pb2.DeepVariantOptions.CALLING
])
def test_process_keeps_ordering_of_candidates_and_examples(self, mode):
  """process() preserves the input ordering of candidates and examples."""
  self.processor.options.mode = mode
  r1, r2 = mock.Mock(), mock.Mock()
  c1, c2 = mock.Mock(), mock.Mock()
  l1, l2 = mock.Mock(), mock.Mock()
  e1, e2, e3 = mock.Mock(), mock.Mock(), mock.Mock()
  self.processor.in_memory_sam_reader = mock.Mock()
  self.add_mock('region_reads', retval=[r1, r2])
  self.add_mock('candidates_in_region', retval=([c1, c2], []))
  # c1 yields one example, c2 yields two.
  mock_cpe = self.add_mock(
      'create_pileup_examples', side_effect=[[e1], [e2, e3]])
  mock_lc = self.add_mock('label_candidates', retval=[(c1, l1), (c2, l2)])
  mock_alte = self.add_mock('add_label_to_example', side_effect=[e1, e2, e3])
  candidates, examples, gvcfs, runtimes = self.processor.process(self.region)
  self.assertEqual(candidates, [c1, c2])
  self.assertEqual(examples, [e1, e2, e3])
  self.assertEmpty(gvcfs)
  self.assertIsInstance(runtimes, dict)
  self.processor.in_memory_sam_reader.replace_reads.assert_called_once_with(
      [r1, r2])
  # We don't try to label variants when in calling mode.
  self.assertEqual([mock.call(c1), mock.call(c2)], mock_cpe.call_args_list)
  if mode == deepvariant_pb2.DeepVariantOptions.CALLING:
    # In calling mode, we never try to label.
    test_utils.assert_not_called_workaround(mock_lc)
    test_utils.assert_not_called_workaround(mock_alte)
  else:
    mock_lc.assert_called_once_with([c1, c2], self.region)
    # Both of c2's examples (e2, e3) must carry c2's label l2.
    self.assertEqual([
        mock.call(e1, l1),
        mock.call(e2, l2),
        mock.call(e3, l2),
    ], mock_alte.call_args_list)
def test_process_with_realigner(self):
  """CALLING mode with the realigner enabled: reads go through realign_reads."""
  self.processor.options.mode = deepvariant_pb2.DeepVariantOptions.CALLING
  self.processor.options.realigner_enabled = True
  self.processor.options.realigner_options.CopyFrom(
      realigner_pb2.RealignerOptions())
  self.processor.realigner = mock.Mock()
  self.processor.realigner.realign_reads.return_value = [], []
  self.processor.sam_readers = [mock.Mock()]
  self.processor.sam_readers[0].query.return_value = []
  self.processor.in_memory_sam_reader = mock.Mock()
  c1, c2 = mock.Mock(), mock.Mock()
  e1, e2, e3 = mock.Mock(), mock.Mock(), mock.Mock()
  self.add_mock('candidates_in_region', retval=([c1, c2], []))
  mock_cpe = self.add_mock(
      'create_pileup_examples', side_effect=[[e1], [e2, e3]])
  mock_lc = self.add_mock('label_candidates')
  candidates, examples, gvcfs, runtimes = self.processor.process(self.region)
  self.assertEqual(candidates, [c1, c2])
  self.assertEqual(examples, [e1, e2, e3])
  self.assertEmpty(gvcfs)
  self.assertIsInstance(runtimes, dict)
  # The raw reads are queried once and passed through the realigner.
  self.processor.sam_readers[0].query.assert_called_once_with(self.region)
  self.processor.realigner.realign_reads.assert_called_once_with([],
                                                                 self.region)
  self.processor.in_memory_sam_reader.replace_reads.assert_called_once_with(
      [])
  self.assertEqual([mock.call(c1), mock.call(c2)], mock_cpe.call_args_list)
  # Calling mode: no labeling.
  test_utils.assert_not_called_workaround(mock_lc)
def test_candidates_in_region_no_reads(self):
  """No reads in the region: empty result and no AlleleCounter constructed."""
  self.processor.in_memory_sam_reader = mock.Mock()
  self.processor.in_memory_sam_reader.query.return_value = []
  mock_ac = self.add_mock('_make_allele_counter_for_region')
  self.assertEqual(([], []), self.processor.candidates_in_region(self.region))
  self.processor.in_memory_sam_reader.query.assert_called_once_with(
      self.region)
  # A region with no reads should return out without making an AlleleCounter.
  test_utils.assert_not_called_workaround(mock_ac)
@parameterized.parameters(True, False)
def test_candidates_in_region(self, include_gvcfs):
  """candidates_in_region wires reads -> AlleleCounter -> variant caller."""
  self.options.gvcf_filename = 'foo.vcf' if include_gvcfs else ''
  self.processor.in_memory_sam_reader = mock.Mock()
  reads = ['read1', 'read2']
  self.processor.in_memory_sam_reader.query.return_value = reads
  # Setup our make_allele_counter and other mocks.
  mock_ac = mock.Mock()
  mock_make_ac = self.add_mock(
      '_make_allele_counter_for_region', retval=mock_ac)
  # Setup our make_variant_caller and downstream mocks.
  mock_vc = mock.Mock()
  expected_calls = (['variant'], ['gvcf'] if include_gvcfs else [])
  mock_vc.calls_and_gvcfs.return_value = expected_calls
  self.processor.variant_caller = mock_vc
  actual = self.processor.candidates_in_region(self.region)
  # Make sure we're getting our reads for the region.
  self.processor.in_memory_sam_reader.query.assert_called_once_with(
      self.region)
  # Make sure we're creating an AlleleCounter once and adding each of our
  # reads to it.
  mock_make_ac.assert_called_once_with(self.region)
  self.assertEqual([mock.call(r, 'sample_id') for r in reads],
                   mock_ac.add.call_args_list)
  # Make sure we call CallVariant for each of the counts returned by the
  # allele counter.
  mock_vc.calls_and_gvcfs.assert_called_once_with(mock_ac, include_gvcfs)
  # Finally, our actual result should be the single 'variant' and potentially
  # the gvcf records.
  self.assertEqual(expected_calls, actual)
def test_create_pileup_examples_handles_none(self):
  """A None result from pic.create_pileup_images yields an empty list."""
  self.processor.pic = mock.Mock()
  dv_call = mock.Mock()
  self.processor.pic.create_pileup_images.return_value = None
  self.assertEqual([], self.processor.create_pileup_examples(dv_call))
  self.processor.pic.create_pileup_images.assert_called_once_with(
      dv_call=dv_call,
      reads_for_samples=[],
      haplotype_alignments_for_samples=None,
      haplotype_sequences=None)
def test_create_pileup_examples(self):
  """Each (alt, image) pair becomes one tf.Example with the right features."""
  self.processor.pic = mock.Mock()
  self.add_mock(
      '_encode_tensor',
      side_effect=[
          (six.b('tensor1'), self.default_shape, self.default_format),
          (six.b('tensor2'), self.default_shape, self.default_format)
      ])
  dv_call = mock.Mock()
  dv_call.variant = test_utils.make_variant(start=10, alleles=['A', 'C', 'G'])
  ex = mock.Mock()
  alt1, alt2 = ['C'], ['G']
  self.processor.pic.create_pileup_images.return_value = [
      (alt1, six.b('tensor1')), (alt2, six.b('tensor2'))
  ]
  actual = self.processor.create_pileup_examples(dv_call)
  self.processor.pic.create_pileup_images.assert_called_once_with(
      dv_call=dv_call,
      reads_for_samples=[],
      haplotype_alignments_for_samples=None,
      haplotype_sequences=None)
  self.assertLen(actual, 2)
  # Every produced example must carry its alt alleles, the source variant,
  # the encoded image bytes, and the image shape/format metadata.
  for ex, (alt, img) in zip(actual, [(alt1, six.b('tensor1')),
                                     (alt2, six.b('tensor2'))]):
    self.assertEqual(tf_utils.example_alt_alleles(ex), alt)
    self.assertEqual(tf_utils.example_variant(ex), dv_call.variant)
    self.assertEqual(tf_utils.example_encoded_image(ex), img)
    self.assertEqual(tf_utils.example_image_shape(ex), self.default_shape)
    self.assertEqual(
        tf_utils.example_image_format(ex), six.b(self.default_format))
@parameterized.parameters(
    # Test that a het variant gets a label value of 1 assigned to the example.
    dict(
        label=variant_labeler.VariantLabel(
            is_confident=True,
            variant=test_utils.make_variant(start=10, alleles=['A', 'C']),
            genotype=(0, 1)),
        expected_label_value=1,
    ),
    # Test that a reference variant gets a label value of 0 in the example.
    dict(
        label=variant_labeler.VariantLabel(
            is_confident=True,
            variant=test_utils.make_variant(start=10, alleles=['A', '.']),
            genotype=(0, 0)),
        expected_label_value=0,
    ),
)
def test_add_label_to_example(self, label, expected_label_value):
  """add_label_to_example sets label and genotype while keeping features."""
  example = self._example_for_variant(label.variant)
  labeled = copy.deepcopy(example)
  actual = self.processor.add_label_to_example(labeled, label)
  # The add_label_to_example command modifies labeled and returns it.
  self.assertIs(actual, labeled)
  # Check that all keys from example are present in labeled.
  for key, value in example.features.feature.items():
    if key != 'variant/encoded':  # Special case tested below.
      self.assertEqual(value, labeled.features.feature[key])
  # The genotype of our example_variant should be set to the true genotype
  # according to our label.
  self.assertEqual(expected_label_value, tf_utils.example_label(labeled))
  labeled_variant = tf_utils.example_variant(labeled)
  call = variant_utils.only_call(labeled_variant)
  self.assertEqual(tuple(call.genotype), label.genotype)
  # The original variant and labeled_variant from our tf.Example should be
  # equal except for the genotype field, since this is set by
  # add_label_to_example.
  label.variant.calls[0].genotype[:] = []
  call.genotype[:] = []
  self.assertEqual(label.variant, labeled_variant)
def test_label_variant_raises_for_non_confident_variant(self):
  """Labeling with is_confident=False must raise ValueError."""
  label = variant_labeler.VariantLabel(
      is_confident=False,
      variant=test_utils.make_variant(start=10, alleles=['A', 'C']),
      genotype=(0, 1))
  example = self._example_for_variant(label.variant)
  with six.assertRaisesRegex(
      self, ValueError, 'Cannot add a non-confident label to an example'):
    self.processor.add_label_to_example(example, label)
def _example_for_variant(self, variant):
  """Builds a minimal tf.Example for `variant` with dummy image bytes."""
  alt_alleles = list(variant.alternate_bases)
  encoded_image = six.b('foo')
  return tf_utils.make_example(variant, alt_alleles, encoded_image,
                               self.default_shape, self.default_format)
@parameterized.parameters('sort_by_haplotypes', 'use_original_quality_scores')
def test_flags_strictly_needs_sam_aux_fields(
    self, flags_strictly_needs_sam_aux_fields):
  """Flags requiring SAM aux fields must fail if parse_sam_aux_fields=False."""
  FLAGS.mode = 'calling'
  FLAGS.ref = testdata.CHR20_FASTA
  FLAGS.reads = testdata.CHR20_BAM
  FLAGS.examples = 'examples.tfrecord'
  FLAGS[flags_strictly_needs_sam_aux_fields].value = True
  FLAGS.parse_sam_aux_fields = False
  with six.assertRaisesRegex(
      self, Exception,
      'If --{} is set then parse_sam_aux_fields must be set too.'.format(
          flags_strictly_needs_sam_aux_fields)):
    make_examples.default_options(add_flags=True)
@parameterized.parameters(
    ('add_hp_channel', True, None),
    # NOTE(review): 'WARGNING!' below looks like a typo, but this regex must
    # match the warning text emitted by make_examples itself — confirm the
    # production string before "fixing" it here.
    ('add_hp_channel', False,
     'WARGNING! --{} is set but --parse_sam_aux_fields is not set.'),
    ('add_hp_channel', None,
     'Because --{}=true, --parse_sam_aux_fields is set to true to enable '
     'reading auxiliary fields from reads.'),
)
def test_flag_optionally_needs_sam_aux_fields_with_different_parse_sam_aux_fields(
    self, flag_optionally_needs_sam_aux_fields, parse_sam_aux_fields,
    expected_message):
  """Optional aux-field flags warn or auto-enable parse_sam_aux_fields."""
  FLAGS.mode = 'calling'
  FLAGS.ref = testdata.CHR20_FASTA
  FLAGS.reads = testdata.CHR20_BAM
  FLAGS.examples = 'examples.tfrecord'
  FLAGS[flag_optionally_needs_sam_aux_fields].value = True
  FLAGS.parse_sam_aux_fields = parse_sam_aux_fields
  with self.assertLogs() as logs:
    make_examples.default_options(add_flags=True)
  warning_messages = [x for x in logs.output if x.startswith('WARNING')]
  if expected_message:
    self.assertLen(warning_messages, 1)
    self.assertRegex(
        warning_messages[0],
        expected_message.format(flag_optionally_needs_sam_aux_fields))
  else:
    self.assertEmpty(warning_messages)
@parameterized.parameters(
    [
        dict(window_width=221),
        dict(window_width=1001),
    ],)
def test_align_to_all_haplotypes(self, window_width):
  """align_to_all_haplotypes returns per-alt alignments and window sequences."""
  # align_to_all_haplotypes() will pull from the reference, so choose a
  # real variant.
  region = ranges.parse_literal('chr20:10,046,000-10,046,400')
  nist_reader = vcf.VcfReader(testdata.TRUTH_VARIANTS_VCF)
  nist_variants = list(nist_reader.query(region))
  # We picked this region to have exactly one known variant:
  # reference_bases: "AAGAAAGAAAG"
  # alternate_bases: "A", a deletion of 10 bp
  # start: 10046177
  # end: 10046188
  # reference_name: "chr20"
  variant = nist_variants[0]
  self.processor.pic = mock.Mock()
  self.processor.pic.width = window_width
  self.processor.pic.half_width = int((self.processor.pic.width - 1) / 2)
  self.processor.realigner = mock.Mock()
  # Using a real ref_reader to test that the reference allele matches
  # between the variant and the reference at the variant's coordinates.
  self.processor.realigner.ref_reader = self.ref_reader
  read = test_utils.make_read(
      'A' * 101, start=10046100, cigar='101M', quals=[30] * 101)
  self.processor.realigner.align_to_haplotype = mock.Mock()
  alt_info = self.processor.align_to_all_haplotypes(variant, [read])
  hap_alignments = alt_info['alt_alignments']
  hap_sequences = alt_info['alt_sequences']
  # Both outputs are keyed by alt allele.
  self.assertCountEqual(hap_alignments.keys(), ['A'])
  self.assertCountEqual(hap_sequences.keys(), ['A'])
  # Sequence must be the length of the window.
  self.assertLen(hap_sequences['A'], self.processor.pic.width)
  # align_to_haplotype should be called once for each alt (1 alt here).
  self.processor.realigner.align_to_haplotype.assert_called_once()
  # If variant reference_bases are wrong, it should raise a ValueError.
  variant.reference_bases = 'G'
  with six.assertRaisesRegex(self, ValueError,
                             'does not match the bases in the reference'):
    self.processor.align_to_all_haplotypes(variant, [read])
@parameterized.parameters(
    dict(
        dv_calls=iter([
            deepvariant_pb2.DeepVariantCall(
                variant=variants_pb2.Variant(
                    reference_name='chr20',
                    start=60168,
                    end=60169,
                    reference_bases='C',
                    alternate_bases=['T']),
                allele_support=None)
        ]),
        expected_return=dict(C=0.9998, T=0.0002)))
def test_add_allele_frequencies_to_candidates(self, dv_calls,
                                              expected_return):
  """Population-VCF frequencies get attached to each candidate call."""
  pop_vcf_reader = vcf.VcfReader(testdata.VCF_WITH_ALLELE_FREQUENCIES)
  self.processor.ref_reader = fasta.IndexedFastaReader(
      testdata.CHR20_GRCH38_FASTA)
  updated_dv_call = list(
      self.processor.add_allele_frequencies_to_candidates(
          dv_calls, pop_vcf_reader))
  actual_frequency = updated_dv_call[0].allele_frequency
  # Compare keys.
  self.assertSetEqual(
      set(actual_frequency.keys()), set(expected_return.keys()))
  # Compare values (almost equal).
  for key in actual_frequency.keys():
    self.assertAlmostEqual(actual_frequency[key], expected_return[key])
@parameterized.parameters(
    dict(
        dv_calls=iter([
            deepvariant_pb2.DeepVariantCall(
                variant=variants_pb2.Variant(
                    reference_name='chrM',
                    start=10000,
                    end=10001,
                    reference_bases='T',
                    alternate_bases=['G']),
                allele_support=None)
        ]),
        expected_return=dict(T=1, G=0)))
def test_add_allele_frequencies_to_candidates_invalid_vcf(
    self, dv_calls, expected_return):
  """Without a population VCF, REF defaults to frequency 1 and ALTs to 0."""
  pop_vcf_reader = None
  self.processor.ref_reader = None
  updated_dv_call = list(
      self.processor.add_allele_frequencies_to_candidates(
          dv_calls, pop_vcf_reader))
  actual_frequency = updated_dv_call[0].allele_frequency
  # Compare keys.
  self.assertSetEqual(
      set(actual_frequency.keys()), set(expected_return.keys()))
  # Compare values (almost equal).
  for key in actual_frequency.keys():
    self.assertAlmostEqual(actual_frequency[key], expected_return[key])
if __name__ == '__main__':
  # Run under absltest so absl flags are parsed before tests execute.
  absltest.main()
| bsd-3-clause |
mapr/hue | desktop/core/ext-py/Django-1.6.10/django/forms/util.py | 107 | 3776 | from __future__ import unicode_literals
from django.conf import settings
from django.utils.html import format_html, format_html_join
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils import six
import sys
# Import ValidationError so that it can be imported from this
# module to maintain backwards compatibility.
from django.core.exceptions import ValidationError
def flatatt(attrs):
    """
    Convert a dictionary of attributes to a single string.

    The returned string will contain a leading space followed by key="value",
    XML-style pairs. It is assumed that the keys do not need to be XML-escaped.
    If the passed dictionary is empty, then return an empty string.

    The result is passed through 'mark_safe'.
    """
    # Sort for deterministic output; format_html_join escapes the values
    # and marks the joined result safe.
    sorted_attrs = sorted(attrs.items())
    return format_html_join('', ' {0}="{1}"', sorted_attrs)
@python_2_unicode_compatible
class ErrorDict(dict):
    """
    A collection of errors that knows how to display itself in various formats.

    The dictionary keys are the field names, and the values are the errors.
    """
    def __str__(self):
        # Default string form is the HTML <ul> rendering.
        return self.as_ul()

    def as_ul(self):
        # HTML rendering; format_html/format_html_join escape the messages.
        if not self: return ''
        return format_html('<ul class="errorlist">{0}</ul>',
                           format_html_join('', '<li>{0}{1}</li>',
                                            ((k, force_text(v))
                                             for k, v in self.items())
                                            ))

    def as_text(self):
        # Plain-text rendering: one '* field' line followed by its messages.
        return '\n'.join(['* %s\n%s' % (k, '\n'.join(['  * %s' % force_text(i) for i in v])) for k, v in self.items()])
@python_2_unicode_compatible
class ErrorList(list):
    """
    A collection of errors that knows how to display itself in various formats.
    """
    def __str__(self):
        # Default string form is the HTML <ul> rendering.
        return self.as_ul()

    def as_ul(self):
        # HTML rendering; format_html_join escapes each error message.
        if not self: return ''
        return format_html('<ul class="errorlist">{0}</ul>',
                           format_html_join('', '<li>{0}</li>',
                                            ((force_text(e),) for e in self)
                                            )
                           )

    def as_text(self):
        # Plain-text rendering: one '* message' line per error.
        if not self: return ''
        return '\n'.join(['* %s' % force_text(e) for e in self])

    def __repr__(self):
        # repr of the force_text'd messages, not of the lazy/proxy objects.
        return repr([force_text(e) for e in self])
# Utilities for time zone support in DateTimeField et al.
def from_current_timezone(value):
    """
    When time zone support is enabled, convert naive datetimes
    entered in the current time zone to aware datetimes.
    """
    if settings.USE_TZ and value is not None and timezone.is_naive(value):
        current_timezone = timezone.get_current_timezone()
        try:
            return timezone.make_aware(value, current_timezone)
        except Exception:
            # make_aware can fail for local times that are ambiguous or
            # nonexistent (DST transitions); surface this as a field
            # validation error rather than a server error.
            message = _(
                '%(datetime)s couldn\'t be interpreted '
                'in time zone %(current_timezone)s; it '
                'may be ambiguous or it may not exist.'
            )
            params = {'datetime': value, 'current_timezone': current_timezone}
            # Re-raise as ValidationError while preserving the original
            # traceback (six.reraise) so the root cause stays debuggable.
            six.reraise(ValidationError, ValidationError(
                message,
                code='ambiguous_timezone',
                params=params,
            ), sys.exc_info()[2])
    return value
def to_current_timezone(value):
    """
    When time zone support is enabled, convert aware datetimes
    to naive datetimes in the current time zone for display.
    """
    # Guard clause; same short-circuit order as the positive test so the
    # settings access still happens first.
    if not (settings.USE_TZ and value is not None and timezone.is_aware(value)):
        return value
    return timezone.make_naive(value, timezone.get_current_timezone())
| apache-2.0 |
eddiep1101/django-oscar | src/oscar/apps/catalogue/reviews/app.py | 58 | 1122 | from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from oscar.core.application import Application
from oscar.core.loading import get_class
class ProductReviewsApplication(Application):
    """URL configuration for the product-reviews sub-app.

    Views are resolved through get_class() so projects can override them by
    supplying their own 'catalogue.reviews.views' module.
    """
    name = None  # no own namespace; mounted under the parent app's URLs
    hidable_feature_name = "reviews"  # lets the feature be switched off

    detail_view = get_class('catalogue.reviews.views', 'ProductReviewDetail')
    create_view = get_class('catalogue.reviews.views', 'CreateProductReview')
    vote_view = get_class('catalogue.reviews.views', 'AddVoteView')
    list_view = get_class('catalogue.reviews.views', 'ProductReviewList')

    def get_urls(self):
        """Return the url patterns; voting requires an authenticated user."""
        urls = [
            url(r'^(?P<pk>\d+)/$', self.detail_view.as_view(),
                name='reviews-detail'),
            url(r'^add/$', self.create_view.as_view(),
                name='reviews-add'),
            url(r'^(?P<pk>\d+)/vote/$',
                login_required(self.vote_view.as_view()),
                name='reviews-vote'),
            url(r'^$', self.list_view.as_view(), name='reviews-list'),
        ]
        return self.post_process_urls(urls)


# Singleton instance imported by the surrounding application config.
application = ProductReviewsApplication()
| bsd-3-clause |
apanju/GMIO_Odoo | addons/website_customer/__openerp__.py | 313 | 1571 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP S.A. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Addon manifest: metadata, dependencies and data files for the
# 'website_customer' module (publishes customer references on the website).
{
    'name': 'Customer References',
    'category': 'Website',
    'website': 'https://www.odoo.com/page/website-builder',
    'summary': 'Publish Your Customer References',
    'version': '1.0',
    'description': """
OpenERP Customer References
===========================
""",
    'author': 'OpenERP SA',
    'depends': [
        'crm_partner_assign',
        'website_partner',
        'website_google_map',
    ],
    'demo': [
        'website_customer_demo.xml',
    ],
    'data': [
        'views/website_customer.xml',
    ],
    'qweb': [],
    'installable': True,
}
| agpl-3.0 |
orangeduck/PyAutoC | Python27/Lib/test/test_weakref.py | 35 | 42138 | import gc
import sys
import unittest
import UserList
import weakref
import operator
from test import test_support
# Used in ReferencesTestCase.test_ref_created_during_del() .
ref_from_del = None
class C:
    """Trivial class used as a weak-referenceable target in the tests."""

    def method(self):
        """Do nothing; exists so tests can take weakrefs to bound methods."""
        return None
class Callable:
    """Callable object that records the last argument it was invoked with."""

    # Class-level default; shadowed per-instance on the first call.
    bar = None

    def __call__(self, x):
        """Remember x on the instance."""
        self.bar = x
def create_function():
    """Return a fresh, no-op function object (a weakref-able target)."""
    def f():
        return None
    return f
def create_bound_method():
    """Return a bound method of a brand-new C instance."""
    instance = C()
    return instance.method
def create_unbound_method():
    """Return C.method accessed on the class (unbound on Python 2)."""
    return C.method
class TestBase(unittest.TestCase):
    """Shared fixture: counts weakref-callback invocations in cbcalled."""

    def setUp(self):
        self.cbcalled = 0

    def callback(self, ref):
        """Weakref callback; ignores the dead ref and bumps the counter."""
        self.cbcalled = self.cbcalled + 1
class ReferencesTestCase(TestBase):
def test_basic_ref(self):
    """weakref.ref works for instances, functions and (un)bound methods."""
    self.check_basic_ref(C)
    self.check_basic_ref(create_function)
    self.check_basic_ref(create_bound_method)
    self.check_basic_ref(create_unbound_method)

    # Just make sure the tp_repr handler doesn't raise an exception.
    # Live reference:
    o = C()
    wr = weakref.ref(o)
    repr(wr)
    # Dead reference:
    del o
    repr(wr)
def test_basic_callback(self):
    """The death callback fires once for each kind of referent."""
    self.check_basic_callback(C)
    self.check_basic_callback(create_function)
    self.check_basic_callback(create_bound_method)
    self.check_basic_callback(create_unbound_method)
def test_multiple_callbacks(self):
    """Two refs with callbacks: both die and both callbacks run."""
    o = C()
    ref1 = weakref.ref(o, self.callback)
    ref2 = weakref.ref(o, self.callback)
    del o
    self.assertTrue(ref1() is None,
                    "expected reference to be invalidated")
    self.assertTrue(ref2() is None,
                    "expected reference to be invalidated")
    self.assertTrue(self.cbcalled == 2,
                    "callback not called the right number of times")
def test_multiple_selfref_callbacks(self):
    """All refs are cleared before any death callback is invoked."""
    # Make sure all references are invalidated before callbacks are called
    #
    # What's important here is that we're using the first
    # reference in the callback invoked on the second reference
    # (the most recently created ref is cleaned up first). This
    # tests that all references to the object are invalidated
    # before any of the callbacks are invoked, so that we only
    # have one invocation of _weakref.c:cleanup_helper() active
    # for a particular object at a time.
    #
    def callback(object, self=self):
        self.ref()
    c = C()
    self.ref = weakref.ref(c, callback)
    ref1 = weakref.ref(c, callback)
    del c
def test_proxy_ref(self):
    """Dead proxies raise ReferenceError on attribute access and bool()."""
    o = C()
    o.bar = 1
    ref1 = weakref.proxy(o, self.callback)
    ref2 = weakref.proxy(o, self.callback)
    del o

    def check(proxy):
        proxy.bar

    self.assertRaises(weakref.ReferenceError, check, ref1)
    self.assertRaises(weakref.ReferenceError, check, ref2)
    # bool() of a proxy whose referent (the temporary C()) is already dead.
    self.assertRaises(weakref.ReferenceError, bool, weakref.proxy(C()))
    self.assertTrue(self.cbcalled == 2)
def check_basic_ref(self, factory):
    """Helper: a ref to a live object of factory() dereferences to it."""
    o = factory()
    ref = weakref.ref(o)
    self.assertTrue(ref() is not None,
                    "weak reference to live object should be live")
    o2 = ref()
    self.assertTrue(o is o2,
                    "<ref>() should return original object if live")
    def check_basic_callback(self, factory):
        """Helper: deleting the referent runs the callback exactly once."""
        self.cbcalled = 0
        o = factory()
        ref = weakref.ref(o, self.callback)
        del o
        self.assertTrue(self.cbcalled == 1,
                        "callback did not properly set 'cbcalled'")
        self.assertTrue(ref() is None,
                        "ref2 should be dead after deleting object reference")
    def test_ref_reuse(self):
        """Callback-less refs to the same object are shared (identical)."""
        o = C()
        ref1 = weakref.ref(o)
        # create a proxy to make sure that there's an intervening creation
        # between these two; it should make no difference
        proxy = weakref.proxy(o)
        ref2 = weakref.ref(o)
        self.assertTrue(ref1 is ref2,
                        "reference object w/out callback should be re-used")
        o = C()
        proxy = weakref.proxy(o)
        ref1 = weakref.ref(o)
        ref2 = weakref.ref(o)
        self.assertTrue(ref1 is ref2,
                        "reference object w/out callback should be re-used")
        self.assertTrue(weakref.getweakrefcount(o) == 2,
                        "wrong weak ref count for object")
        del proxy
        self.assertTrue(weakref.getweakrefcount(o) == 1,
                        "wrong weak ref count for object after deleting proxy")
    def test_proxy_reuse(self):
        """Callback-less proxies to the same object are shared too."""
        o = C()
        proxy1 = weakref.proxy(o)
        ref = weakref.ref(o)
        proxy2 = weakref.proxy(o)
        self.assertTrue(proxy1 is proxy2,
                        "proxy object w/out callback should have been re-used")
    def test_basic_proxy(self):
        """Proxies forward attribute and sequence operations to the referent."""
        o = C()
        self.check_proxy(o, weakref.proxy(o))

        L = UserList.UserList()
        p = weakref.proxy(L)
        self.assertFalse(p, "proxy for empty UserList should be false")
        p.append(12)
        self.assertEqual(len(L), 1)
        self.assertTrue(p, "proxy for non-empty UserList should be true")
        with test_support.check_py3k_warnings():
            # Slice assignment through the proxy mutates the referent.
            p[:] = [2, 3]
        self.assertEqual(len(L), 2)
        self.assertEqual(len(p), 2)
        self.assertIn(3, p, "proxy didn't support __contains__() properly")
        p[1] = 5
        self.assertEqual(L[1], 5)
        self.assertEqual(p[1], 5)
        L2 = UserList.UserList(L)
        p2 = weakref.proxy(L2)
        self.assertEqual(p, p2)
        ## self.assertEqual(repr(L2), repr(p2))
        L3 = UserList.UserList(range(10))
        p3 = weakref.proxy(L3)
        with test_support.check_py3k_warnings():
            # Slicing is deprecated under -3; results must still agree.
            self.assertEqual(L3[:], p3[:])
            self.assertEqual(L3[5:], p3[5:])
            self.assertEqual(L3[:5], p3[:5])
            self.assertEqual(L3[2:5], p3[2:5])
    def test_proxy_unicode(self):
        # See bug 5037: proxies must expose __unicode__ when the referent has it.
        class C(object):
            def __str__(self):
                return "string"
            def __unicode__(self):
                return u"unicode"
        instance = C()
        self.assertIn("__unicode__", dir(weakref.proxy(instance)))
        self.assertEqual(unicode(weakref.proxy(instance)), u"unicode")
    def test_proxy_index(self):
        """operator.index() is forwarded through a proxy via __index__."""
        class C:
            def __index__(self):
                return 10
        o = C()
        p = weakref.proxy(o)
        self.assertEqual(operator.index(p), 10)
    def test_proxy_div(self):
        """Floor division (plain and in-place) is forwarded by proxies."""
        class C:
            def __floordiv__(self, other):
                return 42
            def __ifloordiv__(self, other):
                return 21
        o = C()
        p = weakref.proxy(o)
        self.assertEqual(p // 5, 42)
        # In-place //= rebinds p to the referent's __ifloordiv__ result.
        p //= 5
        self.assertEqual(p, 21)
    # The PyWeakref_* C API is documented as allowing either NULL or
    # None as the value for the callback, where either means "no
    # callback".  The "no callback" ref and proxy objects are supposed
    # to be shared so long as they exist by all callers so long as
    # they are active.  In Python 2.3.3 and earlier, this guarantee
    # was not honored, and was broken in different ways for
    # PyWeakref_NewRef() and PyWeakref_NewProxy().  (Two tests.)

    def test_shared_ref_without_callback(self):
        """Callback-less refs obey the NULL/None sharing contract."""
        self.check_shared_without_callback(weakref.ref)
    def test_shared_proxy_without_callback(self):
        """Callback-less proxies obey the NULL/None sharing contract."""
        self.check_shared_without_callback(weakref.proxy)
    def check_shared_without_callback(self, makeref):
        """Helper: NULL and None callbacks must yield the same shared wrapper."""
        o = Object(1)
        p1 = makeref(o, None)
        p2 = makeref(o, None)
        self.assertTrue(p1 is p2, "both callbacks were None in the C API")
        del p1, p2
        p1 = makeref(o)
        p2 = makeref(o, None)
        self.assertTrue(p1 is p2, "callbacks were NULL, None in the C API")
        del p1, p2
        p1 = makeref(o)
        p2 = makeref(o)
        self.assertTrue(p1 is p2, "both callbacks were NULL in the C API")
        del p1, p2
        p1 = makeref(o, None)
        p2 = makeref(o)
        self.assertTrue(p1 is p2, "callbacks were None, NULL in the C API")
    def test_callable_proxy(self):
        """Calls through a CallableProxyType reach the referent's __call__."""
        o = Callable()
        ref1 = weakref.proxy(o)
        self.check_proxy(o, ref1)
        self.assertTrue(type(ref1) is weakref.CallableProxyType,
                        "proxy is not of callable type")
        ref1('twinkies!')
        self.assertTrue(o.bar == 'twinkies!',
                        "call through proxy not passed through to original")
        ref1(x='Splat.')
        self.assertTrue(o.bar == 'Splat.',
                        "call through proxy not passed through to original")
        # expect due to too few args
        self.assertRaises(TypeError, ref1)
        # expect due to too many args
        self.assertRaises(TypeError, ref1, 1, 2, 3)
    def check_proxy(self, o, proxy):
        """Helper: attribute get/set/delete are mirrored in both directions."""
        o.foo = 1
        self.assertTrue(proxy.foo == 1,
                        "proxy does not reflect attribute addition")
        o.foo = 2
        self.assertTrue(proxy.foo == 2,
                        "proxy does not reflect attribute modification")
        del o.foo
        self.assertTrue(not hasattr(proxy, 'foo'),
                        "proxy does not reflect attribute removal")
        proxy.foo = 1
        self.assertTrue(o.foo == 1,
                        "object does not reflect attribute addition via proxy")
        proxy.foo = 2
        self.assertTrue(
            o.foo == 2,
            "object does not reflect attribute modification via proxy")
        del proxy.foo
        self.assertTrue(not hasattr(o, 'foo'),
                        "object does not reflect attribute removal via proxy")
    def test_proxy_deletion(self):
        # Test clearing of SF bug #762891: `del proxy[i]` must forward
        # to the referent's __delitem__.
        class Foo:
            result = None
            def __delitem__(self, accessor):
                self.result = accessor
        g = Foo()
        f = weakref.proxy(g)
        del f[0]
        self.assertEqual(f.result, 0)
    def test_proxy_bool(self):
        # Test clearing of SF bug #1170766: bool() of a live proxy must
        # match bool() of the referent.
        class List(list): pass
        lyst = List()
        self.assertEqual(bool(weakref.proxy(lyst)), bool(lyst))
    def test_getweakrefcount(self):
        """getweakrefcount() counts both refs and proxies, dropping to zero."""
        o = C()
        ref1 = weakref.ref(o)
        ref2 = weakref.ref(o, self.callback)
        self.assertTrue(weakref.getweakrefcount(o) == 2,
                        "got wrong number of weak reference objects")
        proxy1 = weakref.proxy(o)
        proxy2 = weakref.proxy(o, self.callback)
        self.assertTrue(weakref.getweakrefcount(o) == 4,
                        "got wrong number of weak reference objects")
        del ref1, ref2, proxy1, proxy2
        self.assertTrue(weakref.getweakrefcount(o) == 0,
                        "weak reference objects not unlinked from"
                        " referent when discarded.")
        # assumes ints do not support weakrefs
        self.assertTrue(weakref.getweakrefcount(1) == 0,
                        "got wrong number of weak reference objects for int")
    def test_getweakrefs(self):
        """getweakrefs() lists exactly the live refs, in either delete order."""
        o = C()
        ref1 = weakref.ref(o, self.callback)
        ref2 = weakref.ref(o, self.callback)
        del ref1
        self.assertTrue(weakref.getweakrefs(o) == [ref2],
                        "list of refs does not match")
        o = C()
        ref1 = weakref.ref(o, self.callback)
        ref2 = weakref.ref(o, self.callback)
        del ref2
        self.assertTrue(weakref.getweakrefs(o) == [ref1],
                        "list of refs does not match")
        del ref1
        self.assertTrue(weakref.getweakrefs(o) == [],
                        "list of refs not cleared")
        # assumes ints do not support weakrefs
        self.assertTrue(weakref.getweakrefs(1) == [],
                        "list of refs does not match for int")
    def test_newstyle_number_ops(self):
        """Arithmetic works with a proxy on either side of the operator."""
        class F(float):
            pass
        f = F(2.0)
        p = weakref.proxy(f)
        self.assertTrue(p + 1.0 == 3.0)
        self.assertTrue(1.0 + p == 3.0)  # this used to SEGV
    def test_callbacks_protected(self):
        # Callbacks protected from already-set exceptions?
        # Regression test for SF bug #478534: a weakref callback firing
        # while an exception is propagating must not clobber it.
        class BogusError(Exception):
            pass
        data = {}
        def remove(k):
            del data[k]
        def encapsulate():
            # The lambda dies when the frame unwinds, firing `remove`
            # while BogusError is in flight.
            f = lambda : ()
            data[weakref.ref(f, remove)] = None
            raise BogusError
        try:
            encapsulate()
        except BogusError:
            pass
        else:
            self.fail("exception not properly restored")
        try:
            encapsulate()
        except BogusError:
            pass
        else:
            self.fail("exception not properly restored")
    def test_sf_bug_840829(self):
        # "weakref callbacks and gc corrupt memory"
        # subtype_dealloc erroneously exposed a new-style instance
        # already in the process of getting deallocated to gc,
        # causing double-deallocation if the instance had a weakref
        # callback that triggered gc.
        # If the bug exists, there probably won't be an obvious symptom
        # in a release build.  In a debug build, a segfault will occur
        # when the second attempt to remove the instance from the "list
        # of all objects" occurs.
        import gc
        class C(object):
            pass
        c = C()
        wr = weakref.ref(c, lambda ignore: gc.collect())
        del c
        # There endeth the first part.  It gets worse.
        del wr
        c1 = C()
        c1.i = C()
        wr = weakref.ref(c1.i, lambda ignore: gc.collect())
        c2 = C()
        c2.c1 = c1
        del c1  # still alive because c2 points to it
        # Now when subtype_dealloc gets called on c2, it's not enough just
        # that c2 is immune from gc while the weakref callbacks associated
        # with c2 execute (there are none in this 2nd half of the test, btw).
        # subtype_dealloc goes on to call the base classes' deallocs too,
        # so any gc triggered by weakref callbacks associated with anything
        # torn down by a base class dealloc can also trigger double
        # deallocation of c2.
        del c2
    def test_callback_in_cycle_1(self):
        import gc
        class J(object):
            pass
        class II(object):
            def acallback(self, ignore):
                self.J
        I = II()
        I.J = J
        I.wr = weakref.ref(J, I.acallback)
        # Now J and II are each in a self-cycle (as all new-style class
        # objects are, since their __mro__ points back to them).  I holds
        # both a weak reference (I.wr) and a strong reference (I.J) to class
        # J.  I is also in a cycle (I.wr points to a weakref that references
        # I.acallback).  When we del these three, they all become trash, but
        # the cycles prevent any of them from getting cleaned up immediately.
        # Instead they have to wait for cyclic gc to deduce that they're
        # trash.
        #
        # gc used to call tp_clear on all of them, and the order in which
        # it does that is pretty accidental.  The exact order in which we
        # built up these things manages to provoke gc into running tp_clear
        # in just the right order (I last).  Calling tp_clear on II leaves
        # behind an insane class object (its __mro__ becomes NULL).  Calling
        # tp_clear on J breaks its self-cycle, but J doesn't get deleted
        # just then because of the strong reference from I.J.  Calling
        # tp_clear on I starts to clear I's __dict__, and just happens to
        # clear I.J first -- I.wr is still intact.  That removes the last
        # reference to J, which triggers the weakref callback.  The callback
        # tries to do "self.J", and instances of new-style classes look up
        # attributes ("J") in the class dict first.  The class (II) wants to
        # search II.__mro__, but that's NULL.  The result was a segfault in
        # a release build, and an assert failure in a debug build.
        del I, J, II
        gc.collect()
    def test_callback_in_cycle_2(self):
        import gc
        # This is just like test_callback_in_cycle_1, except that II is an
        # old-style class.  The symptom is different then:  an instance of an
        # old-style class looks in its own __dict__ first.  'J' happens to
        # get cleared from I.__dict__ before 'wr', and 'J' was never in II's
        # __dict__, so the attribute isn't found.  The difference is that
        # the old-style II doesn't have a NULL __mro__ (it doesn't have any
        # __mro__), so no segfault occurs.  Instead it got:
        #    test_callback_in_cycle_2 (__main__.ReferencesTestCase) ...
        #    Exception exceptions.AttributeError:
        #   "II instance has no attribute 'J'" in <bound method II.acallback
        #       of <?.II instance at 0x00B9B4B8>> ignored
        class J(object):
            pass
        class II:
            def acallback(self, ignore):
                self.J
        I = II()
        I.J = J
        I.wr = weakref.ref(J, I.acallback)
        del I, J, II
        gc.collect()
    def test_callback_in_cycle_3(self):
        import gc
        # This one broke the first patch that fixed the last two.  In this
        # case, the objects reachable from the callback aren't also reachable
        # from the object (c1) *triggering* the callback:  you can get to
        # c1 from c2, but not vice-versa.  The result was that c2's __dict__
        # got tp_clear'ed by the time the c2.cb callback got invoked.
        class C:
            def cb(self, ignore):
                self.me
                self.c1
                self.wr
        c1, c2 = C(), C()
        c2.me = c2
        c2.c1 = c1
        c2.wr = weakref.ref(c1, c2.cb)
        del c1, c2
        gc.collect()
    def test_callback_in_cycle_4(self):
        import gc
        # Like test_callback_in_cycle_3, except c2 and c1 have different
        # classes.  c2's class (C) isn't reachable from c1 then, so protecting
        # objects reachable from the dying object (c1) isn't enough to stop
        # c2's class (C) from getting tp_clear'ed before c2.cb is invoked.
        # The result was a segfault (C.__mro__ was NULL when the callback
        # tried to look up self.me).
        class C(object):
            def cb(self, ignore):
                self.me
                self.c1
                self.wr
        class D:
            pass
        c1, c2 = D(), C()
        c2.me = c2
        c2.c1 = c1
        c2.wr = weakref.ref(c1, c2.cb)
        del c1, c2, C, D
        gc.collect()
    def test_callback_in_cycle_resurrection(self):
        import gc
        # Do something nasty in a weakref callback:  resurrect objects
        # from dead cycles.  For this to be attempted, the weakref and
        # its callback must also be part of the cyclic trash (else the
        # objects reachable via the callback couldn't be in cyclic trash
        # to begin with -- the callback would act like an external root).
        # But gc clears trash weakrefs with callbacks early now, which
        # disables the callbacks, so the callbacks shouldn't get called
        # at all (and so nothing actually gets resurrected).
        alist = []
        class C(object):
            def __init__(self, value):
                self.attribute = value
            def acallback(self, ignore):
                alist.append(self.c)
        c1, c2 = C(1), C(2)
        c1.c = c2
        c2.c = c1
        c1.wr = weakref.ref(c2, c1.acallback)
        c2.wr = weakref.ref(c1, c2.acallback)
        def C_went_away(ignore):
            alist.append("C went away")
        wr = weakref.ref(C, C_went_away)
        del c1, c2, C   # make them all trash
        self.assertEqual(alist, [])  # del isn't enough to reclaim anything
        gc.collect()
        # c1.wr and c2.wr were part of the cyclic trash, so should have
        # been cleared without their callbacks executing.  OTOH, the weakref
        # to C is bound to a function local (wr), and wasn't trash, so that
        # callback should have been invoked when C went away.
        self.assertEqual(alist, ["C went away"])
        # The remaining weakref should be dead now (its callback ran).
        self.assertEqual(wr(), None)
        del alist[:]
        gc.collect()
        self.assertEqual(alist, [])
    def test_callbacks_on_callback(self):
        import gc
        # Set up weakref callbacks *on* weakref callbacks.
        alist = []
        def safe_callback(ignore):
            alist.append("safe_callback called")
        class C(object):
            def cb(self, ignore):
                alist.append("cb called")
        c, d = C(), C()
        c.other = d
        d.other = c
        callback = c.cb
        c.wr = weakref.ref(d, callback)     # this won't trigger
        d.wr = weakref.ref(callback, d.cb)  # ditto
        external_wr = weakref.ref(callback, safe_callback)  # but this will
        self.assertTrue(external_wr() is callback)
        # The weakrefs attached to c and d should get cleared, so that
        # C.cb is never called.  But external_wr isn't part of the cyclic
        # trash, and no cyclic trash is reachable from it, so safe_callback
        # should get invoked when the bound method object callback (c.cb)
        # -- which is itself a callback, and also part of the cyclic trash --
        # gets reclaimed at the end of gc.
        del callback, c, d, C
        self.assertEqual(alist, [])  # del isn't enough to clean up cycles
        gc.collect()
        self.assertEqual(alist, ["safe_callback called"])
        self.assertEqual(external_wr(), None)
        del alist[:]
        gc.collect()
        self.assertEqual(alist, [])
    def test_gc_during_ref_creation(self):
        """Creating a plain ref while gc runs aggressively must not crash."""
        self.check_gc_during_creation(weakref.ref)
    def test_gc_during_proxy_creation(self):
        """Creating a proxy while gc runs aggressively must not crash."""
        self.check_gc_during_creation(weakref.proxy)
    def check_gc_during_creation(self, makeref):
        """Helper: force collection on nearly every allocation while
        weakrefs are being created, then restore the gc thresholds."""
        thresholds = gc.get_threshold()
        gc.set_threshold(1, 1, 1)
        gc.collect()
        class A:
            pass
        def callback(*args):
            pass
        referenced = A()
        a = A()
        a.a = a
        a.wr = makeref(referenced)
        try:
            # now make sure the object and the ref get labeled as
            # cyclic trash:
            a = A()
            weakref.ref(referenced, callback)
        finally:
            gc.set_threshold(*thresholds)
    def test_ref_created_during_del(self):
        # Bug #1377858
        # A weakref created in an object's __del__() would crash the
        # interpreter when the weakref was cleaned up since it would refer to
        # non-existent memory.  This test should not segfault the interpreter.
        class Target(object):
            def __del__(self):
                global ref_from_del
                ref_from_del = weakref.ref(self)
        w = Target()
    def test_init(self):
        # Issue 3634
        # <weakref to class>.__init__() doesn't check errors correctly
        r = weakref.ref(Exception)
        self.assertRaises(TypeError, r.__init__, 0, 0, 0, 0, 0)
        # No exception should be raised here
        gc.collect()
    def test_classes(self):
        # Check that both old-style classes and new-style classes
        # are weakrefable.
        class A(object):
            pass
        class B:
            pass
        l = []
        weakref.ref(int)
        a = weakref.ref(A, l.append)
        A = None
        gc.collect()
        self.assertEqual(a(), None)
        self.assertEqual(l, [a])
        b = weakref.ref(B, l.append)
        B = None
        gc.collect()
        self.assertEqual(b(), None)
        self.assertEqual(l, [a, b])
class SubclassableWeakrefTestCase(TestBase):
    """Tests for user-defined subclasses of weakref.ref."""

    def test_subclass_refs(self):
        """Subclass instances carry their own state and still dereference."""
        class MyRef(weakref.ref):
            def __init__(self, ob, callback=None, value=42):
                self.value = value
                super(MyRef, self).__init__(ob, callback)
            def __call__(self):
                self.called = True
                return super(MyRef, self).__call__()
        o = Object("foo")
        mr = MyRef(o, value=24)
        self.assertTrue(mr() is o)
        self.assertTrue(mr.called)
        self.assertEqual(mr.value, 24)
        del o
        self.assertTrue(mr() is None)
        self.assertTrue(mr.called)

    def test_subclass_refs_dont_replace_standard_refs(self):
        """Subclass refs are never shared with plain refs to the object."""
        class MyRef(weakref.ref):
            pass
        o = Object(42)
        r1 = MyRef(o)
        r2 = weakref.ref(o)
        self.assertTrue(r1 is not r2)
        self.assertEqual(weakref.getweakrefs(o), [r2, r1])
        self.assertEqual(weakref.getweakrefcount(o), 2)
        r3 = MyRef(o)
        self.assertEqual(weakref.getweakrefcount(o), 3)
        refs = weakref.getweakrefs(o)
        self.assertEqual(len(refs), 3)
        # The plain (shared) ref is kept first for fast re-use.
        self.assertTrue(r2 is refs[0])
        self.assertIn(r1, refs[1:])
        self.assertIn(r3, refs[1:])

    def test_subclass_refs_dont_conflate_callbacks(self):
        """Two subclass refs with different callbacks stay distinct."""
        class MyRef(weakref.ref):
            pass
        o = Object(42)
        r1 = MyRef(o, id)
        r2 = MyRef(o, str)
        self.assertTrue(r1 is not r2)
        refs = weakref.getweakrefs(o)
        self.assertIn(r1, refs)
        self.assertIn(r2, refs)

    def test_subclass_refs_with_slots(self):
        """__slots__ subclasses work and grow no instance __dict__."""
        class MyRef(weakref.ref):
            __slots__ = "slot1", "slot2"
            def __new__(type, ob, callback, slot1, slot2):
                return weakref.ref.__new__(type, ob, callback)
            def __init__(self, ob, callback, slot1, slot2):
                self.slot1 = slot1
                self.slot2 = slot2
            def meth(self):
                return self.slot1 + self.slot2
        o = Object(42)
        r = MyRef(o, None, "abc", "def")
        self.assertEqual(r.slot1, "abc")
        self.assertEqual(r.slot2, "def")
        self.assertEqual(r.meth(), "abcdef")
        self.assertFalse(hasattr(r, "__dict__"))

    def test_subclass_refs_with_cycle(self):
        # Bug #3110
        # An instance of a weakref subclass can have attributes.
        # If such a weakref holds the only strong reference to the object,
        # deleting the weakref will delete the object. In this case,
        # the callback must not be called, because the ref object is
        # being deleted.
        class MyRef(weakref.ref):
            pass
        # Use a local callback, for "regrtest -R::"
        # to detect refcounting problems
        def callback(w):
            self.cbcalled += 1
        o = C()
        r1 = MyRef(o, callback)
        r1.o = o
        del o
        del r1  # Used to crash here
        self.assertEqual(self.cbcalled, 0)
        # Same test, with two weakrefs to the same object
        # (since code paths are different)
        o = C()
        r1 = MyRef(o, callback)
        r2 = MyRef(o, callback)
        r1.r = r2
        r2.o = o
        del o
        del r2
        del r1  # Used to crash here
        self.assertEqual(self.cbcalled, 0)
class Object:
    """Simple weak-referenceable value holder used as a test fixture."""

    def __init__(self, arg):
        # The wrapped value; tests read it back through ``.arg``.
        self.arg = arg

    def __repr__(self):
        return "<Object {0!r}>".format(self.arg)
class MappingTestCase(TestBase):
    """Tests for WeakValueDictionary and WeakKeyDictionary."""

    # Number of entries created by the make_weak_*_dict() helpers.
    COUNT = 10

    def test_weak_values(self):
        #
        #  This exercises d.copy(), d.items(), d[], del d[], len(d).
        #
        # NOTE: the local name `dict` shadows the builtin throughout.
        dict, objects = self.make_weak_valued_dict()
        for o in objects:
            self.assertTrue(weakref.getweakrefcount(o) == 1,
                            "wrong number of weak references to %r!" % o)
            self.assertTrue(o is dict[o.arg],
                            "wrong object returned by weak dict!")
        items1 = dict.items()
        items2 = dict.copy().items()
        items1.sort()
        items2.sort()
        self.assertTrue(items1 == items2,
                        "cloning of weak-valued dictionary did not work!")
        del items1, items2
        self.assertTrue(len(dict) == self.COUNT)
        del objects[0]
        self.assertTrue(len(dict) == (self.COUNT - 1),
                        "deleting object did not cause dictionary update")
        del objects, o
        self.assertTrue(len(dict) == 0,
                        "deleting the values did not clear the dictionary")
        # regression on SF bug #447152:
        dict = weakref.WeakValueDictionary()
        self.assertRaises(KeyError, dict.__getitem__, 1)
        dict[2] = C()
        # The C() above has no strong reference, so the entry dies at once.
        self.assertRaises(KeyError, dict.__getitem__, 2)

    def test_weak_keys(self):
        #
        #  This exercises d.copy(), d.items(), d[] = v, d[], del d[],
        #  len(d), in d.
        #
        dict, objects = self.make_weak_keyed_dict()
        for o in objects:
            self.assertTrue(weakref.getweakrefcount(o) == 1,
                            "wrong number of weak references to %r!" % o)
            self.assertTrue(o.arg is dict[o],
                            "wrong object returned by weak dict!")
        items1 = dict.items()
        items2 = dict.copy().items()
        self.assertTrue(set(items1) == set(items2),
                        "cloning of weak-keyed dictionary did not work!")
        del items1, items2
        self.assertTrue(len(dict) == self.COUNT)
        del objects[0]
        self.assertTrue(len(dict) == (self.COUNT - 1),
                        "deleting object did not cause dictionary update")
        del objects, o
        self.assertTrue(len(dict) == 0,
                        "deleting the keys did not clear the dictionary")
        o = Object(42)
        dict[o] = "What is the meaning of the universe?"
        self.assertIn(o, dict)
        self.assertNotIn(34, dict)

    def test_weak_keyed_iters(self):
        """keyrefs()/iterkeyrefs() cover each key exactly once."""
        dict, objects = self.make_weak_keyed_dict()
        self.check_iters(dict)
        # Test keyrefs()
        refs = dict.keyrefs()
        self.assertEqual(len(refs), len(objects))
        objects2 = list(objects)
        for wr in refs:
            ob = wr()
            self.assertIn(ob, dict)
            self.assertEqual(ob.arg, dict[ob])
            objects2.remove(ob)
        self.assertEqual(len(objects2), 0)
        # Test iterkeyrefs()
        objects2 = list(objects)
        self.assertEqual(len(list(dict.iterkeyrefs())), len(objects))
        for wr in dict.iterkeyrefs():
            ob = wr()
            self.assertIn(ob, dict)
            self.assertEqual(ob.arg, dict[ob])
            objects2.remove(ob)
        self.assertEqual(len(objects2), 0)

    def test_weak_valued_iters(self):
        """valuerefs()/itervaluerefs() cover each value exactly once."""
        dict, objects = self.make_weak_valued_dict()
        self.check_iters(dict)
        # Test valuerefs()
        refs = dict.valuerefs()
        self.assertEqual(len(refs), len(objects))
        objects2 = list(objects)
        for wr in refs:
            ob = wr()
            self.assertEqual(ob, dict[ob.arg])
            self.assertEqual(ob.arg, dict[ob.arg].arg)
            objects2.remove(ob)
        self.assertEqual(len(objects2), 0)
        # Test itervaluerefs()
        objects2 = list(objects)
        self.assertEqual(len(list(dict.itervaluerefs())), len(objects))
        for wr in dict.itervaluerefs():
            ob = wr()
            self.assertEqual(ob, dict[ob.arg])
            self.assertEqual(ob.arg, dict[ob.arg].arg)
            objects2.remove(ob)
        self.assertEqual(len(objects2), 0)

    def check_iters(self, dict):
        """Helper: every iterator flavour visits all entries of *dict*."""
        # item iterator:
        items = dict.items()
        for item in dict.iteritems():
            items.remove(item)
        self.assertTrue(len(items) == 0, "iteritems() did not touch all items")
        # key iterator, via __iter__():
        keys = dict.keys()
        for k in dict:
            keys.remove(k)
        self.assertTrue(len(keys) == 0, "__iter__() did not touch all keys")
        # key iterator, via iterkeys():
        keys = dict.keys()
        for k in dict.iterkeys():
            keys.remove(k)
        self.assertTrue(len(keys) == 0, "iterkeys() did not touch all keys")
        # value iterator:
        values = dict.values()
        for v in dict.itervalues():
            values.remove(v)
        self.assertTrue(len(values) == 0,
                        "itervalues() did not touch all values")

    def test_make_weak_keyed_dict_from_dict(self):
        """WeakKeyDictionary can be seeded from a plain dict."""
        o = Object(3)
        dict = weakref.WeakKeyDictionary({o:364})
        self.assertTrue(dict[o] == 364)

    def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
        """WeakKeyDictionary can be seeded from another WeakKeyDictionary."""
        o = Object(3)
        dict = weakref.WeakKeyDictionary({o:364})
        dict2 = weakref.WeakKeyDictionary(dict)
        self.assertTrue(dict[o] == 364)

    def make_weak_keyed_dict(self):
        """Build a WeakKeyDictionary of COUNT Object keys; return it plus
        the list that keeps the keys alive."""
        dict = weakref.WeakKeyDictionary()
        objects = map(Object, range(self.COUNT))
        for o in objects:
            dict[o] = o.arg
        return dict, objects

    def make_weak_valued_dict(self):
        """Build a WeakValueDictionary of COUNT Object values; return it
        plus the list that keeps the values alive."""
        dict = weakref.WeakValueDictionary()
        objects = map(Object, range(self.COUNT))
        for o in objects:
            dict[o.arg] = o
        return dict, objects

    def check_popitem(self, klass, key1, value1, key2, value2):
        """Helper: popitem() removes both entries, pairing values correctly."""
        weakdict = klass()
        weakdict[key1] = value1
        weakdict[key2] = value2
        self.assertTrue(len(weakdict) == 2)
        k, v = weakdict.popitem()
        self.assertTrue(len(weakdict) == 1)
        if k is key1:
            self.assertTrue(v is value1)
        else:
            self.assertTrue(v is value2)
        k, v = weakdict.popitem()
        self.assertTrue(len(weakdict) == 0)
        if k is key1:
            self.assertTrue(v is value1)
        else:
            self.assertTrue(v is value2)

    def test_weak_valued_dict_popitem(self):
        self.check_popitem(weakref.WeakValueDictionary,
                           "key1", C(), "key2", C())

    def test_weak_keyed_dict_popitem(self):
        self.check_popitem(weakref.WeakKeyDictionary,
                           C(), "value 1", C(), "value 2")

    def check_setdefault(self, klass, key, value1, value2):
        """Helper: setdefault() inserts once, then keeps the first value."""
        self.assertTrue(value1 is not value2,
                        "invalid test"
                        " -- value parameters must be distinct objects")
        weakdict = klass()
        o = weakdict.setdefault(key, value1)
        self.assertIs(o, value1)
        self.assertIn(key, weakdict)
        self.assertIs(weakdict.get(key), value1)
        self.assertIs(weakdict[key], value1)
        o = weakdict.setdefault(key, value2)
        self.assertIs(o, value1)
        self.assertIn(key, weakdict)
        self.assertIs(weakdict.get(key), value1)
        self.assertIs(weakdict[key], value1)

    def test_weak_valued_dict_setdefault(self):
        self.check_setdefault(weakref.WeakValueDictionary,
                              "key", C(), C())

    def test_weak_keyed_dict_setdefault(self):
        self.check_setdefault(weakref.WeakKeyDictionary,
                              C(), "value 1", "value 2")

    def check_update(self, klass, dict):
        #
        #  This exercises d.update(), len(d), d.keys(), in d,
        #  d.get(), d[].
        #
        weakdict = klass()
        weakdict.update(dict)
        self.assertEqual(len(weakdict), len(dict))
        for k in weakdict.keys():
            self.assertIn(k, dict,
                          "mysterious new key appeared in weak dict")
            v = dict.get(k)
            self.assertIs(v, weakdict[k])
            self.assertIs(v, weakdict.get(k))
        for k in dict.keys():
            self.assertIn(k, weakdict,
                          "original key disappeared in weak dict")
            v = dict[k]
            self.assertIs(v, weakdict[k])
            self.assertIs(v, weakdict.get(k))

    def test_weak_valued_dict_update(self):
        self.check_update(weakref.WeakValueDictionary,
                          {1: C(), 'a': C(), C(): C()})

    def test_weak_keyed_dict_update(self):
        self.check_update(weakref.WeakKeyDictionary,
                          {C(): 1, C(): 2, C(): 3})

    def test_weak_keyed_delitem(self):
        """del d[key] removes exactly that key from a WeakKeyDictionary."""
        d = weakref.WeakKeyDictionary()
        o1 = Object('1')
        o2 = Object('2')
        d[o1] = 'something'
        d[o2] = 'something'
        self.assertTrue(len(d) == 2)
        del d[o1]
        self.assertTrue(len(d) == 1)
        self.assertTrue(d.keys() == [o2])

    def test_weak_valued_delitem(self):
        """del d[key] removes exactly that key from a WeakValueDictionary."""
        d = weakref.WeakValueDictionary()
        o1 = Object('1')
        o2 = Object('2')
        d['something'] = o1
        d['something else'] = o2
        self.assertTrue(len(d) == 2)
        del d['something']
        self.assertTrue(len(d) == 1)
        self.assertTrue(d.items() == [('something else', o2)])

    def test_weak_keyed_bad_delitem(self):
        d = weakref.WeakKeyDictionary()
        o = Object('1')
        # An attempt to delete an object that isn't there should raise
        # KeyError.  It didn't before 2.3.
        self.assertRaises(KeyError, d.__delitem__, o)
        self.assertRaises(KeyError, d.__getitem__, o)
        # If a key isn't of a weakly referencable type, __getitem__ and
        # __setitem__ raise TypeError.  __delitem__ should too.
        self.assertRaises(TypeError, d.__delitem__, 13)
        self.assertRaises(TypeError, d.__getitem__, 13)
        self.assertRaises(TypeError, d.__setitem__, 13, 13)

    def test_weak_keyed_cascading_deletes(self):
        # SF bug 742860.  For some reason, before 2.3 __delitem__ iterated
        # over the keys via self.data.iterkeys().  If things vanished from
        # the dict during this (or got added), that caused a RuntimeError.
        d = weakref.WeakKeyDictionary()
        mutate = False
        class C(object):
            def __init__(self, i):
                self.value = i
            def __hash__(self):
                return hash(self.value)
            def __eq__(self, other):
                if mutate:
                    # Side effect that mutates the dict, by removing the
                    # last strong reference to a key.
                    del objs[-1]
                return self.value == other.value
        objs = [C(i) for i in range(4)]
        for o in objs:
            d[o] = o.value
        del o   # now the only strong references to keys are in objs
        # Find the order in which iterkeys sees the keys.
        objs = d.keys()
        # Reverse it, so that the iteration implementation of __delitem__
        # has to keep looping to find the first object we delete.
        objs.reverse()
        # Turn on mutation in C.__eq__.  The first time thru the loop,
        # under the iterkeys() business the first comparison will delete
        # the last item iterkeys() would see, and that causes a
        #     RuntimeError: dictionary changed size during iteration
        # when the iterkeys() loop goes around to try comparing the next
        # key.  After this was fixed, it just deletes the last object *our*
        # "for o in obj" loop would have gotten to.
        mutate = True
        count = 0
        for o in objs:
            count += 1
            del d[o]
        self.assertEqual(len(d), 0)
        self.assertEqual(count, 2)
from test import mapping_tests
class WeakValueDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
    """Check that WeakValueDictionary conforms to the mapping protocol"""
    # Strong references to the values live here so they stay alive
    # for the duration of the protocol tests.
    __ref = {"key1":Object(1), "key2":Object(2), "key3":Object(3)}
    type2test = weakref.WeakValueDictionary
    def _reference(self):
        # Hand out a copy so tests cannot mutate the shared mapping.
        return self.__ref.copy()
class WeakKeyDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
    """Check that WeakKeyDictionary conforms to the mapping protocol"""
    # Strong references to the keys live here so they stay alive
    # for the duration of the protocol tests.
    __ref = {Object("key1"):1, Object("key2"):2, Object("key3"):3}
    type2test = weakref.WeakKeyDictionary
    def _reference(self):
        # Hand out a copy so tests cannot mutate the shared mapping.
        return self.__ref.copy()
libreftest = """ Doctest for examples in the library reference: weakref.rst
>>> import weakref
>>> class Dict(dict):
... pass
...
>>> obj = Dict(red=1, green=2, blue=3) # this object is weak referencable
>>> r = weakref.ref(obj)
>>> print r() is obj
True
>>> import weakref
>>> class Object:
... pass
...
>>> o = Object()
>>> r = weakref.ref(o)
>>> o2 = r()
>>> o is o2
True
>>> del o, o2
>>> print r()
None
>>> import weakref
>>> class ExtendedRef(weakref.ref):
... def __init__(self, ob, callback=None, **annotations):
... super(ExtendedRef, self).__init__(ob, callback)
... self.__counter = 0
... for k, v in annotations.iteritems():
... setattr(self, k, v)
... def __call__(self):
... '''Return a pair containing the referent and the number of
... times the reference has been called.
... '''
... ob = super(ExtendedRef, self).__call__()
... if ob is not None:
... self.__counter += 1
... ob = (ob, self.__counter)
... return ob
...
>>> class A: # not in docs from here, just testing the ExtendedRef
... pass
...
>>> a = A()
>>> r = ExtendedRef(a, foo=1, bar="baz")
>>> r.foo
1
>>> r.bar
'baz'
>>> r()[1]
1
>>> r()[1]
2
>>> r()[0] is a
True
>>> import weakref
>>> _id2obj_dict = weakref.WeakValueDictionary()
>>> def remember(obj):
... oid = id(obj)
... _id2obj_dict[oid] = obj
... return oid
...
>>> def id2obj(oid):
... return _id2obj_dict[oid]
...
>>> a = A() # from here, just testing
>>> a_id = remember(a)
>>> id2obj(a_id) is a
True
>>> del a
>>> try:
... id2obj(a_id)
... except KeyError:
... print 'OK'
... else:
... print 'WeakValueDictionary error'
OK
"""
__test__ = {'libreftest' : libreftest}
def test_main():
    """Run all unittest test cases, then the libreftest doctests."""
    test_support.run_unittest(
        ReferencesTestCase,
        MappingTestCase,
        WeakValueDictionaryTestCase,
        WeakKeyDictionaryTestCase,
        SubclassableWeakrefTestCase,
        )
    test_support.run_doctest(sys.modules[__name__])

if __name__ == "__main__":
    test_main()
| bsd-2-clause |
bzbarsky/servo | tests/wpt/web-platform-tests/css/vendor-imports/mozilla/mozilla-central-reftests/text-decor-3/support/generate-text-emphasis-style-property-tests.py | 841 | 3434 | #!/usr/bin/env python
# - * - coding: UTF-8 - * -
"""
This script generates tests text-emphasis-style-property-011 ~ 020 which
cover all possible values of text-emphasis-style property, except none
and <string>, with horizontal writing mode. It outputs a list of all
tests it generated in the format of Mozilla reftest.list to the stdout.
"""
from __future__ import unicode_literals
# File-name pattern for generated tests: zero-padded index plus a
# one-letter variant suffix.
TEST_FILE = 'text-emphasis-style-property-{:03}{}.html'
# Markup for one test page; placeholders are filled by write_test_file().
TEST_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Test: text-emphasis-style: {title}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="help" href="https://drafts.csswg.org/css-text-decor-3/#text-emphasis-style-property">
<meta name="assert" content="'text-emphasis-style: {value}' produces {code} as emphasis marks.">
<link rel="match" href="text-emphasis-style-property-{index:03}-ref.html">
<p>Pass if there is a '{char}' above every character below:</p>
<div style="line-height: 5; text-emphasis-style: {value}">試験テスト</div>
'''
# File-name pattern for the shared reference rendering of each index.
REF_FILE = 'text-emphasis-style-property-{:03}-ref.html'
# Reference page: the expected mark is drawn with ruby annotations.
REF_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Reference: text-emphasis-style: {0}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<style> rt {{ font-variant-east-asian: inherit; }} </style>
<p>Pass if there is a '{1}' above every character below:</p>
<div style="line-height: 5;"><ruby>試<rt>{1}</rt>験<rt>{1}</rt>テ<rt>{1}</rt>ス<rt>{1}</rt>ト<rt>{1}</rt></ruby></div>
'''
# (shape keyword, filled code point, open code point) for every shape.
DATA_SET = [
    ('dot', 0x2022, 0x25e6),
    ('circle', 0x25cf, 0x25cb),
    ('double-circle', 0x25c9, 0x25ce),
    ('triangle', 0x25b2, 0x25b3),
    ('sesame', 0xfe45, 0xfe46),
]
# Variant suffixes appended to a test index ('' is the base test).
SUFFIXES = ['', 'a', 'b', 'c', 'd', 'e']
def get_html_entity(code):
    """Return the HTML numeric character reference for code point *code*.

    The code point is rendered as an uppercase hex escape padded to four
    digits, e.g. 0x2022 -> '&#x2022;'.
    """
    return '&#x%04X;' % code
def write_file(filename, content):
    """Write text *content* to *filename*, encoded as UTF-8."""
    encoded = content.encode('UTF-8')
    with open(filename, 'wb') as out:
        out.write(encoded)
def write_test_file(idx, suffix, style, code, name=None):
    """Emit one test file for 'text-emphasis-style: <style>' and print its
    reftest manifest line ("== test ref") to stdout.

    idx/suffix select the output filename via TEST_FILE; *code* is the
    expected emphasis-mark code point; *name* overrides the <title> text
    (defaults to the style value itself).
    """
    if not name:
        name = style
    filename = TEST_FILE.format(idx, suffix)
    # TEST_TEMPLATE wants both the rendered character and its "U+XXXX" label.
    write_file(filename, TEST_TEMPLATE.format(index=idx, value=style,
                                              char=get_html_entity(code),
                                              code='U+{:04X}'.format(code),
                                              title=name))
    # All suffixed variants of one index share a single reference file.
    print("== {} {}".format(filename, REF_FILE.format(idx)))
# Global test counter; tests are numbered from 011 upward (idx is
# pre-incremented inside write_files).
idx = 10
def write_files(style, code):
    """Write the reference file and every test variant for one
    (fill, shape) combination, e.g. ('filled', 'dot').

    *code* is the code point expected as the emphasis mark. Variants:
    "<fill> <shape>", the reversed "<shape> <fill>", bare shape when the
    fill is the default 'filled', and bare fill when the shape is the
    default 'circle'.
    """
    global idx
    idx += 1
    fill, shape = style
    basic_style = "{} {}".format(fill, shape)
    write_file(REF_FILE.format(idx),
               REF_TEMPLATE.format(basic_style, get_html_entity(code)))
    # Hand out 'a', 'b', ... suffixes in order for each emitted variant.
    suffix = iter(SUFFIXES)
    write_test_file(idx, next(suffix), basic_style, code)
    write_test_file(idx, next(suffix), "{} {}".format(shape, fill), code)
    if fill == 'filled':
        # 'filled' is the initial fill, so the bare shape means the same.
        write_test_file(idx, next(suffix), shape, code)
    if shape == 'circle':
        # 'circle' is the default shape in horizontal writing mode.
        write_test_file(idx, next(suffix), fill, code, fill + ', horizontal')
# Manifest header/footer plus one write_files call per (fill, shape) pair.
print("# START tests from {}".format(__file__))
# DATA_SET rows are (shape-name, filled-codepoint, open-codepoint):
# the 'filled' pass uses the second column, the 'open' pass the third.
for name, code, _ in DATA_SET:
    write_files(('filled', name), code)
for name, _, code in DATA_SET:
    write_files(('open', name), code)
print("# END tests from {}".format(__file__))
| mpl-2.0 |
gdooper/scipy | scipy/fftpack/tests/test_import.py | 49 | 1352 | """Test possibility of patching fftpack with pyfftw.
No module source outside of scipy.fftpack should contain an import of
the form `from scipy.fftpack import ...`, so that a simple replacement
of scipy.fftpack by the corresponding fftw interface completely swaps
the two FFT implementations.
Because this simply inspects source files, we only need to run the test
on one version of Python.
"""
import sys

# The whole test is gated on Python >= 3.4 because it relies on pathlib;
# inspecting the source tree once on one interpreter version is enough.
if sys.version_info >= (3, 4):
    from pathlib import Path
    import re
    import tokenize
    from numpy.testing import TestCase, assert_, run_module_suite
    import scipy

    class TestFFTPackImport(TestCase):
        def test_fftpack_import(self):
            """Scan every scipy .py file (outside scipy/fftpack itself) and
            assert none contains a `from ....fftpack import ...` line, so
            the fftpack module stays swappable for a pyfftw drop-in."""
            base = Path(scipy.__file__).parent
            regexp = r"\s*from.+\.fftpack import .*\n"
            for path in base.rglob("*.py"):
                if base / "fftpack" in path.parents:
                    continue
                # use tokenize to auto-detect encoding on systems where no
                # default encoding is defined (e.g. LANG='C')
                with tokenize.open(str(path)) as file:
                    assert_(all(not re.fullmatch(regexp, line)
                                for line in file),
                            "{0} contains an import from fftpack".format(path))

if __name__ == "__main__":
    # NOTE(review): run_module_suite is only imported inside the version
    # guard above, so running this file directly on Python < 3.4 would
    # raise NameError here — presumably acceptable; confirm.
    run_module_suite(argv=sys.argv)
| bsd-3-clause |
mancoast/CPythonPyc_test | cpython/261_test_module.py | 51 | 2085 | # Test the module type
import unittest
from test.test_support import run_unittest
import sys
ModuleType = type(sys)
class ModuleTests(unittest.TestCase):
    """Behavioural tests for the module type (ModuleType = type(sys))."""

    def test_uninitialized(self):
        # An uninitialized module has no __dict__ or __name__,
        # and __doc__ is None
        foo = ModuleType.__new__(ModuleType)
        # NOTE: failUnless/failUnlessEqual are deprecated aliases of
        # assertTrue/assertEqual; kept as-is for this Python 2.6-era suite.
        self.failUnless(foo.__dict__ is None)
        try:
            s = foo.__name__
            self.fail("__name__ = %s" % repr(s))
        except AttributeError:
            pass
        self.assertEqual(foo.__doc__, ModuleType.__doc__)

    def test_no_docstring(self):
        # Regularly initialized module, no docstring
        foo = ModuleType("foo")
        self.assertEqual(foo.__name__, "foo")
        self.assertEqual(foo.__doc__, None)
        self.assertEqual(foo.__dict__, {"__name__": "foo", "__doc__": None})

    def test_ascii_docstring(self):
        # ASCII docstring
        foo = ModuleType("foo", "foodoc")
        self.assertEqual(foo.__name__, "foo")
        self.assertEqual(foo.__doc__, "foodoc")
        self.assertEqual(foo.__dict__,
                         {"__name__": "foo", "__doc__": "foodoc"})

    def test_unicode_docstring(self):
        # Unicode docstring
        foo = ModuleType("foo", u"foodoc\u1234")
        self.assertEqual(foo.__name__, "foo")
        self.assertEqual(foo.__doc__, u"foodoc\u1234")
        self.assertEqual(foo.__dict__,
                         {"__name__": "foo", "__doc__": u"foodoc\u1234"})

    def test_reinit(self):
        # Reinitialization should not replace the __dict__
        foo = ModuleType("foo", u"foodoc\u1234")
        foo.bar = 42
        d = foo.__dict__
        foo.__init__("foo", "foodoc")
        self.assertEqual(foo.__name__, "foo")
        self.assertEqual(foo.__doc__, "foodoc")
        self.assertEqual(foo.bar, 42)
        self.assertEqual(foo.__dict__,
                         {"__name__": "foo", "__doc__": "foodoc", "bar": 42})
        # Same dict object must survive re-__init__.
        self.failUnless(foo.__dict__ is d)
def test_main():
    """Entry point used by the CPython regression-test driver."""
    run_unittest(ModuleTests)

if __name__ == '__main__':
    test_main()
| gpl-3.0 |
rohit12/opencog | opencog/python/pygephi.py | 36 | 1507 | import urllib2
import json
class JSONClient(object):
    """Minimal client for the Gephi graph-streaming HTTP API.

    Mutation events ("an"/"cn"/"ae"/"ce") are buffered as newline-delimited
    JSON in self.data and POSTed on flush(); delete/clean events are sent
    immediately. NOTE(review): uses py2-only urllib2 — would need
    urllib.request on Python 3.
    """
    def __init__(self, url='http://127.0.0.1:8080/workspace0', autoflush=False):
        self.url = url
        self.data = ""          # pending newline-delimited JSON events
        self.autoflush = autoflush  # if True, every buffered op flushes itself
    def flush(self):
        # Send all buffered events in one request and reset the buffer.
        if len(self.data) > 0:
            self.__send(self.data)
            self.data = ""
    def __send(self, data):
        conn = urllib2.urlopen(self.url+ '?operation=updateGraph', data)
        return conn.read()
    def add_node(self, id, flush=True, **attributes):
        # NOTE(review): the `flush` parameter is accepted but never read —
        # only self.autoflush controls flushing. Looks like a latent bug;
        # left unchanged to preserve caller-visible batching behaviour.
        self.data += json.dumps({"an":{id:attributes}}) + '\r\n'
        if(self.autoflush): self.flush()
    def delete_node(self, id):
        # Deletes bypass the buffer and are sent immediately.
        self.__send(json.dumps({"dn":{id:{}}}))
    def change_node(self, id, **attributes):
        self.data += json.dumps({"cn":{id:attributes}}) + '\r\n'
        if(self.autoflush): self.flush()
    def add_edge(self, id, source, target, directed=True, **attributes):
        attributes['source'] = source
        attributes['target'] = target
        attributes['directed'] = directed
        self.data += json.dumps({"ae":{id:attributes}}) + '\r\n'
        if(self.autoflush): self.flush()
    def delete_edge(self, id):
        self.__send(json.dumps({"de":{id:{}}}))
    def change_edge(self, id, **attributes):
        self.data += json.dumps({"ce":{id:attributes}}) + '\r\n'
        if(self.autoflush): self.flush()
    def clean(self):
        # Remove every node (and therefore every edge) from the workspace.
        self.__send(json.dumps({"dn":{"filter":"ALL"}}))
| agpl-3.0 |
camilonos77/bootstrap-form-python-generator | enviroment/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.py | 1727 | 10500 | from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
    """Token-stream filter that drops start/end tags which the HTML
    specification allows to be omitted (e.g. </li>, </p>, <tbody>).

    Tokens are dicts with at least "type" and "name" keys; the filter
    yields every token except omissible StartTag/EndTag tokens.
    """

    def slider(self):
        """Yield (previous, current, next) triples over the source tokens,
        padding the ends with None, so the omission rules can look one
        token back and one token ahead."""
        previous1 = previous2 = None
        for token in self.source:
            if previous1 is not None:
                yield previous2, previous1, token
            previous2 = previous1
            previous1 = token
        yield previous2, previous1, None

    def __iter__(self):
        for previous, token, next in self.slider():
            type = token["type"]
            if type == "StartTag":
                # A start tag carrying attributes can never be omitted.
                if (token["data"] or
                    not self.is_optional_start(token["name"], previous, next)):
                    yield token
            elif type == "EndTag":
                if not self.is_optional_end(token["name"], next):
                    yield token
            else:
                yield token

    def is_optional_start(self, tagname, previous, next):
        """Return True if *tagname*'s start tag may be omitted in this
        context, per the spec's optional-tags rules."""
        type = next and next["type"] or None
        # BUGFIX: was `tagname in 'html'`, a substring test that is also
        # true for 'h', 'ht', 'm', 'l', ... — equality is what was meant.
        if tagname == 'html':
            # An html element's start tag may be omitted if the first thing
            # inside the html element is not a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname == 'head':
            # A head element's start tag may be omitted if the first thing
            # inside the head element is an element.
            # XXX: we also omit the start tag if the head element is empty
            if type in ("StartTag", "EmptyTag"):
                return True
            elif type == "EndTag":
                return next["name"] == "head"
        elif tagname == 'body':
            # A body element's start tag may be omitted if the first thing
            # inside the body element is not a space character or a comment,
            # except if the first thing inside the body element is a script
            # or style element and the node immediately preceding the body
            # element is a head element whose end tag has been omitted.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we do not look at the preceding event, so we never omit
                # the body element's start tag if it's followed by a script or
                # a style element.
                return next["name"] not in ('script', 'style')
            else:
                return True
        elif tagname == 'colgroup':
            # A colgroup element's start tag may be omitted if the first thing
            # inside the colgroup element is a col element, and if the element
            # is not immediately preceeded by another colgroup element whose
            # end tag has been omitted.
            if type in ("StartTag", "EmptyTag"):
                # XXX: we do not look at the preceding event, so instead we never
                # omit the colgroup element's end tag when it is immediately
                # followed by another colgroup element. See is_optional_end.
                return next["name"] == "col"
            else:
                return False
        elif tagname == 'tbody':
            # A tbody element's start tag may be omitted if the first thing
            # inside the tbody element is a tr element, and if the element is
            # not immediately preceeded by a tbody, thead, or tfoot element
            # whose end tag has been omitted.
            if type == "StartTag":
                # XXX: we never omit the thead and tfoot elements' end tag
                # when they are immediately followed by a tbody element.
                # See is_optional_end.
                if previous and previous['type'] == 'EndTag' and \
                   previous['name'] in ('tbody', 'thead', 'tfoot'):
                    return False
                return next["name"] == 'tr'
            else:
                return False
        return False

    def is_optional_end(self, tagname, next):
        """Return True if *tagname*'s end tag may be omitted given the
        following token, per the spec's optional-tags rules."""
        type = next and next["type"] or None
        if tagname in ('html', 'head', 'body'):
            # An html element's end tag may be omitted if the html element
            # is not immediately followed by a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname in ('li', 'optgroup', 'tr'):
            # A li element's end tag may be omitted if the li element is
            # immediately followed by another li element or if there is
            # no more content in the parent element.
            # An optgroup element's end tag may be omitted if the optgroup
            # element is immediately followed by another optgroup element,
            # or if there is no more content in the parent element.
            # A tr element's end tag may be omitted if the tr element is
            # immediately followed by another tr element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] == tagname
            else:
                return type == "EndTag" or type is None
        elif tagname in ('dt', 'dd'):
            # A dt element's end tag may be omitted if the dt element is
            # immediately followed by another dt element or a dd element.
            # A dd element's end tag may be omitted if the dd element is
            # immediately followed by another dd element or a dt element,
            # or if there is no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('dt', 'dd')
            elif tagname == 'dd':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'p':
            # A p element's end tag may be omitted if the p element is
            # immediately followed by an address, article, aside,
            # blockquote, datagrid, dialog, dir, div, dl, fieldset,
            # footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
            # nav, ol, p, pre, section, table, or ul, element, or if
            # there is no more content in the parent element.
            if type in ("StartTag", "EmptyTag"):
                return next["name"] in ('address', 'article', 'aside',
                                        'blockquote', 'datagrid', 'dialog',
                                        'dir', 'div', 'dl', 'fieldset', 'footer',
                                        'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
                                        'header', 'hr', 'menu', 'nav', 'ol',
                                        'p', 'pre', 'section', 'table', 'ul')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'option':
            # An option element's end tag may be omitted if the option
            # element is immediately followed by another option element,
            # or if it is immediately followed by an <code>optgroup</code>
            # element, or if there is no more content in the parent
            # element.
            if type == "StartTag":
                return next["name"] in ('option', 'optgroup')
            else:
                return type == "EndTag" or type is None
        elif tagname in ('rt', 'rp'):
            # An rt element's end tag may be omitted if the rt element is
            # immediately followed by an rt or rp element, or if there is
            # no more content in the parent element.
            # An rp element's end tag may be omitted if the rp element is
            # immediately followed by an rt or rp element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('rt', 'rp')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'colgroup':
            # A colgroup element's end tag may be omitted if the colgroup
            # element is not immediately followed by a space character or
            # a comment.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we also look for an immediately following colgroup
                # element. See is_optional_start.
                return next["name"] != 'colgroup'
            else:
                return True
        elif tagname in ('thead', 'tbody'):
            # A thead element's end tag may be omitted if the thead element
            # is immediately followed by a tbody or tfoot element.
            # A tbody element's end tag may be omitted if the tbody element
            # is immediately followed by a tbody or tfoot element, or if
            # there is no more content in the parent element.
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] in ['tbody', 'tfoot']
            elif tagname == 'tbody':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'tfoot':
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] == 'tbody'
            else:
                return type == "EndTag" or type is None
        elif tagname in ('td', 'th'):
            # A td element's end tag may be omitted if the td element is
            # immediately followed by a td or th element, or if there is
            # no more content in the parent element.
            # A th element's end tag may be omitted if the th element is
            # immediately followed by a td or th element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('td', 'th')
            else:
                return type == "EndTag" or type is None
        return False
| gpl-2.0 |
axbaretto/beam | sdks/python/.tox/docs/lib/python2.7/site-packages/markupsafe/__init__.py | 701 | 10338 | # -*- coding: utf-8 -*-
"""
markupsafe
~~~~~~~~~~
Implements a Markup string.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import re
import string
from collections import Mapping
from markupsafe._compat import text_type, string_types, int_types, \
unichr, iteritems, PY2
__all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent']
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
_entity_re = re.compile(r'&([^;]+);')
class Markup(text_type):
    r"""Marks a string as being safe for inclusion in HTML/XML output without
    needing to be escaped.  This implements the `__html__` interface a couple
    of frameworks and web applications use.  :class:`Markup` is a direct
    subclass of `unicode` and provides all the methods of `unicode` just that
    it escapes arguments passed and always returns `Markup`.

    The `escape` function returns markup objects so that double escaping can't
    happen.

    The constructor of the :class:`Markup` class can be used for three
    different things:  When passed an unicode object it's assumed to be safe,
    when passed an object with an HTML representation (has an `__html__`
    method) that representation is used, otherwise the object passed is
    converted into a unicode string and then assumed to be safe:

    >>> Markup("Hello <em>World</em>!")
    Markup(u'Hello <em>World</em>!')
    >>> class Foo(object):
    ...  def __html__(self):
    ...   return '<a href="#">foo</a>'
    ...
    >>> Markup(Foo())
    Markup(u'<a href="#">foo</a>')

    If you want object passed being always treated as unsafe you can use the
    :meth:`escape` classmethod to create a :class:`Markup` object:

    >>> Markup.escape("Hello <em>World</em>!")
    Markup(u'Hello &lt;em&gt;World&lt;/em&gt;!')

    Operations on a markup string are markup aware which means that all
    arguments are passed through the :func:`escape` function:

    >>> em = Markup("<em>%s</em>")
    >>> em % "foo & bar"
    Markup(u'<em>foo &amp; bar</em>')
    >>> strong = Markup("<strong>%(text)s</strong>")
    >>> strong % {'text': '<blink>hacker here</blink>'}
    Markup(u'<strong>&lt;blink&gt;hacker here&lt;/blink&gt;</strong>')
    >>> Markup("<em>Hello</em> ") + "<foo>"
    Markup(u'<em>Hello</em> &lt;foo&gt;')
    """
    # No per-instance attributes; Markup is just a tagged text_type.
    __slots__ = ()

    def __new__(cls, base=u'', encoding=None, errors='strict'):
        # Objects exposing __html__ provide their own safe representation.
        if hasattr(base, '__html__'):
            base = base.__html__()
        if encoding is None:
            return text_type.__new__(cls, base)
        return text_type.__new__(cls, base, encoding, errors)

    def __html__(self):
        return self

    def __add__(self, other):
        # Concatenation escapes the unsafe operand first.
        if isinstance(other, string_types) or hasattr(other, '__html__'):
            return self.__class__(super(Markup, self).__add__(self.escape(other)))
        return NotImplemented

    def __radd__(self, other):
        if hasattr(other, '__html__') or isinstance(other, string_types):
            return self.escape(other).__add__(self)
        return NotImplemented

    def __mul__(self, num):
        if isinstance(num, int_types):
            return self.__class__(text_type.__mul__(self, num))
        return NotImplemented
    __rmul__ = __mul__

    def __mod__(self, arg):
        # %-formatting: wrap each argument so it is escaped when rendered.
        if isinstance(arg, tuple):
            arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
        else:
            arg = _MarkupEscapeHelper(arg, self.escape)
        return self.__class__(text_type.__mod__(self, arg))

    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__,
            text_type.__repr__(self)
        )

    def join(self, seq):
        return self.__class__(text_type.join(self, map(self.escape, seq)))
    join.__doc__ = text_type.join.__doc__

    def split(self, *args, **kwargs):
        return list(map(self.__class__, text_type.split(self, *args, **kwargs)))
    split.__doc__ = text_type.split.__doc__

    def rsplit(self, *args, **kwargs):
        return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs)))
    rsplit.__doc__ = text_type.rsplit.__doc__

    def splitlines(self, *args, **kwargs):
        return list(map(self.__class__, text_type.splitlines(
            self, *args, **kwargs)))
    splitlines.__doc__ = text_type.splitlines.__doc__

    def unescape(self):
        r"""Unescape markup again into an text_type string.  This also resolves
        known HTML4 and XHTML entities:

        >>> Markup("Main &raquo; <em>About</em>").unescape()
        u'Main \xbb <em>About</em>'
        """
        # Imported lazily to avoid a circular import at module load time.
        from markupsafe._constants import HTML_ENTITIES
        def handle_match(m):
            name = m.group(1)
            if name in HTML_ENTITIES:
                return unichr(HTML_ENTITIES[name])
            try:
                # Numeric references: hex (&#x..;) or decimal (&#..;).
                if name[:2] in ('#x', '#X'):
                    return unichr(int(name[2:], 16))
                elif name.startswith('#'):
                    return unichr(int(name[1:]))
            except ValueError:
                pass
            # Unknown entities are dropped entirely.
            return u''
        return _entity_re.sub(handle_match, text_type(self))

    def striptags(self):
        r"""Unescape markup into an text_type string and strip all tags.  This
        also resolves known HTML4 and XHTML entities.  Whitespace is
        normalized to one:

        >>> Markup("Main &raquo;  <em>About</em>").striptags()
        u'Main \xbb About'
        """
        stripped = u' '.join(_striptags_re.sub('', self).split())
        return Markup(stripped).unescape()

    @classmethod
    def escape(cls, s):
        """Escape the string.  Works like :func:`escape` with the difference
        that for subclasses of :class:`Markup` this function would return the
        correct subclass.
        """
        rv = escape(s)
        if rv.__class__ is not cls:
            return cls(rv)
        return rv

    # Factory that wraps a text_type method so that all of its string
    # arguments are escaped before delegation; executed at class-body time.
    def make_simple_escaping_wrapper(name):
        orig = getattr(text_type, name)
        def func(self, *args, **kwargs):
            args = _escape_argspec(list(args), enumerate(args), self.escape)
            _escape_argspec(kwargs, iteritems(kwargs), self.escape)
            return self.__class__(orig(self, *args, **kwargs))
        func.__name__ = orig.__name__
        func.__doc__ = orig.__doc__
        return func

    # Install escaping wrappers for all simple text_type methods by writing
    # straight into the class namespace (locals() of the class body).
    for method in '__getitem__', 'capitalize', \
                  'title', 'lower', 'upper', 'replace', 'ljust', \
                  'rjust', 'lstrip', 'rstrip', 'center', 'strip', \
                  'translate', 'expandtabs', 'swapcase', 'zfill':
        locals()[method] = make_simple_escaping_wrapper(method)

    # new in python 2.5
    if hasattr(text_type, 'partition'):
        def partition(self, sep):
            return tuple(map(self.__class__,
                             text_type.partition(self, self.escape(sep))))
        def rpartition(self, sep):
            return tuple(map(self.__class__,
                             text_type.rpartition(self, self.escape(sep))))

    # new in python 2.6
    if hasattr(text_type, 'format'):
        # Signature is (*args) so the method also works when called
        # unbound; self is peeled off the front of args manually.
        def format(*args, **kwargs):
            self, args = args[0], args[1:]
            formatter = EscapeFormatter(self.escape)
            kwargs = _MagicFormatMapping(args, kwargs)
            return self.__class__(formatter.vformat(self, args, kwargs))

        def __html_format__(self, format_spec):
            if format_spec:
                raise ValueError('Unsupported format specification '
                                 'for Markup.')
            return self

    # not in python 3
    if hasattr(text_type, '__getslice__'):
        __getslice__ = make_simple_escaping_wrapper('__getslice__')

    # Remove the class-body helpers so they don't become attributes.
    del method, make_simple_escaping_wrapper
class _MagicFormatMapping(Mapping):
    """Mapping wrapper working around a stdlib string-formatting bug with
    auto-numbered fields ('{}').

    See http://bugs.python.org/issue13598 for information about why
    this is necessary.
    """

    def __init__(self, args, kwargs):
        self._args = args
        self._kwargs = kwargs
        self._last_index = 0

    def __getitem__(self, key):
        # Non-empty keys are plain keyword lookups.
        if key != '':
            return self._kwargs[key]
        # An empty key means the next auto-numbered positional field.
        position = self._last_index
        self._last_index = position + 1
        try:
            return self._args[position]
        except LookupError:
            # Fall back to a keyword spelled like the positional index.
            return self._kwargs[str(position)]

    def __iter__(self):
        return iter(self._kwargs)

    def __len__(self):
        return len(self._kwargs)
# str.format support exists on py2.6+ only, hence the feature test.
if hasattr(text_type, 'format'):
    class EscapeFormatter(string.Formatter):
        """string.Formatter that escapes every substituted field."""

        def __init__(self, escape):
            self.escape = escape

        def format_field(self, value, format_spec):
            if hasattr(value, '__html_format__'):
                # Object handles (or rejects) the format spec itself.
                rv = value.__html_format__(format_spec)
            elif hasattr(value, '__html__'):
                if format_spec:
                    raise ValueError('No format specification allowed '
                                     'when formatting an object with '
                                     'its __html__ method.')
                rv = value.__html__()
            else:
                rv = string.Formatter.format_field(self, value, format_spec)
            # Escape whatever representation was produced.
            return text_type(self.escape(rv))
def _escape_argspec(obj, iterable, escape):
    """Helper for various string-wrapped functions: escape every string-like
    value of *iterable* in place on *obj* and return *obj*."""
    for key, value in iterable:
        is_escapable = isinstance(value, string_types) or hasattr(value, '__html__')
        if is_escapable:
            obj[key] = escape(value)
    return obj
class _MarkupEscapeHelper(object):
    """Helper for Markup.__mod__: defers escaping of a %-format argument
    until the formatting machinery actually renders it."""
    def __init__(self, obj, escape):
        self.obj = obj
        self.escape = escape
    # Subscripting wraps the inner value again so nested access stays safe.
    __getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x], s.escape)
    # %s and %r conversions go through escape(); %d/%f pass through raw.
    __unicode__ = __str__ = lambda s: text_type(s.escape(s.obj))
    __repr__ = lambda s: str(s.escape(repr(s.obj)))
    __int__ = lambda s: int(s.obj)
    __float__ = lambda s: float(s.obj)
# we have to import it down here as the speedups and native
# modules import the markup type which is defined above.
try:
    # Prefer the C implementation; fall back to pure Python.
    from markupsafe._speedups import escape, escape_silent, soft_unicode
except ImportError:
    from markupsafe._native import escape, escape_silent, soft_unicode

if not PY2:
    # On Python 3 expose soft_unicode under the more natural name soft_str.
    soft_str = soft_unicode
    __all__.append('soft_str')
| apache-2.0 |
Peddle/hue | desktop/core/ext-py/py4j-0.9/src/py4j/tests/java_set_test.py | 18 | 3061 | """
Created on Mar 26, 2010
@author: Barthelemy Dagenais
"""
from __future__ import unicode_literals, absolute_import
import unittest
from py4j.java_gateway import JavaGateway, GatewayParameters
from py4j.tests.java_gateway_test import (
start_example_app_process, safe_shutdown, sleep)
class AutoConvertTest(unittest.TestCase):
    """Checks that Python collections are auto-converted to their Java
    counterparts when the gateway is created with auto_convert=True."""

    def setUp(self):
        # Each test gets its own example JVM process and gateway.
        self.p = start_example_app_process()
        self.gateway = JavaGateway(
            gateway_parameters=GatewayParameters(auto_convert=True))

    def tearDown(self):
        safe_shutdown(self)
        self.p.join()
        sleep()

    def testAutoConvert(self):
        sj = self.gateway.jvm.java.util.HashSet()
        sj.add("b")
        sj.add(1)
        sp = set([1, "b"])
        # The Python set should be auto-converted for the Java equals() call.
        self.assertTrue(sj.equals(sp))
class SetTest(unittest.TestCase):
    """Exercises the Python set protocol (add/remove/clear/contains/len)
    against java.util.TreeSet and java.util.HashSet proxies, comparing
    each operation's effect with a native Python set."""

    def setUp(self):
        self.p = start_example_app_process()
        self.gateway = JavaGateway()

    def tearDown(self):
        safe_shutdown(self)
        self.p.join()
        sleep()

    def testTreeSet(self):
        set1 = set()
        set2 = self.gateway.jvm.java.util.TreeSet()
        set1.add("a")
        set2.add("a")
        self.assertEqual(len(set1), len(set2))
        self.assertEqual("a" in set1, "a" in set2)
        self.assertEqual(repr(set1), repr(set2))
        set1.add("b")
        set2.add("b")
        self.assertEqual(len(set1), len(set2))
        self.assertEqual("a" in set1, "a" in set2)
        self.assertEqual("b" in set1, "b" in set2)
        # not a good assumption with Python 3.3 (hash randomization changes
        # set repr ordering), so repr comparisons below stay disabled.
        # self.assertEqual(repr(set1), repr(set2))
        set1.remove("a")
        set2.remove("a")
        self.assertEqual(len(set1), len(set2))
        self.assertEqual("a" in set1, "a" in set2)
        self.assertEqual("b" in set1, "b" in set2)
        # self.assertEqual(repr(set1), repr(set2))
        set1.clear()
        set2.clear()
        self.assertEqual(len(set1), len(set2))
        self.assertEqual("a" in set1, "a" in set2)
        self.assertEqual("b" in set1, "b" in set2)
        # self.assertEqual(repr(set1), repr(set2))

    def testHashSet(self):
        # HashSet additionally mixes types (str and int) in one set.
        set1 = set()
        set2 = self.gateway.jvm.java.util.HashSet()
        set1.add("a")
        set2.add("a")
        set1.add(1)
        set2.add(1)
        set1.add("b")
        set2.add("b")
        self.assertEqual(len(set1), len(set2))
        self.assertEqual("a" in set1, "a" in set2)
        self.assertEqual("b" in set1, "b" in set2)
        self.assertEqual(1 in set1, 1 in set2)
        set1.remove(1)
        set2.remove(1)
        self.assertEqual(len(set1), len(set2))
        self.assertEqual("a" in set1, "a" in set2)
        self.assertEqual("b" in set1, "b" in set2)
        self.assertEqual(1 in set1, 1 in set2)
        set1.clear()
        set2.clear()
        self.assertEqual(len(set1), len(set2))
        self.assertEqual("a" in set1, "a" in set2)
        self.assertEqual("b" in set1, "b" in set2)
        self.assertEqual(1 in set1, 1 in set2)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
haad/ansible-modules-extras | system/seport.py | 33 | 9002 | #!/usr/bin/python
# (c) 2014, Dan Keder <dan.keder@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: seport
short_description: Manages SELinux network port type definitions
description:
- Manages SELinux network port type definitions.
version_added: "2.0"
options:
ports:
description:
- Ports or port ranges, separated by a comma
required: true
default: null
proto:
description:
- Protocol for the specified port.
required: true
default: null
choices: [ 'tcp', 'udp' ]
setype:
description:
- SELinux type for the specified port.
required: true
default: null
state:
description:
- Desired boolean value.
required: true
default: present
choices: [ 'present', 'absent' ]
reload:
description:
- Reload SELinux policy after commit.
required: false
default: yes
notes:
- The changes are persistent across reboots
- Not tested on any debian based system
requirements: [ 'libselinux-python', 'policycoreutils-python' ]
author: Dan Keder
'''
EXAMPLES = '''
# Allow Apache to listen on tcp port 8888
- seport: ports=8888 proto=tcp setype=http_port_t state=present
# Allow sshd to listen on tcp port 8991
- seport: ports=8991 proto=tcp setype=ssh_port_t state=present
# Allow memcached to listen on tcp ports 10000-10100 and 10112
- seport: ports=10000-10100,10112 proto=tcp setype=memcache_port_t state=present
'''
try:
import selinux
HAVE_SELINUX=True
except ImportError:
HAVE_SELINUX=False
try:
import seobject
HAVE_SEOBJECT=True
except ImportError:
HAVE_SEOBJECT=False
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
def semanage_port_get_ports(seport, setype, proto):
    """ Get the list of ports that have the specified type definition.

    :param seport: Instance of seobject.portRecords

    :type setype: str
    :param setype: SELinux type.

    :type proto: str
    :param proto: Protocol ('tcp' or 'udp')

    :rtype: list
    :return: List of ports that have the specified SELinux type
        (empty list when none are defined).
    """
    # get_all_by_type() maps (setype, proto) -> list of port strings;
    # dict.get avoids the membership-test-then-index double lookup.
    records = seport.get_all_by_type()
    return records.get((setype, proto), [])
def semanage_port_get_type(seport, port, proto):
    """ Get the SELinux type of the specified port.

    :param seport: Instance of seobject.portRecords

    :type port: str
    :param port: Port or port range (example: "8080", "8080-9090")

    :type proto: str
    :param proto: Protocol ('tcp' or 'udp')

    :rtype: tuple
    :return: Tuple containing the SELinux type and MLS/MCS level, or None
        if not found.
    """
    # Normalize "8080" and "8080-9090" into an inclusive (low, high) pair.
    # (The original duplicated the single element via ports.extend(ports).)
    ends = port.split('-', 1)
    if len(ends) == 1:
        low = high = int(ends[0])
    else:
        low, high = int(ends[0]), int(ends[1])
    # get_all() maps (low, high, proto) -> (setype, serange).
    return seport.get_all().get((low, high, proto))
def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', sestore=''):
    """ Add SELinux port type definition to the policy.

    :type module: AnsibleModule
    :param module: Ansible module

    :type ports: list
    :param ports: List of ports and port ranges to add (e.g. ["8080", "8080-9090"])

    :type proto: str
    :param proto: Protocol ('tcp' or 'udp')

    :type setype: str
    :param setype: SELinux type

    :type do_reload: bool
    :param do_reload: Whether to reload SELinux policy after commit

    :type serange: str
    :param serange: SELinux MLS/MCS range (defaults to 's0')

    :type sestore: str
    :param sestore: SELinux store

    :rtype: bool
    :return: True if the policy was changed, otherwise False
    """
    # Initialized before the try so `change` is always bound, even if
    # portRecords() itself raises and fail_json returns.
    change = False
    try:
        seport = seobject.portRecords(sestore)
        seport.set_reload(do_reload)
        ports_by_type = semanage_port_get_ports(seport, setype, proto)
        for port in ports:
            if port not in ports_by_type:
                change = True
                # A port already typed differently must be modified, not added.
                port_type = semanage_port_get_type(seport, port, proto)
                if port_type is None and not module.check_mode:
                    seport.add(port, proto, serange, setype)
                elif port_type is not None and not module.check_mode:
                    seport.modify(port, proto, serange, setype)
    # The five original except clauses had byte-identical bodies, so they
    # are consolidated into a single handler with the same exception set.
    except (ValueError, IOError, KeyError, OSError, RuntimeError):
        e = get_exception()
        module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
    return change
def semanage_port_del(module, ports, proto, setype, do_reload, sestore=''):
    """ Delete SELinux port type definition from the policy.

    :type module: AnsibleModule
    :param module: Ansible module

    :type ports: list
    :param ports: List of ports and port ranges to delete (e.g. ["8080", "8080-9090"])

    :type proto: str
    :param proto: Protocol ('tcp' or 'udp')

    :type setype: str
    :param setype: SELinux type.

    :type do_reload: bool
    :param do_reload: Whether to reload SELinux policy after commit

    :type sestore: str
    :param sestore: SELinux store

    :rtype: bool
    :return: True if the policy was changed, otherwise False
    """
    # Initialized before the try so `change` is always bound, even if
    # portRecords() itself raises and fail_json returns.
    change = False
    try:
        seport = seobject.portRecords(sestore)
        seport.set_reload(do_reload)
        ports_by_type = semanage_port_get_ports(seport, setype, proto)
        for port in ports:
            if port in ports_by_type:
                change = True
                if not module.check_mode:
                    seport.delete(port, proto)
    # The five original except clauses had byte-identical bodies, so they
    # are consolidated into a single handler with the same exception set.
    except (ValueError, IOError, KeyError, OSError, RuntimeError):
        e = get_exception()
        module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
    return change
def main():
    """Module entry point: validate prerequisites, parse parameters and
    dispatch to semanage_port_add/semanage_port_del."""
    module = AnsibleModule(
        argument_spec={
            'ports': {
                'required': True,
            },
            'proto': {
                'required': True,
                'choices': ['tcp', 'udp'],
            },
            'setype': {
                'required': True,
            },
            'state': {
                'required': True,
                'choices': ['present', 'absent'],
            },
            'reload': {
                'required': False,
                'type': 'bool',
                'default': 'yes',
            },
        },
        supports_check_mode=True
    )
    # Bail out early when the SELinux python bindings are unavailable
    # or SELinux is disabled on the target host.
    if not HAVE_SELINUX:
        module.fail_json(msg="This module requires libselinux-python")
    if not HAVE_SEOBJECT:
        module.fail_json(msg="This module requires policycoreutils-python")
    if not selinux.is_selinux_enabled():
        module.fail_json(msg="SELinux is disabled on this host.")
    # "8080, 9090-9100" -> ["8080", "9090-9100"]
    ports = [x.strip() for x in str(module.params['ports']).split(',')]
    proto = module.params['proto']
    setype = module.params['setype']
    state = module.params['state']
    do_reload = module.params['reload']
    result = {
        'ports': ports,
        'proto': proto,
        'setype': setype,
        'state': state,
    }
    if state == 'present':
        result['changed'] = semanage_port_add(module, ports, proto, setype, do_reload)
    elif state == 'absent':
        result['changed'] = semanage_port_del(module, ports, proto, setype, do_reload)
    else:
        # Unreachable in practice: AnsibleModule already validates `choices`.
        module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
    module.exit_json(**result)

# NOTE(review): old-style Ansible modules invoke main() unconditionally;
# newer convention guards it with `if __name__ == '__main__':`.
main()
| gpl-3.0 |
jimi-c/ansible | lib/ansible/utils/module_docs_fragments/cnos.py | 33 | 3747 | # Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
class ModuleDocFragment(object):
    """Shared documentation fragment for Lenovo CNOS network modules."""

    # Standard CNOS documentation fragment
    DOCUMENTATION = '''
options:
    outputfile:
        description:
            - This specifies the file path where the output of each command
              execution is saved. Each command that is specified in the merged
              template file and each response from the device are saved here.
              Usually the location is the results folder, but you can
              choose another location based on your write permission.
        required: true
        version_added: 2.3
    host:
        description:
            - This is the variable used to search the hosts file at
              /etc/ansible/hosts and identify the IP address of the device on
              which the template is going to be applied. Usually the Ansible
              keyword {{ inventory_hostname }} is specified in the playbook as
              an abstraction of the group of network elements that need to be
              configured.
        required: true
        version_added: 2.3
    username:
        description:
            - Configures the username used to authenticate the connection to
              the remote device. The value of the username parameter is used to
              authenticate the SSH session. While generally the value should
              come from the inventory file, you can also specify it as a
              variable. This parameter is optional. If it is not specified, no
              default value will be used.
        required: true
        version_added: 2.3
    password:
        description:
            - Configures the password used to authenticate the connection to
              the remote device. The value of the password parameter is used to
              authenticate the SSH session. While generally the value should
              come from the inventory file, you can also specify it as a
              variable. This parameter is optional. If it is not specified, no
              default value will be used.
        required: true
        version_added: 2.3
    enablePassword:
        description:
            - Configures the password used to enter Global Configuration
              command mode on the switch. If the switch does not request this
              password, the parameter is ignored.While generally the value
              should come from the inventory file, you can also specify it as a
              variable. This parameter is optional. If it is not specified,
              no default value will be used.
        version_added: 2.3
    deviceType:
        description:
            - This specifies the type of device where the method is executed.
              The choices NE1072T,NE1032,NE1032T,NE10032,
              NE2572 are added since version 2.4
        required: Yes
        choices: [g8272_cnos,g8296_cnos,g8332_cnos,NE1072T,NE1032,
                  NE1032T,NE10032,NE2572]
        version_added: 2.3
notes:
    - For more information on using Ansible to manage Lenovo Network devices see U(https://www.ansible.com/ansible-lenovo).
'''
| gpl-3.0 |
Klozz/aircrack-ng | scripts/airgraph-ng/graphviz/libOuiParse.py | 89 | 7562 | #!/usr/bin/env python
__author__ = 'Ben "TheX1le" Smith, Marfi'
__email__ = 'thex1le@gmail.com'
__website__= ''
__date__ = '04/26/2011'
__version__ = '2011.4.26'
__file__ = 'ouiParse.py'
__data__ = 'a class for dealing with the oui txt file'
"""
########################################
#
# This program and its support programs are free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation; version 2.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
#########################################
"""
import re, urllib, sys, os
import pdb
#this lib is crap and needs to be rewritten -Textile
# Locate the airgraph-ng support-data directory: prefer a local ./support/
# checkout, then the common system-wide install prefixes.
if os.path.isdir('./support/'):
    path = './support/'
elif os.path.isdir('/usr/local/share/airgraph-ng/'):
    path = '/usr/local/share/airgraph-ng/'
elif os.path.isdir('/usr/share/airgraph-ng/'):
    path = '/usr/share/airgraph-ng/'
else:
    raise Exception("Could not determine path, please, check your installation")
class macOUI_lookup:
    """
    A class for dealing with OUIs and determining device type.

    NOTE(review): this is Python 2 code (dict.has_key, print statements,
    `except Exception, error`); kept as-is.
    """

    def __init__(self, oui=False):
        """
        generate the two dictionaries and return them
        """
        # a poor fix where if we have no file it trys to download it
        self.ouiTxtUrl = "http://standards.ieee.org/regauth/oui/oui.txt"
        self.ouiTxt = oui
        if not oui or not os.path.isfile(self.ouiTxt):
            self.ouiUpdate()
            self.ouiTxt = path + "oui.txt"
        self.last_error = None
        self.identDeviceDict(path + 'ouiDevice.txt')
        self.identDeviceDictWhacMac(path + 'whatcDB.csv')
        self.ouiRaw = self.ouiOpen(self.ouiTxt)
        self.oui_company = self.ouiParse()  # dict where oui's are the keys to company names
        self.company_oui = self.companyParse()  # dict where company name is the key to oui's

    def compKeyChk(self, name):
        """
        check for valid company name key (exact or regex substring match)
        """
        compMatch = re.compile(name, re.I)
        if self.company_oui.has_key(name):
            return True
        for key in self.company_oui.keys():
            if compMatch.search(key) is not None:
                return True
        return False

    def ouiKeyChk(self, name):
        """
        check for a valid oui prefix
        """
        if self.oui_company.has_key(name):
            return True
        else:
            return False

    def lookup_OUI(self, mac):
        """
        Lookup a oui and return the company name, or False when unknown
        """
        if self.ouiKeyChk(mac) is not False:
            return self.oui_company[mac]
        else:
            return False

    def lookup_company(self, companyLst):
        """
        look up a company name (str or list of str) and return their OUI's
        """
        oui = []
        if type(companyLst).__name__ == "list":
            for name in companyLst:
                compMatch = re.compile(name, re.I)
                if self.company_oui.has_key(name):
                    oui.extend(self.company_oui[name])
                else:
                    # fall back to case-insensitive substring match
                    for key in self.company_oui:
                        if compMatch.search(key) is not None:
                            oui.extend(self.company_oui[key])
        elif type(companyLst).__name__ == "str":
            if self.company_oui.has_key(companyLst):
                oui = self.company_oui[companyLst]
            else:
                compMatch = re.compile(companyLst, re.I)
                for key in self.company_oui:
                    if compMatch.search(key) is not None:
                        oui.extend(self.company_oui[key])  # return the oui for that key
        return oui

    def ouiOpen(self, fname, flag='R'):
        """
        open the file and read it in
        flag denotes use of read ('R') or readlines ('RL');
        returns False on IOError
        """
        try:
            ouiFile = open(fname, "r")
            if flag == 'RL':
                text = ouiFile.readlines()
            elif flag == 'R':
                text = ouiFile.read()
            return text
        except IOError:
            return False

    def ouiParse(self):
        """
        generate a oui to company lookup dict
        """
        HexOui = {}
        Hex = re.compile('.*(hex).*')
        # matches the following example "00-00-00 (hex)\t\tXEROX CORPORATION"
        ouiLines = self.ouiRaw.split("\n\n")
        # split each company into a list one company per position
        for line in ouiLines:
            if Hex.search(line) != None:
                lineList = Hex.search(line).group().replace("\t", " ").split(" ")
                # return the matched text and build a list out of it
                HexOui[lineList[0].replace("-", ":")] = lineList[2]
                # build a dict in the format of mac:company name
        return HexOui

    def companyParse(self):
        """
        generate a company to oui lookup dict (inverse of oui_company)
        """
        company_oui = {}
        for oui in self.oui_company:
            if company_oui.has_key(self.oui_company[oui][0]):
                company_oui[self.oui_company[oui][0]].append(oui)
            else:
                company_oui[self.oui_company[oui][0]] = [oui]
        return company_oui

    def ouiUpdate(self):
        """
        Grab the oui txt file off the ieee.org website; exits on failure
        """
        try:
            print("Getting OUI file from %s to %s" %(self.ouiTxtUrl, path))
            urllib.urlretrieve(self.ouiTxtUrl, path + "oui.txt")
            print "Completed Successfully"
        except Exception, error:
            print("Could not download file:\n %s\n Exiting airgraph-ng" %(error))
            sys.exit(0)

    def identDeviceDict(self, fname):
        """
        Create two dicts allowing device type lookup
        one for oui to device and one from device to OUI group
        """
        self.ouitodevice = {}
        self.devicetooui = {}
        data = self.ouiOpen(fname, 'RL')
        if data == False:
            self.last_error = "Unable to open lookup file for parsing"
            return False
        for line in data:
            # each line is "device,oui"
            dat = line.strip().split(',')
            self.ouitodevice[dat[1]] = dat[0]
            if dat[0] in self.devicetooui.keys():
                self.devicetooui[dat[0]].append(dat[1])
            else:
                self.devicetooui[dat[0]] = [dat[1]]

    def identDeviceDictWhacMac(self, fname):
        """
        Create two dicts allowing device type lookup from whatmac DB
        one for oui to device and one from the device to OUI group
        """
        self.ouitodeviceWhatmac3 = {}
        self.ouitodeviceWhatmac = {}
        self.devicetoouiWhacmac = {}
        data = self.ouiOpen(fname, 'RL')
        if data == False:
            self.last_error = "Unble to open lookup file for parsing"
            return False
        for line in data:
            # each line is "mac,device"; macs are upper-cased for lookups
            dat = line.strip().split(',')
            dat[0] = dat[0].upper()
            self.ouitodeviceWhatmac[dat[0]] = dat[1]
            self.ouitodeviceWhatmac3[dat[0][0:8]] = dat[1]  # a db to support the 3byte lookup from whatmac
            if dat[1] in self.devicetoouiWhacmac.keys():
                self.devicetoouiWhacmac[dat[1]].append(dat[0])
            else:
                self.devicetoouiWhacmac[dat[1]] = [dat[0]]
| gpl-2.0 |
Perferom/android_external_chromium_org | third_party/npapi/npspy/analyze_streams.py | 127 | 3162 | # A script for analyzing the output of NPSPY and merging data about streams.
import sys
def ReadFile(filename, flags='rb'):
  """Returns the contents of a file.

  Args:
    filename: path of the file to read.
    flags: mode string passed to open() (default 'rb', i.e. bytes).
  """
  # 'with' guarantees the handle is closed even if read() raises; the
  # original leaked the handle on error and shadowed the builtin 'file'.
  with open(filename, flags) as f:
    return f.read()
def WriteFile(filename, contents):
  """Overwrites the file with the given contents.

  Args:
    filename: path of the file to (re)create.
    contents: text written verbatim.
  """
  # Context manager ensures the data is flushed and the handle closed
  # even when write() raises.
  with open(filename, 'w') as f:
    f.write(contents)
# sample line: 'NPP_NewStream(0x645c898, 0x56ba900("application/x-shockwave-flash"), 0x64bb3b0 (http://weeklyad.target.com/target/flash/target/target.swf?ver=090326), TRUE, NP_NORMAL)'
class Stream:
  """Parsed representation of one NPP_NewStream log line.

  Expected format (comma-space separated):
  NPP_NewStream(<npp>, <addr>("<mime>"), <addr> (<url>), <seekable>, <type>)
  """

  def __init__(self, line):
    split = line.split(', ')
    self.mime_type = split[1].split('"')[1]
    self.url = split[2].split(' ')[1].strip('()')
    self.seekable = split[3]
    self.type = split[4].strip(')')
    self.size = 0       # bytes received so far, accumulated from NPP_Write
    self.status = ''    # final status from NPP_DestroyStream
    try:
      self.address = split[2].split(' ')[0]
    except IndexError:
      # Narrowed from a bare 'except' so real bugs are not silently hidden;
      # prints were parenthesized for Python 2/3 compatibility.
      print('parsing error on ' + line)
      self.address = ''
    if self.type != 'NP_NORMAL':
      print('line got unexpected type: ' + line)
def main(argv=None):
  """Analyzes an NPSPY log and writes a per-stream summary file.

  Correlates NPP_NewStream / NPP_Write / NPP_DestroyStream entries by
  stream address, accumulating byte counts and the final status, then
  writes one CSV-ish line per stream next to the input file.
  """
  if argv is None:
    argv = sys.argv
  streams = []
  if len(argv) != 2:
    print('need filename')
    return
  # Renamed from 'file' to avoid shadowing the builtin.
  log = ReadFile(argv[1])
  for line in log.splitlines():
    if line.startswith('NPP_NewStream('):
      if line.count('(') < 3:
        print('unknown format for line: ' + line)
        continue
      streams.append(Stream(line))
    elif line.startswith('NPP_Write('):
      # sample: NPP_Write(0x645c898, 0x64bb3b0, 0, 16384, 0x56c1000("CW")))
      split = line.split(', ')
      address = split[1]
      start = int(split[2])
      size = int(split[3])
      found = False
      for stream in streams:
        if stream.address == address:
          # Writes are expected to be contiguous; warn on gaps/overlaps.
          if stream.size != start:
            print('error: starting at wrong place for write ' + stream.url +
                  ' ' + str(stream.size) + ' ' + str(start))
          stream.size += size
          found = True
          break
      if not found:
        print("couldn't find stream to match NPP_Write " + line)
    elif line.startswith('NPP_DestroyStream('):
      # sample: NPP_DestroyStream(0x645c898, 0x64bb3b0, NPRES_DONE)
      split = line.split(', ')
      address = split[1]
      status = split[2].strip(')')
      found = False
      for stream in streams:
        if stream.address == address:
          stream.status = status
          stream.address = ''  # address can be reused
          found = True
          break
      if not found:
        print("couldn't find stream to match NPP_DestroyStream " + line)

  output = []
  for stream in streams:
    if stream.status != 'NPRES_DONE':
      print('error: no NPP_DestroyStream with success for ' + stream.url +
            ' ' + stream.status + '.')
    output.append(', '.join([stream.url, stream.mime_type, str(stream.size),
                             stream.seekable]))
  # NOTE(review): replace() rewrites EVERY dot, so 'a.b.log' becomes
  # 'a_analyzed.b_analyzed.log' — presumably only the extension dot was
  # intended; behavior kept as-is.
  output_file = argv[1].replace('.', '_analyzed.')
  WriteFile(output_file, '\n'.join(output))
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause |
taliax/easybuild-easyblocks | easybuild/easyblocks/t/tbb.py | 5 | 4276 | ##
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for installing the Intel Threading Building Blocks (TBB) library, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import os
import shutil
import glob
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.intelbase import IntelBase, ACTIVATION_NAME_2012, LICENSE_FILE_NAME_2012
from easybuild.tools.build_log import EasyBuildError
class EB_tbb(IntelBase):
    """EasyBlock for tbb, threading building blocks"""

    def __init__(self, *args, **kwargs):
        """Initialisation of custom class variables for tbb"""
        super(EB_tbb, self).__init__(*args, **kwargs)
        # Relative path of the compiler-specific lib dir; set in install_step.
        self.libpath = 'UNKNOWN'

    def install_step(self):
        """Custom install step, to add extra symlinks"""
        silent_cfg_names_map = None

        # TBB installers older than 4.2 use the 2012-era silent-config keys.
        if LooseVersion(self.version) < LooseVersion('4.2'):
            silent_cfg_names_map = {
                'activation_name': ACTIVATION_NAME_2012,
                'license_file_name': LICENSE_FILE_NAME_2012,
            }

        super(EB_tbb, self).install_step(silent_cfg_names_map=silent_cfg_names_map)

        # save libdir
        # chdir so the globs below resolve relative to the install prefix
        os.chdir(self.installdir)
        if LooseVersion(self.version) < LooseVersion('4.1.0'):
            libglob = 'tbb/lib/intel64/cc*libc*_kernel*'
        else:
            libglob = 'tbb/lib/intel64/gcc*'
        libs = sorted(glob.glob(libglob), key=LooseVersion)
        if len(libs):
            libdir = libs[-1]  # take the last one, should be ordered by cc get_version.
            # we're only interested in the last bit
            libdir = libdir.split('/')[-1]
        else:
            raise EasyBuildError("No libs found using %s in %s", libglob, self.installdir)
        self.libdir = libdir

        self.libpath = os.path.join('tbb', 'libs', 'intel64', libdir)
        self.log.debug("self.libpath: %s" % self.libpath)

        # applications go looking into tbb/lib so we move what's in there to libs
        # and symlink the right lib from /tbb/libs/intel64/... to lib
        install_libpath = os.path.join(self.installdir, 'tbb', 'lib')
        shutil.move(install_libpath, os.path.join(self.installdir, 'tbb', 'libs'))
        os.symlink(os.path.join(self.installdir, self.libpath), install_libpath)

    def sanity_check_step(self):
        """Verify the expected TBB directory layout after installation."""
        custom_paths = {
            'files': [],
            'dirs': ['tbb/bin', 'tbb/lib', 'tbb/libs'],
        }
        super(EB_tbb, self).sanity_check_step(custom_paths=custom_paths)

    def make_module_extra(self):
        """Add correct path to lib to LD_LIBRARY_PATH. and intel license file"""
        txt = super(EB_tbb, self).make_module_extra()
        txt += self.module_generator.prepend_paths('LD_LIBRARY_PATH', [self.libpath])
        txt += self.module_generator.prepend_paths('LIBRARY_PATH', [self.libpath])
        txt += self.module_generator.prepend_paths('CPATH', [os.path.join('tbb', 'include')])
        txt += self.module_generator.set_environment('TBBROOT', os.path.join(self.installdir, 'tbb'))
        return txt
| gpl-2.0 |
slohse/ansible | lib/ansible/modules/network/f5/bigip_firewall_port_list.py | 9 | 17550 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_firewall_port_list
short_description: Manage port lists on BIG-IP AFM
description:
- Manages the AFM port lists on a BIG-IP. This module can be used to add
and remove port list entries.
version_added: 2.5
options:
name:
description:
- Specifies the name of the port list.
required: True
partition:
description:
- Device partition to manage resources on.
default: Common
description:
description:
- Description of the port list
ports:
description:
- Simple list of port values to add to the list
port_ranges:
description:
- A list of port ranges where the range starts with a port number, is followed
by a dash (-) and then a second number.
- If the first number is greater than the second number, the numbers will be
reversed so-as to be properly formatted. ie, 90-78 would become 78-90.
port_lists:
description:
- Simple list of existing port lists to add to this list. Port lists can be
specified in either their fully qualified name (/Common/foo) or their short
name (foo). If a short name is used, the C(partition) argument will automatically
be prepended to the short name.
state:
description:
- When C(present), ensures that the address list and entries exists.
- When C(absent), ensures the address list is removed.
default: present
choices:
- present
- absent
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a simple port list
bigip_firewall_port_list:
name: foo
ports:
- 80
- 443
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
- name: Override the above list of ports with a new list
bigip_firewall_port_list:
name: foo
ports:
- 3389
- 8080
- 25
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
- name: Create port list with series of ranges
bigip_firewall_port_list:
name: foo
port_ranges:
- 25-30
- 80-500
- 50-78
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
- name: Use multiple types of port arguments
bigip_firewall_port_list:
name: foo
port_ranges:
- 25-30
- 80-500
- 50-78
ports:
- 8080
- 443
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
- name: Remove port list
bigip_firewall_port_list:
name: foo
password: secret
server: lb.mydomain.com
state: absent
user: admin
delegate_to: localhost
- name: Create port list from a file with one port per line
bigip_firewall_port_list:
name: lot-of-ports
ports: "{{ lookup('file', 'my-large-port-list.txt').split('\n') }}"
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
'''
RETURN = r'''
description:
description: The new description of the port list.
returned: changed
type: string
sample: My port list
ports:
description: The new list of ports applied to the port list.
returned: changed
type: list
sample: [80, 443]
port_ranges:
description: The new list of port ranges applied to the port list.
returned: changed
type: list
sample: [80-100, 200-8080]
port_lists:
description: The new list of port list names applied to the port list.
returned: changed
type: list
sample: [/Common/list1, /Common/list2]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
    """Base parameter container shared by all adapter classes below."""

    # Maps REST API attribute names to this module's parameter names.
    api_map = {
        'portLists': 'port_lists'
    }

    # Attributes sent to the REST API on create/modify.
    api_attributes = [
        'portLists', 'ports', 'description'
    ]

    # Values reported back to the user in the module result.
    returnables = [
        'ports', 'port_ranges', 'port_lists', 'description'
    ]

    # Parameters compared against the device to detect drift.
    updatables = [
        'description', 'ports', 'port_ranges', 'port_lists'
    ]
class ApiParameters(Parameters):
    """Adapts values in the shape returned by the BIG-IP REST API."""

    @property
    def port_ranges(self):
        """Extract 'start-stop' entries, normalized so start <= stop."""
        if self._values['ports'] is None:
            return None
        ranges = []
        for entry in self._values['ports']:
            name = entry['name']
            if '-' not in name:
                continue
            low, high = [int(part.strip()) for part in name.split('-')]
            if low > high:
                low, high = high, low
            ranges.append('{0}-{1}'.format(low, high))
        return ranges

    @property
    def port_lists(self):
        """Render referenced port lists as fully-qualified names."""
        if self._values['port_lists'] is None:
            return None
        return ['/{0}/{1}'.format(entry['partition'], entry['name'])
                for entry in self._values['port_lists']]

    @property
    def ports(self):
        """Extract the plain (non-range) port entries as integers."""
        if self._values['ports'] is None:
            return None
        return [int(entry['name']) for entry in self._values['ports']
                if '-' not in entry['name']]
class ModuleParameters(Parameters):
    """Adapts and validates user-supplied task arguments."""

    @property
    def ports(self):
        """Return the list of single ports as integers.

        Raises F5ModuleError when an entry is not a whole number in
        [0, 65535]. Entries containing '-' (ranges or negative numbers)
        are rejected here; ranges belong in ``port_ranges``.
        """
        if self._values['ports'] is None:
            return None
        if any('-' in str(x) for x in self._values['ports']):
            raise F5ModuleError(
                "Ports must be whole numbers between 0 and 65,535"
            )
        # The original chained comparison `0 < int(x) > 65535` only caught
        # values above 65535; this form states the valid range directly.
        if any(not 0 <= int(x) <= 65535 for x in self._values['ports']):
            raise F5ModuleError(
                "Ports must be whole numbers between 0 and 65,535"
            )
        return [int(x) for x in self._values['ports']]

    @property
    def port_ranges(self):
        """Normalize 'start-stop' ranges so start <= stop, validating bounds."""
        if self._values['port_ranges'] is None:
            return None
        result = []
        for port_range in self._values['port_ranges']:
            if '-' not in port_range:
                continue
            start, stop = port_range.split('-')
            start = int(start.strip())
            stop = int(stop.strip())
            if start > stop:
                stop, start = start, stop
            # Reject any bound outside the valid port range (the original
            # `0 < start > 65535` only rejected values above 65535).
            if not 0 <= start <= 65535 or not 0 <= stop <= 65535:
                raise F5ModuleError(
                    "Ports must be whole numbers between 0 and 65,535"
                )
            result.append('{0}-{1}'.format(start, stop))
        return result

    @property
    def port_lists(self):
        """Expand short list names with the partition (foo -> /Common/foo)."""
        if self._values['port_lists'] is None:
            return None
        return [fq_name(self.partition, x) for x in self._values['port_lists']]
class Changes(Parameters):
    def to_return(self):
        """Collect the returnable attributes into a dict for reporting.

        NOTE(review): the broad except deliberately swallows any error so
        that result reporting never fails the module run; a property that
        raises aborts collection of the remaining keys and the partial,
        unfiltered dict built so far is returned.
        """
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            pass
        return result
class ReportableChanges(Changes):
    """Reshapes raw API-style values into the user-facing result format."""

    @property
    def ports(self):
        # Plain ports are the entries without a dash in their name.
        # NOTE(review): assumes _values['ports'] is a list of dicts; if the
        # key is absent/None this raises and is swallowed by to_return().
        result = []
        for item in self._values['ports']:
            if '-' in item['name']:
                continue
            result.append(item['name'])
        return result

    @property
    def port_ranges(self):
        # Ranges are the entries whose name contains a dash ('low-high').
        result = []
        for item in self._values['ports']:
            if '-' not in item['name']:
                continue
            result.append(item['name'])
        return result
class UsableChanges(Changes):
    """Reshapes module values into the format the REST API expects."""

    @property
    def ports(self):
        # Merge single ports and ranges into one list of {'name': str}
        # entries, which is how the API models both.
        if self._values['ports'] is None and self._values['port_ranges'] is None:
            return None
        result = []
        if self._values['ports']:
            # The values of the 'key' index literally need to be string values.
            # If they are not, on BIG-IP 12.1.0 they will raise this REST exception.
            #
            # {
            #   "code": 400,
            #   "message": "one or more configuration identifiers must be provided",
            #   "errorStack": [],
            #   "apiError": 26214401
            # }
            result += [dict(name=str(x)) for x in self._values['ports']]
        if self._values['port_ranges']:
            result += [dict(name=str(x)) for x in self._values['port_ranges']]
        return result

    @property
    def port_lists(self):
        # Split fully-qualified names back into partition + name dicts.
        if self._values['port_lists'] is None:
            return None
        result = []
        for x in self._values['port_lists']:
            partition, name = x.split('/')[1:]
            result.append(dict(
                name=name,
                partition=partition
            ))
        return result
class Difference(object):
    """Computes, per parameter, the value to push to the device.

    ``compare`` returns the desired value when it differs from the
    device's current value, and None when no change is needed.
    """

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the changed value for ``param``, or None if unchanged."""
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        # Simple equality comparison for parameters without a dedicated
        # property; a missing attribute on 'have' counts as a change.
        wanted = getattr(self.want, param)
        try:
            if wanted != getattr(self.have, param):
                return wanted
        except AttributeError:
            return wanted

    def _list_diff(self, param):
        # Order-insensitive comparison shared by the list-typed properties.
        wanted = getattr(self.want, param)
        current = getattr(self.have, param)
        if wanted is None:
            return None
        if current is None:
            return wanted
        if sorted(wanted) != sorted(current):
            return wanted

    @property
    def ports(self):
        return self._list_diff('ports')

    @property
    def port_lists(self):
        return self._list_diff('port_lists')

    @property
    def port_ranges(self):
        return self._list_diff('port_ranges')
class ModuleManager(object):
    """Orchestrates the create/update/delete lifecycle of a port list."""

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = kwargs.get('client', None)
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def _set_changed_options(self):
        # On create: every user-supplied option counts as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        # On update: diff desired state against the device's current state.
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def should_update(self):
        """Return True when at least one parameter differs from the device."""
        result = self._update_changed_options()
        if result:
            return True
        return False

    def exec_module(self):
        """Dispatch on the desired state and assemble the module result."""
        changed = False
        result = dict()
        state = self.want.state

        try:
            if state == "present":
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            # Surface REST-level failures as module failures.
            raise F5ModuleError(str(e))

        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def _announce_deprecations(self, result):
        # Forward any queued deprecation warnings to Ansible.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def present(self):
        """Ensure the port list exists with the desired settings."""
        if self.exists():
            return self.update()
        else:
            return self.create()

    def exists(self):
        """Check whether the port list is already defined on the device."""
        result = self.client.api.tm.security.firewall.port_lists.port_list.exists(
            name=self.want.name,
            partition=self.want.partition
        )
        return result

    def update(self):
        """Push changed parameters to an existing port list."""
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            # Check mode: report the pending change without applying it.
            return True
        self.update_on_device()
        return True

    def remove(self):
        """Delete the port list, verifying the deletion took effect."""
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True

    def create(self):
        """Create the port list from the user-supplied parameters."""
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True

    def create_on_device(self):
        """Issue the REST create call with the accumulated changes."""
        params = self.changes.api_params()
        self.client.api.tm.security.firewall.port_lists.port_list.create(
            name=self.want.name,
            partition=self.want.partition,
            **params
        )

    def update_on_device(self):
        """Issue the REST modify call with the accumulated changes."""
        params = self.changes.api_params()
        resource = self.client.api.tm.security.firewall.port_lists.port_list.load(
            name=self.want.name,
            partition=self.want.partition
        )
        resource.modify(**params)

    def absent(self):
        """Ensure the port list does not exist."""
        if self.exists():
            return self.remove()
        return False

    def remove_from_device(self):
        """Issue the REST delete call for the port list."""
        resource = self.client.api.tm.security.firewall.port_lists.port_list.load(
            name=self.want.name,
            partition=self.want.partition
        )
        if resource:
            resource.delete()

    def read_current_from_device(self):
        """Load the device's current definition into ApiParameters."""
        resource = self.client.api.tm.security.firewall.port_lists.port_list.load(
            name=self.want.name,
            partition=self.want.partition
        )
        result = resource.attrs
        return ApiParameters(params=result)
class ArgumentSpec(object):
    """Builds the argument specification consumed by AnsibleModule."""

    def __init__(self):
        self.supports_check_mode = True
        argument_spec = {
            'name': {'required': True},
            'description': {},
            'ports': {'type': 'list'},
            'port_ranges': {'type': 'list'},
            'port_lists': {'type': 'list'},
            'partition': {
                'default': 'Common',
                'fallback': (env_fallback, ['F5_PARTITION'])
            },
            'state': {
                'default': 'present',
                'choices': ['present', 'absent']
            }
        }
        # Start from the shared F5 connection arguments, then overlay the
        # module-specific ones.
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """Module entry point: build the manager and run the requested state."""
    spec = ArgumentSpec()

    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode
    )
    if not HAS_F5SDK:
        module.fail_json(msg="The python f5-sdk module is required")

    # Initialize before the try: if F5Client(**params) itself raises,
    # the original code hit a NameError on 'client' in the except block,
    # masking the real error message.
    client = None
    try:
        client = F5Client(**module.params)
        mm = ModuleManager(module=module, client=client)
        results = mm.exec_module()
        cleanup_tokens(client)
        module.exit_json(**results)
    except F5ModuleError as ex:
        if client is not None:
            cleanup_tokens(client)
        module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 |
DirtyPiece/dancestudio | Build/Tools/Python27/Lib/idlelib/ParenMatch.py | 113 | 6713 | """ParenMatch -- An IDLE extension for parenthesis matching.
When you hit a right paren, the cursor should move briefly to the left
paren. Paren here is used generically; the matching applies to
parentheses, square brackets, and curly braces.
"""
from idlelib.HyperParser import HyperParser
from idlelib.configHandler import idleConf
# Map each closing bracket to its expected opening counterpart.
_openers = {')':'(',']':'[','}':'{'}

CHECK_DELAY = 100 # milliseconds between cursor-moved polls (set_timeout_none)
class ParenMatch:
"""Highlight matching parentheses
There are three supported style of paren matching, based loosely
on the Emacs options. The style is select based on the
HILITE_STYLE attribute; it can be changed used the set_style
method.
The supported styles are:
default -- When a right paren is typed, highlight the matching
left paren for 1/2 sec.
expression -- When a right paren is typed, highlight the entire
expression from the left paren to the right paren.
TODO:
- extend IDLE with configuration dialog to change options
- implement rest of Emacs highlight styles (see below)
- print mismatch warning in IDLE status window
Note: In Emacs, there are several styles of highlight where the
matching paren is highlighted whenever the cursor is immediately
to the right of a right paren. I don't know how to do that in Tk,
so I haven't bothered.
"""
menudefs = [
('edit', [
("Show surrounding parens", "<<flash-paren>>"),
])
]
STYLE = idleConf.GetOption('extensions','ParenMatch','style',
default='expression')
FLASH_DELAY = idleConf.GetOption('extensions','ParenMatch','flash-delay',
type='int',default=500)
HILITE_CONFIG = idleConf.GetHighlight(idleConf.CurrentTheme(),'hilite')
BELL = idleConf.GetOption('extensions','ParenMatch','bell',
type='bool',default=1)
RESTORE_VIRTUAL_EVENT_NAME = "<<parenmatch-check-restore>>"
# We want the restore event be called before the usual return and
# backspace events.
RESTORE_SEQUENCES = ("<KeyPress>", "<ButtonPress>",
"<Key-Return>", "<Key-BackSpace>")
    def __init__(self, editwin):
        """Attach to an editor window and install the restore-event binding."""
        self.editwin = editwin
        self.text = editwin.text
        # Bind the check-restore event to the function restore_event,
        # so that we can then use activate_restore (which calls event_add)
        # and deactivate_restore (which calls event_delete).
        editwin.text.bind(self.RESTORE_VIRTUAL_EVENT_NAME,
                          self.restore_event)
        # Monotonic id used to invalidate stale timers (see restore_event).
        self.counter = 0
        self.is_restore_active = 0
        self.set_style(self.STYLE)
def activate_restore(self):
if not self.is_restore_active:
for seq in self.RESTORE_SEQUENCES:
self.text.event_add(self.RESTORE_VIRTUAL_EVENT_NAME, seq)
self.is_restore_active = True
def deactivate_restore(self):
if self.is_restore_active:
for seq in self.RESTORE_SEQUENCES:
self.text.event_delete(self.RESTORE_VIRTUAL_EVENT_NAME, seq)
self.is_restore_active = False
def set_style(self, style):
self.STYLE = style
if style == "default":
self.create_tag = self.create_tag_default
self.set_timeout = self.set_timeout_last
elif style == "expression":
self.create_tag = self.create_tag_expression
self.set_timeout = self.set_timeout_none
    def flash_paren_event(self, event):
        """Highlight the bracket pair surrounding the insert point, if any."""
        indices = (HyperParser(self.editwin, "insert")
                   .get_surrounding_brackets())
        if indices is None:
            # No enclosing bracket pair around the cursor.
            self.warn_mismatched()
            return
        self.activate_restore()
        self.create_tag(indices)
        # Flash style: always remove the highlight after FLASH_DELAY.
        self.set_timeout_last()
    def paren_closed_event(self, event):
        """Handle typing a closer (")", "]", "}"): flash its opener."""
        # If it was a shortcut and not really a closing paren, quit.
        closer = self.text.get("insert-1c")
        if closer not in _openers:
            return
        hp = HyperParser(self.editwin, "insert-1c")
        if not hp.is_in_code():
            # Ignore closers typed inside strings or comments.
            return
        indices = hp.get_surrounding_brackets(_openers[closer], True)
        if indices is None:
            self.warn_mismatched()
            return
        self.activate_restore()
        self.create_tag(indices)
        self.set_timeout()
def restore_event(self, event=None):
self.text.tag_delete("paren")
self.deactivate_restore()
self.counter += 1 # disable the last timer, if there is one.
def handle_restore_timer(self, timer_count):
if timer_count == self.counter:
self.restore_event()
def warn_mismatched(self):
if self.BELL:
self.text.bell()
# any one of the create_tag_XXX methods can be used depending on
# the style
def create_tag_default(self, indices):
"""Highlight the single paren that matches"""
self.text.tag_add("paren", indices[0])
self.text.tag_config("paren", self.HILITE_CONFIG)
def create_tag_expression(self, indices):
"""Highlight the entire expression"""
if self.text.get(indices[1]) in (')', ']', '}'):
rightindex = indices[1]+"+1c"
else:
rightindex = indices[1]
self.text.tag_add("paren", indices[0], rightindex)
self.text.tag_config("paren", self.HILITE_CONFIG)
    # any one of the set_timeout_XXX methods can be used depending on
    # the style
    def set_timeout_none(self):
        """Highlight will remain until user input turns it off
        or the insert has moved"""
        # After CHECK_DELAY, call a function which disables the "paren" tag
        # if the event is for the most recent timer and the insert has changed,
        # or schedules another call for itself.
        self.counter += 1
        def callme(callme, self=self, c=self.counter,
                   index=self.text.index("insert")):
            if index != self.text.index("insert"):
                self.handle_restore_timer(c)
            else:
                # cursor has not moved: keep polling
                self.editwin.text_frame.after(CHECK_DELAY, callme, callme)
        self.editwin.text_frame.after(CHECK_DELAY, callme, callme)
    def set_timeout_last(self):
        """The last highlight created will be removed after .5 sec"""
        # associate a counter with an event; only disable the "paren"
        # tag if the event is for the most recent timer.
        # FLASH_DELAY is the configured delay in milliseconds.
        self.counter += 1
        self.editwin.text_frame.after(
            self.FLASH_DELAY,
            lambda self=self, c=self.counter: self.handle_restore_timer(c))
# Run this extension's unit tests when executed as a script.
if __name__ == '__main__':
    import unittest
    unittest.main('idlelib.idle_test.test_parenmatch', verbosity=2)
| mit |
kbrebanov/ansible | lib/ansible/plugins/cliconf/ironware.py | 19 | 2749 | #
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from itertools import chain
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase, enable_mode
class Cliconf(CliconfBase):
    """Cliconf plugin for Extreme/Brocade IronWare network devices.

    Provides device information probing, configuration retrieval and
    configuration editing over the CLI transport.
    """

    def get_device_info(self):
        """Return a dict with 'network_os' and, when detectable from
        ``show version``, 'network_os_version' and 'network_os_model'."""
        device_info = {}

        device_info['network_os'] = 'ironware'
        reply = self.send_command(b'show version')
        data = to_text(reply, errors='surrogate_or_strict').strip()

        match = re.search(r'IronWare : Version (\S+),', data)
        if match:
            device_info['network_os_version'] = match.group(1)

        match = re.search(r'^(?:System Mode\:|System\:) (CES|CER|MLX|XMR)', data, re.M)
        if match:
            device_info['network_os_model'] = match.group(1)

        return device_info

    @enable_mode
    def get_config(self, source='running', flags=None):
        """Fetch the device configuration.

        :param source: 'running' or 'startup'
        :param flags: optional list of extra CLI words; only supported for
            the running configuration.
        """
        if source not in ('running', 'startup'):
            return self.invalid_params("fetching configuration from %s is not supported" % source)

        if source == 'running':
            cmd = b'show running-config'
            if flags is not None:
                # BUGFIX: the command is a bytes object, so the flag words
                # must be encoded before concatenation; appending a str to
                # bytes raises TypeError on Python 3.
                cmd += b' ' + to_bytes(' '.join(flags))
        else:
            cmd = b'show configuration'
            if flags is not None:
                return self.invalid_params("flags are only supported with running-config")

        return self.send_command(cmd)

    @enable_mode
    def edit_config(self, command):
        """Apply *command* (a line or list of lines) inside config mode."""
        for cmd in chain([b'configure terminal'], to_list(command), [b'end']):
            self.send_command(cmd)

    def get(self, command, prompt=None, answer=None, sendonly=False):
        """Run an arbitrary *command* on the device and return its output."""
        return self.send_command(command, prompt=prompt, answer=answer, sendonly=sendonly)

    def get_capabilities(self):
        """Return a JSON document describing this plugin's capabilities."""
        result = {}
        result['rpc'] = self.get_base_rpc()
        result['network_api'] = 'cliconf'
        result['device_info'] = self.get_device_info()
        return json.dumps(result)
| gpl-3.0 |
LinguList/pyjs-seminar | website/code/align.py | 2 | 2136 | # author : Johann-Mattis List
# email : mattis.list@uni-marburg.de
# created : 2015-07-11 09:19
# modified : 2015-07-11 09:19
"""
Wagner-Fisher Algorithmus in Python
"""
__author__="Johann-Mattis List"
__date__="2015-07-11"
def wf_align(seqA, seqB):
    """
    Align two sequences using the Wagner-Fischer algorithm.

    Returns a triple (aligned_A, aligned_B, edit_distance) where the
    aligned sequences are lists padded with "-" for gaps.  Returns None
    when either input is empty.
    """
    # an empty input cannot be aligned
    if not seqA or not seqB:
        return

    rows = len(seqA) + 1
    cols = len(seqB) + 1

    # score matrix D and traceback matrix B
    # (B: 0 = diagonal/match, 1 = gap in seqB, 2 = gap in seqA)
    D = [[0] * cols for _ in range(rows)]
    B = [[0] * cols for _ in range(rows)]

    # initialize the first column and first row
    for r in range(rows):
        D[r][0] = r
    for c in range(cols):
        D[0][c] = c
    for r in range(1, rows):
        B[r][0] = 1
    for c in range(1, cols):
        B[0][c] = 2

    # fill the matrices row by row
    for r in range(1, rows):
        for c in range(1, cols):
            subst = D[r - 1][c - 1] + (0 if seqA[r - 1] == seqB[c - 1] else 1)
            delete = D[r - 1][c] + 1
            insert = D[r][c - 1] + 1
            # substitution wins ties, then deletion, then insertion
            if subst <= delete and subst <= insert:
                D[r][c] = subst
            elif delete <= insert:
                D[r][c] = delete
                B[r][c] = 1
            else:
                D[r][c] = insert
                B[r][c] = 2

    # the edit distance sits in the bottom-right cell
    distance = D[rows - 1][cols - 1]

    # walk the traceback from the bottom-right corner
    r, c = rows - 1, cols - 1
    outA, outB = [], []
    while r > 0 or c > 0:
        step = B[r][c]
        if step == 0:
            outA.append(seqA[r - 1])
            outB.append(seqB[c - 1])
            r -= 1
            c -= 1
        elif step == 1:
            outA.append(seqA[r - 1])
            outB.append("-")
            r -= 1
        else:
            outA.append("-")
            outB.append(seqB[c - 1])
            c -= 1

    # the traceback produced the alignment right-to-left
    outA.reverse()
    outB.reverse()

    return outA, outB, distance
| gpl-2.0 |
TimofeyFox/S7270_kernel | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
    # Called by perf once before any events are processed.
    print "trace_begin"
    pass
def trace_end():
    # Called by perf after the last event; report any events that had
    # no dedicated handler.
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        vec):
    # Handler for the irq:softirq_entry tracepoint; prints the softirq
    # vector using its symbolic name.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        call_site, ptr, bytes_req, bytes_alloc,
        gfp_flags):
    # Handler for the kmem:kmalloc tracepoint; decodes the gfp flag
    # bitmask into its symbolic string form.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    print "call_site=%u, ptr=%u, bytes_req=%u, " \
        "bytes_alloc=%u, gfp_flags=%s\n" % \
        (call_site, ptr, bytes_req, bytes_alloc,
        flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # Count events without a dedicated handler.  'unhandled' is an
    # autodict, so incrementing a fresh key raises TypeError (the
    # auto-created value is not a number); use that to seed the count.
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # Common prefix printed for every handled event (note the trailing
    # comma: the handler continues the line).
    print "%-20s %5u %05u.%09u %8u %-20s " % \
        (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # Pull the remaining common_* fields out of the perf context object.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
        common_lock_depth(context))
def print_unhandled():
    # Summarize the events counted by trace_unhandled(), if any.
    keys = unhandled.keys()
    if not keys:
        return

    print "\nunhandled events:\n\n",

    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "-----------"),

    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
drod2169/Linux-3.10.x | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0       # -q: suppress progress output
test = 0        # -t: syntax-check only, do not touch sysfs
comments = 0    # -c: echo comment lines from the test file

# sysfs interface of the in-kernel rtmutex tester
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes: map test-file command keywords to the numeric opcodes
# understood by the kernel rtmutex tester.
cmd_opcodes = {
    "schedother" : "1",
    "schedfifo" : "2",
    "lock" : "3",
    "locknowait" : "4",
    "lockint" : "5",
    "lockintnowait" : "6",
    "lockcont" : "7",
    "unlock" : "8",
    "signal" : "11",
    "resetevent" : "98",
    "reset" : "99",
    }

# Test opcodes: map a test keyword to [status field prefix, relation,
# fixed expected value] as consumed by analyse().
test_opcodes = {
    "prioeq" : ["P" , "eq" , None],
    "priolt" : ["P" , "lt" , None],
    "priogt" : ["P" , "gt" , None],
    "nprioeq" : ["N" , "eq" , None],
    "npriolt" : ["N" , "lt" , None],
    "npriogt" : ["N" , "gt" , None],
    "unlocked" : ["M" , "eq" , 0],
    "trylock" : ["M" , "eq" , 1],
    "blocked" : ["M" , "eq" , 2],
    "blockedwake" : ["M" , "eq" , 3],
    "locked" : ["M" , "eq" , 4],
    "opcodeeq" : ["O" , "eq" , None],
    "opcodelt" : ["O" , "lt" , None],
    "opcodegt" : ["O" , "gt" , None],
    "eventeq" : ["E" , "eq" , None],
    "eventlt" : ["E" , "lt" , None],
    "eventgt" : ["E" , "gt" , None],
    }
# Print usage information
def usage():
    # Command line help text
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print " -c display comments after first command"
    print " -h help"
    print " -q quiet mode"
    print " -t test mode (syntax check)"
    print " testfile: read test specification from testfile"
    print " otherwise from stdin"
    return
# Print progress when not in quiet mode
def progress(str):
    # NOTE: the parameter shadows the builtin 'str'; kept as-is.
    if not quiet:
        print str
# Analyse a status value
def analyse(val, top, arg):
    """Compare status value *val* against the test op *top*.

    *top* is [field, relation, fixed-arg] from test_opcodes; *arg* is the
    argument column of the test line.  Returns 1 when the relation holds,
    0 otherwise.
    """

    intval = int(val)

    if top[0] == "M":
        # Mutex state: 'arg' selects the decimal digit holding the
        # per-mutex state; the expected value is baked into the opcode.
        # Use integer (floor) division so the digit extraction also works
        # under Python 3, where '/' would yield a float.
        intval = intval // (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        # Opcode: allow symbolic command names as the expected value.
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)

    # progress("%d %s %d" %(intval, top[1], argval))

    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0
# Parse the commandline
try:
    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
    usage()
    sys.exit(1)

# Parse commandline options
for option, value in options:
    if option == "-c":
        comments = 1
    elif option == "-q":
        quiet = 1
    elif option == "-t":
        test = 1
    elif option == '-h':
        usage()
        sys.exit(0)

# Select the input source: a test file argument, or stdin
if arguments:
    try:
        fd = open(arguments[0])
    except Exception,ex:
        sys.stderr.write("File not found %s\n" %(arguments[0]))
        sys.exit(1)
else:
    fd = sys.stdin

linenr = 0

# Read the test patterns; each line is "cmd:opcode:threadid:data"
while 1:
    linenr = linenr + 1
    line = fd.readline()
    if not len(line):
        break
    line = line.strip()
    parts = line.split(":")
    if not parts or len(parts) < 1:
        continue
    if len(parts[0]) == 0:
        continue
    if parts[0].startswith("#"):
        # Comment line: echoed only once comment echoing is armed (-c)
        if comments > 1:
            progress(line)
        continue

    if comments == 1:
        comments = 2

    progress(line)

    cmd = parts[0].strip().lower()
    opc = parts[1].strip().lower()
    tid = parts[2].strip()
    dat = parts[3].strip()

    try:
        # Test or wait for a status value
        # 't' checks once, 'w' polls the status file until it matches
        if cmd == "t" or cmd == "w":
            testop = test_opcodes[opc]

            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
            if test:
                print fname
                continue

            while 1:
                query = 1
                fsta = open(fname, 'r')
                status = fsta.readline().strip()
                fsta.close()
                stat = status.split(",")
                for s in stat:
                    s = s.strip()
                    if s.startswith(testop[0]):
                        # Separate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                if query or cmd == "t":
                    break

                progress(" " + status)

            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)

        # Issue a command to the tester
        elif cmd == "c":
            cmdnr = cmd_opcodes[opc]
            # Build command string and sys filename
            cmdstr = "%s:%s" %(cmdnr, dat)
            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
            if test:
                print fname
                continue
            fcmd = open(fname, 'w')
            fcmd.write(cmdstr)
            fcmd.close()

    except Exception,ex:
        sys.stderr.write(str(ex))
        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
        if not test:
            fd.close()
            sys.exit(1)

# Normal exit pass
print "Pass"
sys.exit(0)
malayaleecoder/servo | tests/wpt/web-platform-tests/tools/pytest/_pytest/python.py | 167 | 89406 | """ Python test discovery, setup and run of test functions. """
import fnmatch
import functools
import inspect
import re
import types
import sys
import py
import pytest
from _pytest._code.code import TerminalRepr
from _pytest.mark import MarkDecorator, MarkerError
try:
import enum
except ImportError: # pragma: no cover
# Only available in Python 3.4+ or as a backport
enum = None
import _pytest
import _pytest._pluggy as pluggy
# Directories containing pytest/pluggy internals, used by filter_traceback()
# to cut internal frames out of user-facing tracebacks.
cutdir2 = py.path.local(_pytest.__file__).dirpath()
cutdir1 = py.path.local(pluggy.__file__.rstrip("oc"))


NoneType = type(None)
NOTSET = object()  # sentinel distinct from None
isfunction = inspect.isfunction
isclass = inspect.isclass
callable = py.builtin.callable
# used to work around a python2 exception info leak
exc_clear = getattr(sys, 'exc_clear', lambda: None)
# The type of re.compile objects is not exposed in Python.
REGEX_TYPE = type(re.compile(''))

_PY3 = sys.version_info > (3, 0)
_PY2 = not _PY3
if hasattr(inspect, 'signature'):
    def _format_args(func):
        # Python 3.3+: render the signature, e.g. "(a, b=1)".
        return str(inspect.signature(func))
else:
    def _format_args(func):
        # Older Pythons: fall back to the legacy argspec formatting.
        return inspect.formatargspec(*inspect.getargspec(func))
# On python 2.6 only, replace inspect.isclass with a stricter backport.
if sys.version_info[:2] == (2, 6):
    def isclass(object):
        """ Return true if the object is a class. Overrides inspect.isclass for
        python 2.6 because it will return True for objects which always return
        something on __getattr__ calls (see #1035).
        Backport of https://hg.python.org/cpython/rev/35bf8f7a8edc
        """
        return isinstance(object, (type, types.ClassType))
def _has_positional_arg(func):
return func.__code__.co_argcount
def filter_traceback(entry):
    """Return True if the traceback *entry* should be shown to the user,
    i.e. it is neither dynamically generated code nor pytest-internal."""
    # entry.path might sometimes return a str object when the entry
    # points to dynamically generated code
    # see https://bitbucket.org/pytest-dev/py/issues/71
    raw_filename = entry.frame.code.raw.co_filename
    is_generated = '<' in raw_filename and '>' in raw_filename
    if is_generated:
        return False
    # entry.path might point to an inexisting file, in which case it will
    # alsso return a str object. see #1133
    p = py.path.local(entry.path)
    # keep frames that live neither in pluggy nor under the _pytest package
    return p != cutdir1 and not p.relto(cutdir2)
def get_real_func(obj):
    """ gets the real function object of the (possibly) wrapped object by
    functools.wraps or functools.partial.
    """
    # Follow the __wrapped__ chain left by functools.wraps, then unwrap a
    # trailing functools.partial, if any.
    while True:
        try:
            obj = obj.__wrapped__
        except AttributeError:
            break
    if isinstance(obj, functools.partial):
        obj = obj.func
    return obj
def getfslineno(obj):
    """Return (filesystem path, line number) where *obj* was defined,
    after unwrapping decorators/partials."""
    # xxx let decorators etc specify a sane ordering
    obj = get_real_func(obj)
    # 'place_as' lets an object report another object's location
    if hasattr(obj, 'place_as'):
        obj = obj.place_as
    fslineno = _pytest._code.getfslineno(obj)
    assert isinstance(fslineno[1], int), obj
    return fslineno
def getimfunc(func):
    """Return the plain function underlying a bound/unbound method,
    or *func* itself when it is not a method."""
    # py3 methods expose __func__; py2 also exposes im_func
    for attr in ('__func__', 'im_func'):
        try:
            return getattr(func, attr)
        except AttributeError:
            continue
    return func
def safe_getattr(object, name, default):
    """ Like getattr but return default upon any Exception.

    Attribute access can potentially fail for 'evil' Python objects.
    See issue214
    """
    try:
        value = getattr(object, name, default)
    except Exception:
        return default
    return value
class FixtureFunctionMarker:
    """Attached to a fixture factory as ``_pytestfixturefunction``;
    records how the fixture is scoped and parametrized."""

    def __init__(self, scope, params,
                 autouse=False, yieldctx=False, ids=None):
        self.scope, self.params = scope, params
        self.autouse, self.yieldctx, self.ids = autouse, yieldctx, ids

    def __call__(self, function):
        # Fixture factories must be plain functions, not classes.
        if isclass(function):
            raise ValueError(
                "class fixtures not supported (may be in the future)")
        function._pytestfixturefunction = self
        return function
def fixture(scope="function", params=None, autouse=False, ids=None):
    """ (return a) decorator to mark a fixture factory function.

    This decorator can be used (with or or without parameters) to define
    a fixture function.  The name of the fixture function can later be
    referenced to cause its invocation ahead of running tests: test
    modules or classes can use the pytest.mark.usefixtures(fixturename)
    marker.  Test functions can directly use fixture names as input
    arguments in which case the fixture instance returned from the fixture
    function will be injected.

    :arg scope: the scope for which this fixture is shared, one of
                "function" (default), "class", "module", "session".

    :arg params: an optional list of parameters which will cause multiple
                invocations of the fixture function and all of the tests
                using it.

    :arg autouse: if True, the fixture func is activated for all tests that
                can see it.  If False (the default) then an explicit
                reference is needed to activate the fixture.

    :arg ids: list of string ids each corresponding to the params
       so that they are part of the test id. If no ids are provided
       they will be generated automatically from the params.

    """
    if callable(scope) and params is None and autouse == False:
        # direct decoration: @pytest.fixture used without parentheses,
        # so 'scope' is actually the decorated function
        return FixtureFunctionMarker(
                "function", params, autouse)(scope)
    if params is not None and not isinstance(params, (list, tuple)):
        # normalize arbitrary iterables so params can be indexed repeatedly
        params = list(params)
    return FixtureFunctionMarker(scope, params, autouse, ids=ids)
def yield_fixture(scope="function", params=None, autouse=False, ids=None):
    """ (return a) decorator to mark a yield-fixture factory function
    (EXPERIMENTAL).

    This takes the same arguments as :py:func:`pytest.fixture` but
    expects a fixture function to use a ``yield`` instead of a ``return``
    statement to provide a fixture.  See
    http://pytest.org/en/latest/yieldfixture.html for more info.
    """
    if callable(scope) and params is None and autouse == False:
        # direct decoration: used as @pytest.yield_fixture without parens
        return FixtureFunctionMarker(
                "function", params, autouse, yieldctx=True)(scope)
    else:
        return FixtureFunctionMarker(scope, params, autouse,
                                     yieldctx=True, ids=ids)
# marker applied to fixture factories discovered via the legacy
# "pytest_funcarg__" naming prefix
defaultfuncargprefixmarker = fixture()


def pyobj_property(name):
    # Build a read-only property that returns the python object of the
    # closest parent collection node of the given type (Module/Class/...),
    # or None when there is no such parent.
    def get(self):
        node = self.getparent(getattr(pytest, name))
        if node is not None:
            return node.obj
    doc = "python %s object this node was collected from (can be None)." % (
          name.lower(),)
    return property(get, None, None, doc)
def pytest_addoption(parser):
    """Register command line options and ini settings of the python plugin."""
    group = parser.getgroup("general")
    group.addoption('--fixtures', '--funcargs',
               action="store_true", dest="showfixtures", default=False,
               help="show available fixtures, sorted by plugin appearance")
    parser.addini("usefixtures", type="args", default=[],
        help="list of default fixtures to be used with this project")
    parser.addini("python_files", type="args",
        default=['test_*.py', '*_test.py'],
        help="glob-style file patterns for Python test module discovery")
    parser.addini("python_classes", type="args", default=["Test",],
        help="prefixes or glob names for Python test class discovery")
    parser.addini("python_functions", type="args", default=["test",],
        help="prefixes or glob names for Python test function and "
             "method discovery")

    group.addoption("--import-mode", default="prepend",
        choices=["prepend", "append"], dest="importmode",
        help="prepend/append to sys.path when importing test modules, "
             "default is to prepend.")
def pytest_cmdline_main(config):
    # Implement "--fixtures": list them and exit successfully.
    if config.option.showfixtures:
        showfixtures(config)
        return 0
def pytest_generate_tests(metafunc):
    """Expand test functions marked with @pytest.mark.parametrize."""
    # those alternative spellings are common - raise a specific error to alert
    # the user
    alt_spellings = ['parameterize', 'parametrise', 'parameterise']
    for attr in alt_spellings:
        if hasattr(metafunc.function, attr):
            msg = "{0} has '{1}', spelling should be 'parametrize'"
            raise MarkerError(msg.format(metafunc.function.__name__, attr))
    try:
        markers = metafunc.function.parametrize
    except AttributeError:
        # not parametrized at all
        return
    for marker in markers:
        metafunc.parametrize(*marker.args, **marker.kwargs)
def pytest_configure(config):
    """Register the 'parametrize' and 'usefixtures' marker documentation."""
    config.addinivalue_line("markers",
        "parametrize(argnames, argvalues): call a test function multiple "
        "times passing in different arguments in turn. argvalues generally "
        "needs to be a list of values if argnames specifies only one name "
        "or a list of tuples of values if argnames specifies multiple names. "
        "Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
        "decorated test function, one with arg1=1 and another with arg1=2."
        "see http://pytest.org/latest/parametrize.html for more info and "
        "examples."
    )
    config.addinivalue_line("markers",
        "usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
        "all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
    )
def pytest_sessionstart(session):
    # Create the session-wide fixture manager used by all collectors.
    session._fixturemanager = FixtureManager(session)
@pytest.hookimpl(trylast=True)
def pytest_namespace():
    """Export this plugin's public names into the 'pytest' namespace."""
    raises.Exception = pytest.fail.Exception
    return {
        'fixture': fixture,
        'yield_fixture': yield_fixture,
        'raises' : raises,
        'collect': {
        'Module': Module, 'Class': Class, 'Instance': Instance,
        'Function': Function, 'Generator': Generator,
        '_fillfuncargs': fillfixtures}
    }
# builtin session-scoped fixture
@fixture(scope="session")
def pytestconfig(request):
    """ the pytest config object with access to command line opts."""
    return request.config
@pytest.hookimpl(trylast=True)
def pytest_pyfunc_call(pyfuncitem):
    """Default python test-call implementation: invoke the test function
    with its resolved fixture arguments (or positional args for yielded
    generator tests)."""
    testfunction = pyfuncitem.obj
    if pyfuncitem._isyieldedfunction():
        testfunction(*pyfuncitem._args)
    else:
        funcargs = pyfuncitem.funcargs
        testargs = {}
        # pass only the arguments the function actually requests
        for arg in pyfuncitem._fixtureinfo.argnames:
            testargs[arg] = funcargs[arg]
        testfunction(**testargs)
    return True
def pytest_collect_file(path, parent):
    """Collect .py files matching the 'python_files' patterns (or given as
    initial command line paths) as Module nodes."""
    ext = path.ext
    if ext == ".py":
        if not parent.session.isinitpath(path):
            for pat in parent.config.getini('python_files'):
                if path.fnmatch(pat):
                    break
            else:
               # no pattern matched: not a test module
               return
        ihook = parent.session.gethookproxy(path)
        return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
def pytest_pycollect_makemodule(path, parent):
    # Default factory for module collection nodes.
    return Module(path, parent)
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(collector, name, obj):
    """Hook wrapper: build a collection node for (name, obj) unless
    another plugin already produced one."""
    outcome = yield
    res = outcome.get_result()
    if res is not None:
        raise StopIteration
    # nothing was collected elsewhere, let's do it here
    if isclass(obj):
        if collector.istestclass(obj, name):
            Class = collector._getcustomclass("Class")
            outcome.force_result(Class(name, parent=collector))
    elif collector.istestfunction(obj, name):
        # mock seems to store unbound methods (issue473), normalize it
        obj = getattr(obj, "__func__", obj)
        # We need to try and unwrap the function if it's a functools.partial
        # or a funtools.wrapped.
        # We musn't if it's been wrapped with mock.patch (python 2 only)
        if not (isfunction(obj) or isfunction(get_real_func(obj))):
            collector.warn(code="C2", message=
                "cannot collect %r because it is not a function."
                % name, )
        elif getattr(obj, "__test__", True):
            if is_generator(obj):
                res = Generator(name, parent=collector)
            else:
                res = list(collector._genfunctions(name, obj))
            outcome.force_result(res)
def is_generator(func):
    """Return True if *func* is a generator function."""
    try:
        # CO_GENERATOR flag in the code object
        return _pytest._code.getrawcode(func).co_flags & 32 # generator function
    except AttributeError: # builtin functions have no bytecode
        # assume them to not be generators
        return False
class PyobjContext(object):
    # Convenience accessors for the python objects of enclosing nodes.
    module = pyobj_property("Module")
    cls = pyobj_property("Class")
    instance = pyobj_property("Instance")
class PyobjMixin(PyobjContext):
    """Mixin for nodes that wrap an underlying python object
    (module, class, function)."""

    def obj():
        # Build the lazily-computed 'obj' property: the underlying python
        # object is created on first access via _getobj() and cached.
        def fget(self):
            try:
                return self._obj
            except AttributeError:
                self._obj = obj = self._getobj()
                return obj
        def fset(self, value):
            self._obj = value
        return property(fget, fset, None, "underlying python object")
    obj = obj()

    def _getobj(self):
        # Default: look this node's name up on the parent's python object.
        return getattr(self.parent.obj, self.name)

    def getmodpath(self, stopatmodule=True, includemodule=False):
        """ return python path relative to the containing module. """
        chain = self.listchain()
        chain.reverse()
        parts = []
        for node in chain:
            if isinstance(node, Instance):
                continue
            name = node.name
            if isinstance(node, Module):
                assert name.endswith(".py")
                name = name[:-3]
                if stopatmodule:
                    if includemodule:
                        parts.append(name)
                    break
            parts.append(name)
        parts.reverse()
        s = ".".join(parts)
        # parametrized ids attach directly to the preceding name
        return s.replace(".[", "[")

    def _getfslineno(self):
        # (filesystem path, line number) of the underlying python object
        return getfslineno(self.obj)

    def reportinfo(self):
        """Return (fspath, lineno, modpath) used for test reporting."""
        # XXX caching?
        obj = self.obj
        compat_co_firstlineno = getattr(obj, 'compat_co_firstlineno', None)
        if isinstance(compat_co_firstlineno, int):
            # nose compatibility
            fspath = sys.modules[obj.__module__].__file__
            if fspath.endswith(".pyc"):
                fspath = fspath[:-1]
            lineno = compat_co_firstlineno
        else:
            fspath, lineno = getfslineno(obj)
        modpath = self.getmodpath()
        assert isinstance(lineno, int)
        return fspath, lineno, modpath
class PyCollector(PyobjMixin, pytest.Collector):
    """Base class for collectors of python test items (modules, classes,
    instances)."""

    def funcnamefilter(self, name):
        # True if 'name' matches the configured test-function prefixes/globs.
        return self._matches_prefix_or_glob_option('python_functions', name)

    def isnosetest(self, obj):
        """ Look for the __test__ attribute, which is applied by the
        @nose.tools.istest decorator
        """
        # We explicitly check for "is True" here to not mistakenly treat
        # classes with a custom __getattr__ returning something truthy (like a
        # function) as test classes.
        return safe_getattr(obj, '__test__', False) is True

    def classnamefilter(self, name):
        # True if 'name' matches the configured test-class prefixes/globs.
        return self._matches_prefix_or_glob_option('python_classes', name)

    def istestfunction(self, obj, name):
        # A test function must match the name filter, be callable, and not
        # itself be a fixture factory.
        return (
            (self.funcnamefilter(name) or self.isnosetest(obj)) and
            safe_getattr(obj, "__call__", False) and getfixturemarker(obj) is None
        )

    def istestclass(self, obj, name):
        return self.classnamefilter(name) or self.isnosetest(obj)

    def _matches_prefix_or_glob_option(self, option_name, name):
        """
        checks if the given name matches the prefix or glob-pattern defined
        in ini configuration.
        """
        for option in self.config.getini(option_name):
            if name.startswith(option):
                return True
            # check that name looks like a glob-string before calling fnmatch
            # because this is called for every name in each collected module,
            # and fnmatch is somewhat expensive to call
            elif ('*' in option or '?' in option or '[' in option) and \
                    fnmatch.fnmatch(name, option):
                return True
        return False

    def collect(self):
        """Collect child items from the underlying python object."""
        if not getattr(self.obj, "__test__", True):
            return []

        # NB. we avoid random getattrs and peek in the __dict__ instead
        # (XXX originally introduced from a PyPy need, still true?)
        dicts = [getattr(self.obj, '__dict__', {})]
        for basecls in inspect.getmro(self.obj.__class__):
            dicts.append(basecls.__dict__)
        seen = {}
        l = []
        for dic in dicts:
            for name, obj in list(dic.items()):
                if name in seen:
                    continue
                seen[name] = True
                res = self.makeitem(name, obj)
                if res is None:
                    continue
                if not isinstance(res, list):
                    res = [res]
                l.extend(res)
        # preserve source definition order
        l.sort(key=lambda item: item.reportinfo()[:2])
        return l

    def makeitem(self, name, obj):
        #assert self.ihook.fspath == self.fspath, self
        return self.ihook.pytest_pycollect_makeitem(
            collector=self, name=name, obj=obj)

    def _genfunctions(self, name, funcobj):
        """Yield Function items for *funcobj*, expanding parametrization."""
        module = self.getparent(Module).obj
        clscol = self.getparent(Class)
        cls = clscol and clscol.obj or None
        transfer_markers(funcobj, cls, module)
        fm = self.session._fixturemanager
        fixtureinfo = fm.getfixtureinfo(self, funcobj, cls)
        metafunc = Metafunc(funcobj, fixtureinfo, self.config,
                            cls=cls, module=module)
        methods = []
        if hasattr(module, "pytest_generate_tests"):
            methods.append(module.pytest_generate_tests)
        if hasattr(cls, "pytest_generate_tests"):
            methods.append(cls().pytest_generate_tests)
        if methods:
            self.ihook.pytest_generate_tests.call_extra(methods,
                                                        dict(metafunc=metafunc))
        else:
            self.ihook.pytest_generate_tests(metafunc=metafunc)

        Function = self._getcustomclass("Function")
        if not metafunc._calls:
            yield Function(name, parent=self, fixtureinfo=fixtureinfo)
        else:
            # add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs
            add_funcarg_pseudo_fixture_def(self, metafunc, fm)

            for callspec in metafunc._calls:
                subname = "%s[%s]" %(name, callspec.id)
                yield Function(name=subname, parent=self,
                               callspec=callspec, callobj=funcobj,
                               fixtureinfo=fixtureinfo,
                               keywords={callspec.id:True})
def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
    """Convert direct funcarg parametrization on *metafunc* into pseudo
    FixtureDef objects registered in its arg2fixturedefs mapping."""
    # this function will transform all collected calls to a functions
    # if they use direct funcargs (i.e. direct parametrization)
    # because we want later test execution to be able to rely on
    # an existing FixtureDef structure for all arguments.
    # XXX we can probably avoid this algorithm if we modify CallSpec2
    # to directly care for creating the fixturedefs within its methods.
    if not metafunc._calls[0].funcargs:
        return # this function call does not have direct parametrization
    # collect funcargs of all callspecs into a list of values
    arg2params = {}
    arg2scope = {}
    for callspec in metafunc._calls:
        for argname, argvalue in callspec.funcargs.items():
            assert argname not in callspec.params
            callspec.params[argname] = argvalue
            arg2params_list = arg2params.setdefault(argname, [])
            callspec.indices[argname] = len(arg2params_list)
            arg2params_list.append(argvalue)
            if argname not in arg2scope:
                scopenum = callspec._arg2scopenum.get(argname,
                                                      scopenum_function)
                arg2scope[argname] = scopes[scopenum]
        callspec.funcargs.clear()

    # register artificial FixtureDef's so that later at test execution
    # time we can rely on a proper FixtureDef to exist for fixture setup.
    arg2fixturedefs = metafunc._arg2fixturedefs
    for argname, valuelist in arg2params.items():
        # if we have a scope that is higher than function we need
        # to make sure we only ever create an according fixturedef on
        # a per-scope basis. We thus store and cache the fixturedef on the
        # node related to the scope.
        scope = arg2scope[argname]
        node = None
        if scope != "function":
            node = get_scope_node(collector, scope)
            if node is None:
                assert scope == "class" and isinstance(collector, Module)
                # use module-level collector for class-scope (for now)
                node = collector
        if node and argname in node._name2pseudofixturedef:
            arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
        else:
            fixturedef = FixtureDef(fixturemanager, '', argname,
                                    get_direct_param_fixture_func,
                                    arg2scope[argname],
                                    valuelist, False, False)
            arg2fixturedefs[argname] = [fixturedef]
            if node is not None:
                node._name2pseudofixturedef[argname] = fixturedef
def get_direct_param_fixture_func(request):
    """Fixture implementation backing direct parametrization: hand back
    the current parameter unchanged."""
    param = request.param
    return param
class FuncFixtureInfo:
    """Container for the fixture-related facts about one test function:
    its direct argument names, the transitive fixture-name closure, and
    the mapping from fixture name to its FixtureDef list."""

    def __init__(self, argnames, names_closure, name2fixturedefs):
        self.argnames, self.names_closure, self.name2fixturedefs = (
            argnames, names_closure, name2fixturedefs)
def _marked(func, mark):
""" Returns True if :func: is already marked with :mark:, False otherwise.
This can happen if marker is applied to class and the test file is
invoked more than once.
"""
try:
func_mark = getattr(func, mark.name)
except AttributeError:
return False
return mark.args == func_mark.args and mark.kwargs == func_mark.kwargs
def transfer_markers(funcobj, cls, mod):
    """Apply class- and module-level 'pytestmark' markers to *funcobj*,
    skipping marks that are already present on it."""
    # XXX this should rather be code in the mark plugin or the mark
    # plugin should merge with the python plugin.
    for holder in (cls, mod):
        try:
            pytestmark = holder.pytestmark
        except AttributeError:
            continue
        # pytestmark may be a single mark or a list of marks
        if isinstance(pytestmark, list):
            for mark in pytestmark:
                if not _marked(funcobj, mark):
                    mark(funcobj)
        else:
            if not _marked(funcobj, pytestmark):
                pytestmark(funcobj)
class Module(pytest.File, PyCollector):
    """ Collector for test classes and functions. """
    def _getobj(self):
        # import (and cache) the python module backing this file
        return self._memoizedcall('_obj', self._importtestmodule)

    def collect(self):
        # register fixture factories defined in this module first
        self.session._fixturemanager.parsefactories(self)
        return super(Module, self).collect()

    def _importtestmodule(self):
        # we assume we are only called once per module
        importmode = self.config.getoption("--import-mode")
        try:
            mod = self.fspath.pyimport(ensuresyspath=importmode)
        except SyntaxError:
            raise self.CollectError(
                _pytest._code.ExceptionInfo().getrepr(style="short"))
        except self.fspath.ImportMismatchError:
            e = sys.exc_info()[1]
            raise self.CollectError(
                "import file mismatch:\n"
                "imported module %r has this __file__ attribute:\n"
                " %s\n"
                "which is not the same as the test file we want to collect:\n"
                " %s\n"
                "HINT: remove __pycache__ / .pyc files and/or use a "
                "unique basename for your test file modules"
                 % e.args
            )
        #print "imported test module", mod
        self.config.pluginmanager.consider_module(mod)
        return mod

    def setup(self):
        """Run xunit-style module setup and register the matching
        teardown as a finalizer."""
        setup_module = xunitsetup(self.obj, "setUpModule")
        if setup_module is None:
            setup_module = xunitsetup(self.obj, "setup_module")
        if setup_module is not None:
            #XXX: nose compat hack, move to nose plugin
            # if it takes a positional arg, its probably a pytest style one
            # so we pass the current module object
            if _has_positional_arg(setup_module):
                setup_module(self.obj)
            else:
                setup_module()
        fin = getattr(self.obj, 'tearDownModule', None)
        if fin is None:
            fin = getattr(self.obj, 'teardown_module', None)
        if fin is not None:
            #XXX: nose compat hack, move to nose plugin
            # if it takes a positional arg, it's probably a pytest style one
            # so we pass the current module object
            if _has_positional_arg(fin):
                finalizer = lambda: fin(self.obj)
            else:
                finalizer = fin
            self.addfinalizer(finalizer)
class Class(PyCollector):
    """ Collector for test methods. """
    def collect(self):
        # classes with a custom constructor cannot be collected: pytest
        # would not know which arguments to instantiate them with
        if hasinit(self.obj):
            self.warn("C1", "cannot collect test class %r because it has a "
                "__init__ constructor" % self.obj.__name__)
            return []
        return [self._getcustomclass("Instance")(name="()", parent=self)]
    def setup(self):
        # setup_class/teardown_class are unwrapped to plain functions
        # (im_func on py2, __func__ on py3) so they can be called with the
        # class object as their single argument
        setup_class = xunitsetup(self.obj, 'setup_class')
        if setup_class is not None:
            setup_class = getattr(setup_class, 'im_func', setup_class)
            setup_class = getattr(setup_class, '__func__', setup_class)
            setup_class(self.obj)
        fin_class = getattr(self.obj, 'teardown_class', None)
        if fin_class is not None:
            fin_class = getattr(fin_class, 'im_func', fin_class)
            fin_class = getattr(fin_class, '__func__', fin_class)
            self.addfinalizer(lambda: fin_class(self.obj))
class Instance(PyCollector):
    """Collector node representing an instantiated test class; created by
    Class.collect() under the synthetic name "()"."""
    def _getobj(self):
        # instantiate the test class collected by the parent Class node
        obj = self.parent.obj()
        return obj
    def collect(self):
        # fixtures may also be defined as methods on the class instance
        self.session._fixturemanager.parsefactories(self)
        return super(Instance, self).collect()
    def newinstance(self):
        # create a fresh class instance (one per test function), see
        # FunctionMixin.setup
        self.obj = self._getobj()
        return self.obj
class FunctionMixin(PyobjMixin):
    """ mixin for the code common to Function and Generator.
    """
    def setup(self):
        """ perform setup for this test function. """
        if hasattr(self, '_preservedparent'):
            # set by Generator.collect() to keep the originally collected
            # parent object alive across re-instantiation
            obj = self._preservedparent
        elif isinstance(self.parent, Instance):
            # each test method runs on a fresh class instance
            obj = self.parent.newinstance()
            self.obj = self._getobj()
        else:
            obj = self.parent.obj
        # methods use setup_method/teardown_method, plain functions use
        # setup_function/teardown_function
        if inspect.ismethod(self.obj):
            setup_name = 'setup_method'
            teardown_name = 'teardown_method'
        else:
            setup_name = 'setup_function'
            teardown_name = 'teardown_function'
        setup_func_or_method = xunitsetup(obj, setup_name)
        if setup_func_or_method is not None:
            setup_func_or_method(self.obj)
        fin = getattr(obj, teardown_name, None)
        if fin is not None:
            self.addfinalizer(lambda: fin(self.obj))
    def _prunetraceback(self, excinfo):
        # cut the traceback down to the part inside the test function,
        # unless --fulltrace was given
        if hasattr(self, '_obj') and not self.config.option.fulltrace:
            code = _pytest._code.Code(get_real_func(self.obj))
            path, firstlineno = code.path, code.firstlineno
            traceback = excinfo.traceback
            ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
            if ntraceback == traceback:
                # fall back to progressively weaker cutting strategies
                ntraceback = ntraceback.cut(path=path)
                if ntraceback == traceback:
                    #ntraceback = ntraceback.cut(excludepath=cutdir2)
                    ntraceback = ntraceback.filter(filter_traceback)
                    if not ntraceback:
                        ntraceback = traceback
            excinfo.traceback = ntraceback.filter()
            # issue364: mark all but first and last frames to
            # only show a single-line message for each frame
            if self.config.option.tbstyle == "auto":
                if len(excinfo.traceback) > 2:
                    for entry in excinfo.traceback[1:-1]:
                        entry.set_repr_style('short')
    def _repr_failure_py(self, excinfo, style="long"):
        # pytest.fail(..., pytrace=False) asks for the bare message only
        if excinfo.errisinstance(pytest.fail.Exception):
            if not excinfo.value.pytrace:
                return py._builtin._totext(excinfo.value)
        return super(FunctionMixin, self)._repr_failure_py(excinfo,
            style=style)
    def repr_failure(self, excinfo, outerr=None):
        assert outerr is None, "XXX outerr usage is deprecated"
        style = self.config.option.tbstyle
        if style == "auto":
            style = "long"
        return self._repr_failure_py(excinfo, style=style)
class Generator(FunctionMixin, PyCollector):
    """Collector for yield-based test generators (deprecated style)."""
    def collect(self):
        # test generators are seen as collectors but they also
        # invoke setup/teardown on popular request
        # (induced by the common "test_*" naming shared with normal tests)
        self.session._setupstate.prepare(self)
        # see FunctionMixin.setup and test_setupstate_is_preserved_134
        self._preservedparent = self.parent.obj
        l = []
        seen = {}
        for i, x in enumerate(self.obj()):
            name, call, args = self.getcallargs(x)
            if not callable(call):
                raise TypeError("%r yielded non callable test %r" %(self.obj, call,))
            if name is None:
                # unnamed yields get positional ids like "[0]"
                name = "[%d]" % i
            else:
                name = "['%s']" % name
            if name in seen:
                raise ValueError("%r generated tests with non-unique name %r" %(self, name))
            seen[name] = True
            l.append(self.Function(name, self, args=args, callobj=call))
        return l
    def getcallargs(self, obj):
        """Split a yielded item into (name, callable, args); a leading
        string is taken as an explicit test name."""
        if not isinstance(obj, (tuple, list)):
            obj = (obj,)
        # explict naming
        if isinstance(obj[0], py.builtin._basestring):
            name = obj[0]
            obj = obj[1:]
        else:
            name = None
        call, args = obj[0], obj[1:]
        return name, call, args
def hasinit(obj):
    """Return True if *obj* defines its own ``__init__`` (one that is not
    simply inherited from ``object``), False otherwise.

    Used by Class.collect() to refuse test classes with constructors.
    """
    init = getattr(obj, '__init__', None)
    if init:
        if init != object.__init__:
            return True
    # explicit False instead of the previous implicit None fall-through;
    # truthiness is unchanged for all callers
    return False
def fillfixtures(function):
    """ fill missing funcargs for a test function. """
    try:
        request = function._request
    except AttributeError:
        # XXX this special code path is only expected to execute
        # with the oejskit plugin. It uses classes with funcargs
        # and we thus have to work a bit to allow this.
        fm = function.session._fixturemanager
        fi = fm.getfixtureinfo(function.parent, function.obj, None)
        function._fixtureinfo = fi
        request = function._request = FixtureRequest(function)
        request._fillfixtures()
        # prune out funcargs for jstests
        newfuncargs = {}
        for name in fi.argnames:
            newfuncargs[name] = function.funcargs[name]
        function.funcargs = newfuncargs
    else:
        # normal path: the request was created in Function._initrequest
        request._fillfixtures()
_notexists = object()  # sentinel for "no value given" (None may be a valid value)
class CallSpec2(object):
    """One concrete parametrized invocation of a test function, accumulating
    funcargs/params/ids/keywords over possibly several parametrize calls."""
    def __init__(self, metafunc):
        self.metafunc = metafunc
        self.funcargs = {}      # direct (non-indirect) argument values
        self._idlist = []       # id fragments, joined with "-" in .id
        self.params = {}        # indirect values, exposed via request.param
        self._globalid = _notexists
        self._globalid_args = set()
        self._globalparam = _notexists
        self._arg2scopenum = {} # used for sorting parametrized resources
        self.keywords = {}
        self.indices = {}       # argname -> param_index for fixture caching
    def copy(self, metafunc):
        # deep-enough copy so further parametrize calls multiply callspecs
        cs = CallSpec2(self.metafunc)
        cs.funcargs.update(self.funcargs)
        cs.params.update(self.params)
        cs.keywords.update(self.keywords)
        cs.indices.update(self.indices)
        cs._arg2scopenum.update(self._arg2scopenum)
        cs._idlist = list(self._idlist)
        cs._globalid = self._globalid
        cs._globalid_args = self._globalid_args
        cs._globalparam = self._globalparam
        return cs
    def _checkargnotcontained(self, arg):
        # an argname may only be parametrized once per callspec
        if arg in self.params or arg in self.funcargs:
            raise ValueError("duplicate %r" %(arg,))
    def getparam(self, name):
        """Return the indirect parameter for *name*, falling back to the
        global param set via the deprecated addcall(param=...)."""
        try:
            return self.params[name]
        except KeyError:
            if self._globalparam is _notexists:
                raise ValueError(name)
            return self._globalparam
    @property
    def id(self):
        # empty fragments are dropped before joining
        return "-".join(map(str, filter(None, self._idlist)))
    def setmulti(self, valtypes, argnames, valset, id, keywords, scopenum,
                 param_index):
        # record one value set from Metafunc.parametrize; valtypes maps each
        # argname to either "funcargs" (direct) or "params" (indirect)
        for arg,val in zip(argnames, valset):
            self._checkargnotcontained(arg)
            valtype_for_arg = valtypes[arg]
            getattr(self, valtype_for_arg)[arg] = val
            self.indices[arg] = param_index
            self._arg2scopenum[arg] = scopenum
            if val is _notexists:
                # empty parametrize list: the test will be skipped at setup
                self._emptyparamspecified = True
        self._idlist.append(id)
        self.keywords.update(keywords)
    def setall(self, funcargs, id, param):
        # deprecated Metafunc.addcall() entry point
        for x in funcargs:
            self._checkargnotcontained(x)
        self.funcargs.update(funcargs)
        if id is not _notexists:
            self._idlist.append(id)
        if param is not _notexists:
            assert self._globalparam is _notexists
            self._globalparam = param
        for arg in funcargs:
            self._arg2scopenum[arg] = scopenum_function
class FuncargnamesCompatAttr:
    """Mixin providing the legacy ``funcargnames`` alias.

    Metafunc, Function and FixtureRequest inherit from this so that none
    of them has to define the pre-2.3 compatibility attribute itself.
    """
    @property
    def funcargnames(self):
        """Deprecated alias for ``fixturenames`` (pre-2.3 API)."""
        return self.fixturenames
class Metafunc(FuncargnamesCompatAttr):
    """
    Metafunc objects are passed to the ``pytest_generate_tests`` hook.
    They help to inspect a test function and to generate tests according to
    test configuration or values specified in the class or module where a
    test function is defined.
    :ivar fixturenames: set of fixture names required by the test function
    :ivar function: underlying python test function
    :ivar cls: class object where the test function is defined in or ``None``.
    :ivar module: the module object where the test function is defined in.
    :ivar config: access to the :class:`_pytest.config.Config` object for the
        test session.
    :ivar funcargnames:
        .. deprecated:: 2.3
            Use ``fixturenames`` instead.
    """
    def __init__(self, function, fixtureinfo, config, cls=None, module=None):
        self.config = config
        self.module = module
        self.function = function
        self.fixturenames = fixtureinfo.names_closure
        self._arg2fixturedefs = fixtureinfo.name2fixturedefs
        self.cls = cls
        self._calls = []      # CallSpec2 objects, one per generated test
        self._ids = py.builtin.set()
    def parametrize(self, argnames, argvalues, indirect=False, ids=None,
        scope=None):
        """ Add new invocations to the underlying test function using the list
        of argvalues for the given argnames.  Parametrization is performed
        during the collection phase.  If you need to setup expensive resources
        see about setting indirect to do it rather at test setup time.
        :arg argnames: a comma-separated string denoting one or more argument
                       names, or a list/tuple of argument strings.
        :arg argvalues: The list of argvalues determines how often a
            test is invoked with different argument values. If only one
            argname was specified argvalues is a list of values.  If N
            argnames were specified, argvalues must be a list of N-tuples,
            where each tuple-element specifies a value for its respective
            argname.
        :arg indirect: The list of argnames or boolean. A list of arguments'
            names (subset of argnames). If True the list contains all names from
            the argnames. Each argvalue corresponding to an argname in this list will
            be passed as request.param to its respective argname fixture
            function so that it can perform more expensive setups during the
            setup phase of a test rather than at collection time.
        :arg ids: list of string ids, or a callable.
            If strings, each is corresponding to the argvalues so that they are
            part of the test id.
            If callable, it should take one argument (a single argvalue) and return
            a string or return None. If None, the automatically generated id for that
            argument will be used.
            If no ids are provided they will be generated automatically from
            the argvalues.
        :arg scope: if specified it denotes the scope of the parameters.
            The scope is used for grouping tests by parameter instances.
            It will also override any fixture-function defined scope, allowing
            to set a dynamic scope using test context or configuration.
        """
        # individual parametrized argument sets can be wrapped in a series
        # of markers in which case we unwrap the values and apply the mark
        # at Function init
        newkeywords = {}
        unwrapped_argvalues = []
        for i, argval in enumerate(argvalues):
            while isinstance(argval, MarkDecorator):
                # peel off one marker layer; the actual value is the last arg
                newmark = MarkDecorator(argval.markname,
                                        argval.args[:-1], argval.kwargs)
                newmarks = newkeywords.setdefault(i, {})
                newmarks[newmark.markname] = newmark
                argval = argval.args[-1]
            unwrapped_argvalues.append(argval)
        argvalues = unwrapped_argvalues
        if not isinstance(argnames, (tuple, list)):
            argnames = [x.strip() for x in argnames.split(",") if x.strip()]
            if len(argnames) == 1:
                # single argname: each value gets wrapped into a 1-tuple
                argvalues = [(val,) for val in argvalues]
        if not argvalues:
            # empty parameter list: generate one call that will be skipped
            # at setup time (see Function.setup / _emptyparamspecified)
            argvalues = [(_notexists,) * len(argnames)]
        if scope is None:
            scope = "function"
        scopenum = scopes.index(scope)
        valtypes = {}
        for arg in argnames:
            if arg not in self.fixturenames:
                raise ValueError("%r uses no fixture %r" %(self.function, arg))
        # valtypes: argname -> "params" (indirect) or "funcargs" (direct)
        if indirect is True:
            valtypes = dict.fromkeys(argnames, "params")
        elif indirect is False:
            valtypes = dict.fromkeys(argnames, "funcargs")
        elif isinstance(indirect, (tuple, list)):
            valtypes = dict.fromkeys(argnames, "funcargs")
            for arg in indirect:
                if arg not in argnames:
                    raise ValueError("indirect given to %r: fixture %r doesn't exist" %(
                                     self.function, arg))
                valtypes[arg] = "params"
        idfn = None
        if callable(ids):
            idfn = ids
            ids = None
        if ids and len(ids) != len(argvalues):
            raise ValueError('%d tests specified with %d ids' %(
                             len(argvalues), len(ids)))
        if not ids:
            ids = idmaker(argnames, argvalues, idfn)
        # multiply existing callspecs with the new value sets (cartesian
        # product across successive parametrize() calls)
        newcalls = []
        for callspec in self._calls or [CallSpec2(self)]:
            for param_index, valset in enumerate(argvalues):
                assert len(valset) == len(argnames)
                newcallspec = callspec.copy(self)
                newcallspec.setmulti(valtypes, argnames, valset, ids[param_index],
                                     newkeywords.get(param_index, {}), scopenum,
                                     param_index)
                newcalls.append(newcallspec)
        self._calls = newcalls
    def addcall(self, funcargs=None, id=_notexists, param=_notexists):
        """ (deprecated, use parametrize) Add a new call to the underlying
        test function during the collection phase of a test run.  Note that
        request.addcall() is called during the test collection phase prior and
        independently to actual test execution.  You should only use addcall()
        if you need to specify multiple arguments of a test function.
        :arg funcargs: argument keyword dictionary used when invoking
            the test function.
        :arg id: used for reporting and identification purposes.  If you
            don't supply an `id` an automatic unique id will be generated.
        :arg param: a parameter which will be exposed to a later fixture function
            invocation through the ``request.param`` attribute.
        """
        assert funcargs is None or isinstance(funcargs, dict)
        if funcargs is not None:
            for name in funcargs:
                if name not in self.fixturenames:
                    pytest.fail("funcarg %r not used in this function." % name)
        else:
            funcargs = {}
        if id is None:
            raise ValueError("id=None not allowed")
        if id is _notexists:
            # default id is the positional call number
            id = len(self._calls)
        id = str(id)
        if id in self._ids:
            raise ValueError("duplicate id %r" % id)
        self._ids.add(id)
        cs = CallSpec2(self)
        cs.setall(funcargs, id, param)
        self._calls.append(cs)
# _escape_bytes has two implementations because bytes/str semantics differ
# between python 2 and 3; the active one is chosen at import time.
if _PY3:
    import codecs
    def _escape_bytes(val):
        """
        If val is pure ascii, returns it as a str(), otherwise escapes
        into a sequence of escaped bytes:
        b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6'
        note:
           the obvious "v.decode('unicode-escape')" will return
           valid utf-8 unicode if it finds them in the string, but we
           want to return escaped bytes for any byte, even if they match
           a utf-8 string.
        """
        if val:
            # source: http://goo.gl/bGsnwC
            encoded_bytes, _ = codecs.escape_encode(val)
            return encoded_bytes.decode('ascii')
        else:
            # empty bytes crashes codecs.escape_encode (#1087)
            return ''
else:
    def _escape_bytes(val):
        """
        In py2 bytes and str are the same type, so return it unchanged if it
        is a full ascii string, otherwise escape it into its binary form.
        """
        try:
            return val.decode('ascii')
        except UnicodeDecodeError:
            return val.encode('string-escape')
def _idval(val, argname, idx, idfn):
    """Return the id string for a single parametrized value: try the
    user-supplied ``idfn`` first, then type-based rules, and finally fall
    back to ``argname + index``."""
    if idfn:
        try:
            s = idfn(val)
            if s:
                return s
        except Exception:
            # a failing user idfn is ignored; fall back to builtin ids
            pass
    # NOTE: the bytes check must come before the str check (on py2 bytes
    # *is* str), so keep this branch order
    if isinstance(val, bytes):
        return _escape_bytes(val)
    elif isinstance(val, (float, int, str, bool, NoneType)):
        return str(val)
    elif isinstance(val, REGEX_TYPE):
        return _escape_bytes(val.pattern) if isinstance(val.pattern, bytes) else val.pattern
    elif enum is not None and isinstance(val, enum.Enum):
        return str(val)
    elif isclass(val) and hasattr(val, '__name__'):
        return val.__name__
    elif _PY2 and isinstance(val, unicode):
        # special case for python 2: if a unicode string is
        # convertible to ascii, return it as an str() object instead
        try:
            return str(val)
        except UnicodeError:
            # fallthrough
            pass
    # last resort: positional id such as "arg0"
    return str(argname)+str(idx)
def _idvalset(idx, valset, argnames, idfn):
    """Build the id string for one parametrized value set by joining the
    per-argument ids with ``-``."""
    parts = []
    for val, argname in zip(valset, argnames):
        parts.append(_idval(val, argname, idx, idfn))
    return "-".join(parts)
def idmaker(argnames, argvalues, idfn=None):
    """Return one id string per value set; if a (possibly user-supplied)
    idfn produced duplicates, disambiguate by prefixing the index."""
    ids = []
    for valindex, valset in enumerate(argvalues):
        ids.append(_idvalset(valindex, valset, argnames, idfn))
    if len(set(ids)) < len(ids):
        # user may have provided a bad idfn which means the ids are not unique
        ids = [str(i) + testid for i, testid in enumerate(ids)]
    return ids
def showfixtures(config):
    """Entry point for ``pytest --fixtures``: run a collection-only session
    and print the available fixtures."""
    from _pytest.main import wrap_session
    return wrap_session(config, _showfixtures_main)
def _showfixtures_main(config, session):
    """Collect the session and write each available fixture (with location
    and docstring) to the terminal; private fixtures (leading underscore)
    are only shown with -v."""
    import _pytest.config
    session.perform_collect()
    curdir = py.path.local()
    tw = _pytest.config.create_terminal_writer(config)
    verbose = config.getvalue("verbose")
    fm = session._fixturemanager
    available = []
    for argname, fixturedefs in fm._arg2fixturedefs.items():
        assert fixturedefs is not None
        if not fixturedefs:
            continue
        # the last fixturedef is the one that would actually be used
        fixturedef = fixturedefs[-1]
        loc = getlocation(fixturedef.func, curdir)
        available.append((len(fixturedef.baseid),
                          fixturedef.func.__module__,
                          curdir.bestrelpath(loc),
                          fixturedef.argname, fixturedef))
    # sort by baseid length/module/path so output groups by defining module
    available.sort()
    currentmodule = None
    for baseid, module, bestrel, argname, fixturedef in available:
        if currentmodule != module:
            if not module.startswith("_pytest."):
                tw.line()
                tw.sep("-", "fixtures defined from %s" %(module,))
                currentmodule = module
        if verbose <= 0 and argname[0] == "_":
            # hide private fixtures unless -v was given
            continue
        if verbose > 0:
            funcargspec = "%s -- %s" %(argname, bestrel,)
        else:
            funcargspec = argname
        tw.line(funcargspec, green=True)
        loc = getlocation(fixturedef.func, curdir)
        doc = fixturedef.func.__doc__ or ""
        if doc:
            for line in doc.strip().split("\n"):
                tw.line("    " + line.strip())
        else:
            tw.line("    %s: no docstring available" %(loc,),
                red=True)
def getlocation(function, curdir):
    """Return a ``path:lineno`` location string for *function*, with the
    path made relative to *curdir* when possible.

    Line numbers are 1-based in the returned string (the historical +1
    offset on ``co_firstlineno`` is kept for output compatibility).
    """
    # note: the previous function-local ``import inspect`` was redundant;
    # inspect is already imported at module level (used e.g. by
    # FunctionMixin.setup)
    fn = py.path.local(inspect.getfile(function))
    lineno = py.builtin._getcode(function).co_firstlineno
    if fn.relto(curdir):
        fn = fn.relto(curdir)
    return "%s:%d" %(fn, lineno+1)
# builtin pytest.raises helper
def raises(expected_exception, *args, **kwargs):
    """ assert that a code block/function call raises ``expected_exception``
    and raise a failure exception otherwise.
    This helper produces a ``ExceptionInfo()`` object (see below).
    If using Python 2.5 or above, you may use this function as a
    context manager::
        >>> with raises(ZeroDivisionError):
        ...    1/0
    .. note::
       When using ``pytest.raises`` as a context manager, it's worthwhile to
       note that normal context manager rules apply and that the exception
       raised *must* be the final line in the scope of the context manager.
       Lines of code after that, within the scope of the context manager will
       not be executed. For example::
           >>> with raises(OSError) as exc_info:
                   assert 1 == 1  # this will execute as expected
                   raise OSError(errno.EEXISTS, 'directory exists')
                   assert exc_info.value.errno == errno.EEXISTS  # this will not execute
       Instead, the following approach must be taken (note the difference in
       scope)::
           >>> with raises(OSError) as exc_info:
                   assert 1 == 1  # this will execute as expected
                   raise OSError(errno.EEXISTS, 'directory exists')
               assert exc_info.value.errno == errno.EEXISTS  # this will now execute
    Or you can specify a callable by passing a to-be-called lambda::
        >>> raises(ZeroDivisionError, lambda: 1/0)
        <ExceptionInfo ...>
    or you can specify an arbitrary callable with arguments::
        >>> def f(x): return 1/x
        ...
        >>> raises(ZeroDivisionError, f, 0)
        <ExceptionInfo ...>
        >>> raises(ZeroDivisionError, f, x=0)
        <ExceptionInfo ...>
    A third possibility is to use a string to be executed::
        >>> raises(ZeroDivisionError, "f(0)")
        <ExceptionInfo ...>
    .. autoclass:: _pytest._code.ExceptionInfo
        :members:
    .. note::
        Similar to caught exception objects in Python, explicitly clearing
        local references to returned ``ExceptionInfo`` objects can
        help the Python interpreter speed up its garbage collection.
        Clearing those references breaks a reference cycle
        (``ExceptionInfo`` --> caught exception --> frame stack raising
        the exception --> current frame stack --> local variables -->
        ``ExceptionInfo``) which makes Python keep all objects referenced
        from that cycle (including all local variables in the current
        frame) alive until the next cyclic garbage collection run. See the
        official Python ``try`` statement documentation for more detailed
        information.
    """
    __tracebackhide__ = True
    if expected_exception is AssertionError:
        # we want to catch a AssertionError
        # replace our subclass with the builtin one
        # see https://github.com/pytest-dev/pytest/issues/176
        from _pytest.assertion.util import BuiltinAssertionError \
            as expected_exception
    msg = ("exceptions must be old-style classes or"
           " derived from BaseException, not %s")
    if isinstance(expected_exception, tuple):
        for exc in expected_exception:
            if not isclass(exc):
                raise TypeError(msg % type(exc))
    elif not isclass(expected_exception):
        raise TypeError(msg % type(expected_exception))
    # three call styles: context manager (no args), code string, callable
    if not args:
        return RaisesContext(expected_exception)
    elif isinstance(args[0], str):
        code, = args
        assert isinstance(code, str)
        frame = sys._getframe(1)
        # execute the string in the caller's namespace (plus kwargs)
        loc = frame.f_locals.copy()
        loc.update(kwargs)
        #print "raises frame scope: %r" % frame.f_locals
        try:
            code  = _pytest._code.Source(code).compile()
            py.builtin.exec_(code, frame.f_globals, loc)
            # XXX didn't mean f_globals == f_locals something special?
            #     this is destroyed here ...
        except expected_exception:
            return _pytest._code.ExceptionInfo()
    else:
        func = args[0]
        try:
            func(*args[1:], **kwargs)
        except expected_exception:
            return _pytest._code.ExceptionInfo()
    pytest.fail("DID NOT RAISE {0}".format(expected_exception))
class RaisesContext(object):
    """Context manager returned by ``pytest.raises(exc)`` (no-args form);
    captures the raised exception into an ExceptionInfo."""
    def __init__(self, expected_exception):
        self.expected_exception = expected_exception
        self.excinfo = None
    def __enter__(self):
        # allocate an empty ExceptionInfo now; it is filled in __exit__
        self.excinfo = object.__new__(_pytest._code.ExceptionInfo)
        return self.excinfo
    def __exit__(self, *tp):
        __tracebackhide__ = True
        if tp[0] is None:
            pytest.fail("DID NOT RAISE")
        if sys.version_info < (2, 7):
            # py26: on __exit__() exc_value often does not contain the
            # exception value.
            # http://bugs.python.org/issue7853
            if not isinstance(tp[1], BaseException):
                exc_type, value, traceback = tp
                tp = exc_type, exc_type(value), traceback
        self.excinfo.__init__(tp)
        # returning True suppresses the exception only if it matches
        return issubclass(self.excinfo.type, self.expected_exception)
#
# the basic pytest Function item
#
class Function(FunctionMixin, pytest.Item, FuncargnamesCompatAttr):
    """ a Function Item is responsible for setting up and executing a
    Python test function.
    """
    _genid = None  # id fragment set when generated via parametrization
    def __init__(self, name, parent, args=None, config=None,
                 callspec=None, callobj=NOTSET, keywords=None, session=None,
                 fixtureinfo=None):
        super(Function, self).__init__(name, parent, config=config,
                                       session=session)
        self._args = args  # only set for yielded (deprecated) functions
        if callobj is not NOTSET:
            self.obj = callobj
        # keywords come from the function's own attributes, the callspec
        # (parametrize markers) and explicitly passed keywords, in that order
        self.keywords.update(self.obj.__dict__)
        if callspec:
            self.callspec = callspec
            self.keywords.update(callspec.keywords)
        if keywords:
            self.keywords.update(keywords)
        if fixtureinfo is None:
            fixtureinfo = self.session._fixturemanager.getfixtureinfo(
                self.parent, self.obj, self.cls,
                funcargs=not self._isyieldedfunction())
        self._fixtureinfo = fixtureinfo
        self.fixturenames = fixtureinfo.names_closure
        self._initrequest()
    def _initrequest(self):
        # prepare the FixtureRequest used to fill funcargs at setup time
        self.funcargs = {}
        if self._isyieldedfunction():
            assert not hasattr(self, "callspec"), (
                "yielded functions (deprecated) cannot have funcargs")
        else:
            if hasattr(self, "callspec"):
                callspec = self.callspec
                assert not callspec.funcargs
                self._genid = callspec.id
                if hasattr(callspec, "param"):
                    self.param = callspec.param
        self._request = FixtureRequest(self)
    @property
    def function(self):
        "underlying python 'function' object"
        return getattr(self.obj, 'im_func', self.obj)
    def _getobj(self):
        name = self.name
        i = name.find("[") # parametrization
        if i != -1:
            name = name[:i]
        return getattr(self.parent.obj, name)
    @property
    def _pyfuncitem(self):
        "(compatonly) for code expecting pytest-2.2 style request objects"
        return self
    def _isyieldedfunction(self):
        # yielded functions (from Generator) carry positional args
        return getattr(self, "_args", None) is not None
    def runtest(self):
        """ execute the underlying test function. """
        self.ihook.pytest_pyfunc_call(pyfuncitem=self)
    def setup(self):
        # check if parametrization happend with an empty list
        try:
            self.callspec._emptyparamspecified
        except AttributeError:
            pass
        else:
            # parametrize() was called with an empty argvalues list: skip
            fs, lineno = self._getfslineno()
            pytest.skip("got empty parameter set, function %s at %s:%d" %(
                self.function.__name__, fs, lineno))
        super(Function, self).setup()
        fillfixtures(self)
# scope2props maps each request scope to the FixtureRequest attributes that
# may legally be accessed from that scope; each scope inherits the
# attributes of the broader scopes above it (used by scopeproperty)
scope2props = dict(session=())
scope2props["module"] = ("fspath", "module")
scope2props["class"] = scope2props["module"] + ("cls",)
scope2props["instance"] = scope2props["class"] + ("instance", )
scope2props["function"] = scope2props["instance"] + ("function", "keywords")
def scopeproperty(name=None, doc=None):
    """Decorator factory for FixtureRequest properties that are only
    available in certain request scopes (see ``scope2props``).

    :param name: property name to report in the error message; defaults to
        the decorated function's ``__name__``.
    :param doc: docstring for the resulting property; defaults to the
        decorated function's own docstring.  (Previously this parameter was
        accepted but silently ignored.)
    """
    def decoratescope(func):
        scopename = name or func.__name__
        def provide(self):
            # only expose the attribute if the current scope allows it
            if func.__name__ in scope2props[self.scope]:
                return func(self)
            raise AttributeError("%s not available in %s-scoped context" % (
                scopename, self.scope))
        # honor an explicitly passed ``doc`` instead of dropping it
        return property(provide, None, None, doc or func.__doc__)
    return decoratescope
class FixtureRequest(FuncargnamesCompatAttr):
""" A request for a fixture from a test or fixture function.
A request object gives access to the requesting test context
and has an optional ``param`` attribute in case
the fixture is parametrized indirectly.
"""
    def __init__(self, pyfuncitem):
        self._pyfuncitem = pyfuncitem
        #: fixture for which this request is being performed
        self.fixturename = None
        #: Scope string, one of "function", "class", "module", "session"
        self.scope = "function"
        self._funcargs  = {}        # cache: argname -> computed fixture value
        self._fixturedefs = {}      # cache: argname -> active FixtureDef
        fixtureinfo = pyfuncitem._fixtureinfo
        # copy so dynamic getfuncargvalue() lookups don't mutate shared state
        self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
        self._arg2index = {}
        self.fixturenames = fixtureinfo.names_closure
        self._fixturemanager = pyfuncitem.session._fixturemanager
    @property
    def node(self):
        """ underlying collection node (depends on current request scope)"""
        return self._getscopeitem(self.scope)
    def _getnextfixturedef(self, argname):
        """Return the next (outer) FixtureDef for *argname*, supporting
        fixtures that override same-named fixtures from broader scopes."""
        fixturedefs = self._arg2fixturedefs.get(argname, None)
        if fixturedefs is None:
            # we arrive here because of a dynamic call to
            # getfuncargvalue(argname) usage which was naturally
            # not known at parsing/collection time
            fixturedefs = self._fixturemanager.getfixturedefs(
                    argname, self._pyfuncitem.parent.nodeid)
            self._arg2fixturedefs[argname] = fixturedefs
        # fixturedefs list is immutable so we maintain a decreasing index
        # (-1 is the innermost definition, -2 the one it overrides, ...)
        index = self._arg2index.get(argname, 0) - 1
        if fixturedefs is None or (-index > len(fixturedefs)):
            raise FixtureLookupError(argname, self)
        self._arg2index[argname] = index
        return fixturedefs[index]
    @property
    def config(self):
        """ the pytest config object associated with this request. """
        return self._pyfuncitem.config
    @scopeproperty()  # only legal in function scope
    def function(self):
        """ test function object if the request has a per-function scope. """
        return self._pyfuncitem.obj
    @scopeproperty("class")
    def cls(self):
        """ class (can be None) where the test function was collected. """
        clscol = self._pyfuncitem.getparent(pytest.Class)
        if clscol:
            return clscol.obj
    @property
    def instance(self):
        """ instance (can be None) on which test function was collected. """
        # unittest support hack, see _pytest.unittest.TestCaseFunction
        try:
            return self._pyfuncitem._testcase
        except AttributeError:
            # fall back to the bound method's instance, if any
            function = getattr(self, "function", None)
            if function is not None:
                return py.builtin._getimself(function)
    @scopeproperty()
    def module(self):
        """ python module object where the test function was collected. """
        return self._pyfuncitem.getparent(pytest.Module).obj
    @scopeproperty()
    def fspath(self):
        """ the file system path of the test module which collected this test. """
        return self._pyfuncitem.fspath
    @property
    def keywords(self):
        """ keywords/markers dictionary for the underlying node. """
        return self.node.keywords
    @property
    def session(self):
        """ pytest session object. """
        return self._pyfuncitem.session
    def addfinalizer(self, finalizer):
        """ add finalizer/teardown function to be called after the
        last test within the requesting test context finished
        execution. """
        # XXX usually this method is shadowed by fixturedef specific ones
        self._addfinalizer(finalizer, scope=self.scope)
    def _addfinalizer(self, finalizer, scope):
        # register the finalizer on the collection node matching *scope*
        colitem = self._getscopeitem(scope)
        self._pyfuncitem.session._setupstate.addfinalizer(
            finalizer=finalizer, colitem=colitem)
    def applymarker(self, marker):
        """ Apply a marker to a single test function invocation.
        This method is useful if you don't want to have a keyword/marker
        on all function invocations.
        :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
            created by a call to ``pytest.mark.NAME(...)``.
        """
        try:
            self.node.keywords[marker.markname] = marker
        except AttributeError:
            # object without .markname: not a MarkDecorator
            raise ValueError(marker)
    def raiseerror(self, msg):
        """ raise a FixtureLookupError with the given message. """
        raise self._fixturemanager.FixtureLookupError(None, self, msg)
    def _fillfixtures(self):
        # compute and store all fixture values not already present on the
        # item (e.g. values injected directly via parametrization)
        item = self._pyfuncitem
        fixturenames = getattr(item, "fixturenames", self.fixturenames)
        for argname in fixturenames:
            if argname not in item.funcargs:
                item.funcargs[argname] = self.getfuncargvalue(argname)
    def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
        """ (deprecated) Return a testing resource managed by ``setup`` &
        ``teardown`` calls.  ``scope`` and ``extrakey`` determine when the
        ``teardown`` function will be called so that subsequent calls to
        ``setup`` would recreate the resource.  With pytest-2.3 you often
        do not need ``cached_setup()`` as you can directly declare a scope
        on a fixture function and register a finalizer through
        ``request.addfinalizer()``.
        :arg teardown: function receiving a previously setup resource.
        :arg setup: a no-argument function creating a resource.
        :arg scope: a string value out of ``function``, ``class``, ``module``
            or ``session`` indicating the caching lifecycle of the resource.
        :arg extrakey: added to internal caching key of (funcargname, scope).
        """
        if not hasattr(self.config, '_setupcache'):
            self.config._setupcache = {} # XXX weakref?
        # the scope item is part of the key so the cache entry dies with it
        cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
        cache = self.config._setupcache
        try:
            val = cache[cachekey]
        except KeyError:
            self._check_scope(self.fixturename, self.scope, scope)
            val = setup()
            cache[cachekey] = val
            if teardown is not None:
                def finalizer():
                    # drop the cache entry first so a failing teardown
                    # does not keep a stale resource cached
                    del cache[cachekey]
                    teardown(val)
                self._addfinalizer(finalizer, scope=scope)
        return val
    def getfuncargvalue(self, argname):
        """ Dynamically retrieve a named fixture function argument.
        As of pytest-2.3, it is easier and usually better to access other
        fixture values by stating it as an input argument in the fixture
        function.  If you only can decide about using another fixture at test
        setup time, you may use this function to retrieve it inside a fixture
        function body.
        """
        # cached_result is (value, cachekey, exception-info)
        return self._get_active_fixturedef(argname).cached_result[0]
    def _get_active_fixturedef(self, argname):
        """Return the (cached) FixtureDef for *argname*, executing the
        fixture function on first access."""
        try:
            return self._fixturedefs[argname]
        except KeyError:
            try:
                fixturedef = self._getnextfixturedef(argname)
            except FixtureLookupError:
                if argname == "request":
                    # "request" is special: serve the request object itself
                    # through a minimal stand-in fixturedef
                    class PseudoFixtureDef:
                        cached_result = (self, [0], None)
                        scope = "function"
                    return PseudoFixtureDef
                raise
        # remove indent to prevent the python3 exception
        # from leaking into the call
        result = self._getfuncargvalue(fixturedef)
        self._funcargs[argname] = result
        self._fixturedefs[argname] = fixturedef
        return fixturedef
def _get_fixturestack(self):
current = self
l = []
while 1:
fixturedef = getattr(current, "_fixturedef", None)
if fixturedef is None:
l.reverse()
return l
l.append(fixturedef)
current = current._parent_request
    def _getfuncargvalue(self, fixturedef):
        """Compute the value of *fixturedef* through a scoped SubRequest."""
        # prepare a subrequest object before calling fixture function
        # (latter managed by fixturedef)
        argname = fixturedef.argname
        funcitem = self._pyfuncitem
        scope = fixturedef.scope
        try:
            param = funcitem.callspec.getparam(argname)
        except (AttributeError, ValueError):
            # not parametrized: no callspec, or argname not part of it
            param = NOTSET
            param_index = 0
        else:
            # indices might not be set if old-style metafunc.addcall() was used
            param_index = funcitem.callspec.indices.get(argname, 0)
            # if a parametrize invocation set a scope it will override
            # the static scope defined with the fixture function
            paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
            if paramscopenum is not None:
                scope = scopes[paramscopenum]
        subrequest = SubRequest(self, scope, param, param_index, fixturedef)
        # check if a higher-level scoped fixture accesses a lower level one
        subrequest._check_scope(argname, self.scope, scope)
        # clear sys.exc_info before invoking the fixture (python bug?)
        # if its not explicitly cleared it will leak into the call
        exc_clear()
        try:
            # call the fixture function
            val = fixturedef.execute(request=subrequest)
        finally:
            # if fixture function failed it might have registered finalizers
            self.session._setupstate.addfinalizer(fixturedef.finish,
                subrequest.node)
        return val
def _check_scope(self, argname, invoking_scope, requested_scope):
if argname == "request":
return
if scopemismatch(invoking_scope, requested_scope):
# try to report something helpful
lines = self._factorytraceback()
pytest.fail("ScopeMismatch: You tried to access the %r scoped "
"fixture %r with a %r scoped request object, "
"involved factories\n%s" %(
(requested_scope, argname, invoking_scope, "\n".join(lines))),
pytrace=False)
def _factorytraceback(self):
lines = []
for fixturedef in self._get_fixturestack():
factory = fixturedef.func
fs, lineno = getfslineno(factory)
p = self._pyfuncitem.session.fspath.bestrelpath(fs)
args = _format_args(factory)
lines.append("%s:%d: def %s%s" %(
p, lineno, factory.__name__, args))
return lines
def _getscopeitem(self, scope):
if scope == "function":
# this might also be a non-function Item despite its attribute name
return self._pyfuncitem
node = get_scope_node(self._pyfuncitem, scope)
if node is None and scope == "class":
# fallback to function item itself
node = self._pyfuncitem
assert node
return node
def __repr__(self):
return "<FixtureRequest for %r>" %(self.node)
class SubRequest(FixtureRequest):
    """ a sub request for handling getting a fixture from a
    test function/fixture. """
    def __init__(self, request, scope, param, param_index, fixturedef):
        # parent request lets _get_fixturestack() walk back to the test item
        self._parent_request = request
        self.fixturename = fixturedef.argname
        # only set .param when actually parametrized, so that
        # hasattr(request, "param") works as a feature test in fixtures
        if param is not NOTSET:
            self.param = param
        self.param_index = param_index
        self.scope = scope
        self._fixturedef = fixturedef
        self.addfinalizer = fixturedef.addfinalizer
        # share the parent's caches and lookup state
        self._pyfuncitem = request._pyfuncitem
        self._funcargs = request._funcargs
        self._fixturedefs = request._fixturedefs
        self._arg2fixturedefs = request._arg2fixturedefs
        self._arg2index = request._arg2index
        self.fixturenames = request.fixturenames
        self._fixturemanager = request._fixturemanager
    def __repr__(self):
        return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
class ScopeMismatchError(Exception):
    """ A fixture function tries to use a different fixture function
    which has a lower scope (e.g. a Session one calls a function one)
    """
# fixture scopes ordered from widest (session) to narrowest (function)
scopes = "session module class function".split()
scopenum_function = scopes.index("function")
def scopemismatch(currentscope, newscope):
    """Return True if *newscope* is narrower than the invoking
    *currentscope* (a wide-scoped request must not use a narrow fixture)."""
    current_index = scopes.index(currentscope)
    new_index = scopes.index(newscope)
    return new_index > current_index
class FixtureLookupError(LookupError):
    """ could not return a requested Fixture (missing or invalid). """
    def __init__(self, argname, request, msg=None):
        self.argname = argname
        self.request = request
        # snapshot the factory chain now; the request may change later
        self.fixturestack = request._get_fixturestack()
        self.msg = msg
    def formatrepr(self):
        """Build a FixtureLookupErrorRepr showing the involved factories
        and an explanatory message."""
        tblines = []
        addline = tblines.append
        stack = [self.request._pyfuncitem.obj]
        stack.extend(map(lambda x: x.func, self.fixturestack))
        msg = self.msg
        if msg is not None:
            # the last fixture raise an error, let's present
            # it at the requesting side
            stack = stack[:-1]
        for function in stack:
            fspath, lineno = getfslineno(function)
            try:
                lines, _ = inspect.getsourcelines(get_real_func(function))
            except (IOError, IndexError):
                error_msg = "file %s, line %s: source code not available"
                addline(error_msg % (fspath, lineno+1))
            else:
                addline("file %s, line %s" % (fspath, lineno+1))
                # show the source up to and including the "def" line
                for i, line in enumerate(lines):
                    line = line.rstrip()
                    addline(" " + line)
                    if line.lstrip().startswith('def'):
                        break
        if msg is None:
            # no underlying error: the fixture simply was not found;
            # list the fixtures that would have been visible here
            fm = self.request._fixturemanager
            available = []
            for name, fixturedef in fm._arg2fixturedefs.items():
                parentid = self.request._pyfuncitem.parent.nodeid
                faclist = list(fm._matchfactories(fixturedef, parentid))
                if faclist:
                    available.append(name)
            msg = "fixture %r not found" % (self.argname,)
            msg += "\n available fixtures: %s" %(", ".join(available),)
            msg += "\n use 'py.test --fixtures [testpath]' for help on them."
        return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
    """Terminal-writable representation of a FixtureLookupError."""
    def __init__(self, filename, firstlineno, tblines, errorstring, argname):
        self.argname = argname
        self.filename = filename
        self.firstlineno = firstlineno
        self.tblines = tblines
        self.errorstring = errorstring
    def toterminal(self, tw):
        """Write the stored traceback lines, the (red) error message and
        the error location to terminal writer *tw*."""
        for traceback_line in self.tblines:
            tw.line(traceback_line.rstrip())
        for message_line in self.errorstring.split("\n"):
            tw.line(" " + message_line.strip(), red=True)
        tw.line()
        tw.line("%s:%d" % (self.filename, self.firstlineno + 1))
class FixtureManager:
    """
    pytest fixtures definitions and information is stored and managed
    from this class.
    During collection fm.parsefactories() is called multiple times to parse
    fixture function definitions into FixtureDef objects and internal
    data structures.
    During collection of test functions, metafunc-mechanics instantiate
    a FuncFixtureInfo object which is cached per node/func-name.
    This FuncFixtureInfo object is later retrieved by Function nodes
    which themselves offer a fixturenames attribute.
    The FuncFixtureInfo object holds information about fixtures and FixtureDefs
    relevant for a particular function. An initial list of fixtures is
    assembled like this:
    - ini-defined usefixtures
    - autouse-marked fixtures along the collection chain up from the function
    - usefixtures markers at module/class/function level
    - test function funcargs
    Subsequently the funcfixtureinfo.fixturenames attribute is computed
    as the closure of the fixtures needed to setup the initial fixtures,
    i. e. fixtures needed by fixture functions themselves are appended
    to the fixturenames list.
    Upon the test-setup phases all fixturenames are instantiated, retrieved
    by a lookup of their FuncFixtureInfo.
    """
    # name prefix marking pre-2.3 style fixture functions
    _argprefix = "pytest_funcarg__"
    FixtureLookupError = FixtureLookupError
    FixtureLookupErrorRepr = FixtureLookupErrorRepr
    def __init__(self, session):
        self.session = session
        self.config = session.config
        # fixture name -> list of FixtureDef, later defs take precedence
        self._arg2fixturedefs = {}
        self._holderobjseen = set()
        self._arg2finish = {}
        # (base nodeid, autouse fixture names) pairs; ini usefixtures
        # behave like session-wide autouse fixtures (baseid "")
        self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
        session.config.pluginmanager.register(self, "funcmanage")
    def getfixtureinfo(self, node, func, cls, funcargs=True):
        """Return a FuncFixtureInfo for test function *func* at *node*."""
        if funcargs and not hasattr(node, "nofuncargs"):
            if cls is not None:
                # skip the "self" argument of bound test methods
                startindex = 1
            else:
                startindex = None
            argnames = getfuncargnames(func, startindex)
        else:
            argnames = ()
        usefixtures = getattr(func, "usefixtures", None)
        initialnames = argnames
        if usefixtures is not None:
            initialnames = usefixtures.args + initialnames
        fm = node.session._fixturemanager
        names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
                                                              node)
        return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
    def pytest_plugin_registered(self, plugin):
        """Parse fixture definitions from a newly registered plugin."""
        nodeid = None
        try:
            p = py.path.local(plugin.__file__)
        except AttributeError:
            # plugin is not file-based (e.g. a registered instance)
            pass
        else:
            # construct the base nodeid which is later used to check
            # what fixtures are visible for particular tests (as denoted
            # by their test id)
            if p.basename.startswith("conftest.py"):
                nodeid = p.dirpath().relto(self.config.rootdir)
                if p.sep != "/":
                    nodeid = nodeid.replace(p.sep, "/")
        self.parsefactories(plugin, nodeid)
    def _getautousenames(self, nodeid):
        """ return a tuple of fixture names to be used. """
        autousenames = []
        for baseid, basenames in self._nodeid_and_autousenames:
            if nodeid.startswith(baseid):
                if baseid:
                    # avoid matching a mere name prefix: the character after
                    # baseid must be a nodeid separator
                    i = len(baseid)
                    nextchar = nodeid[i:i+1]
                    if nextchar and nextchar not in ":/":
                        continue
                autousenames.extend(basenames)
        # make sure autousenames are sorted by scope, scopenum 0 is session
        autousenames.sort(
            key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
        return autousenames
    def getfixtureclosure(self, fixturenames, parentnode):
        # collect the closure of all fixtures , starting with the given
        # fixturenames as the initial set.  As we have to visit all
        # factory definitions anyway, we also return a arg2fixturedefs
        # mapping so that the caller can reuse it and does not have
        # to re-discover fixturedefs again for each fixturename
        # (discovering matching fixtures for a given name/node is expensive)
        parentid = parentnode.nodeid
        fixturenames_closure = self._getautousenames(parentid)
        def merge(otherlist):
            for arg in otherlist:
                if arg not in fixturenames_closure:
                    fixturenames_closure.append(arg)
        merge(fixturenames)
        arg2fixturedefs = {}
        lastlen = -1
        # iterate until no new names are discovered (fixed point)
        while lastlen != len(fixturenames_closure):
            lastlen = len(fixturenames_closure)
            for argname in fixturenames_closure:
                if argname in arg2fixturedefs:
                    continue
                fixturedefs = self.getfixturedefs(argname, parentid)
                if fixturedefs:
                    arg2fixturedefs[argname] = fixturedefs
                    merge(fixturedefs[-1].argnames)
        return fixturenames_closure, arg2fixturedefs
    def pytest_generate_tests(self, metafunc):
        """Parametrize arguments backed by parametrized fixtures, unless
        the test function already parametrizes them directly."""
        for argname in metafunc.fixturenames:
            faclist = metafunc._arg2fixturedefs.get(argname)
            if faclist:
                fixturedef = faclist[-1]
                if fixturedef.params is not None:
                    func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]])
                    # skip directly parametrized arguments
                    argnames = func_params[0]
                    if not isinstance(argnames, (tuple, list)):
                        argnames = [x.strip() for x in argnames.split(",") if x.strip()]
                    if argname not in func_params and argname not in argnames:
                        metafunc.parametrize(argname, fixturedef.params,
                                             indirect=True, scope=fixturedef.scope,
                                             ids=fixturedef.ids)
            else:
                continue # will raise FixtureLookupError at setup time
    def pytest_collection_modifyitems(self, items):
        # separate parametrized setups
        items[:] = reorder_items(items)
    def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
        """Scan *node_or_obj* for fixture definitions and register them."""
        if nodeid is not NOTSET:
            holderobj = node_or_obj
        else:
            holderobj = node_or_obj.obj
            nodeid = node_or_obj.nodeid
        if holderobj in self._holderobjseen:
            return
        self._holderobjseen.add(holderobj)
        autousenames = []
        for name in dir(holderobj):
            obj = getattr(holderobj, name, None)
            # fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
            # or are "@pytest.fixture" marked
            marker = getfixturemarker(obj)
            if marker is None:
                if not name.startswith(self._argprefix):
                    continue
                if not callable(obj):
                    continue
                marker = defaultfuncargprefixmarker
                name = name[len(self._argprefix):]
            elif not isinstance(marker, FixtureFunctionMarker):
                # magic globals  with __getattr__ might have got us a wrong
                # fixture attribute
                continue
            else:
                assert not name.startswith(self._argprefix)
            fixturedef = FixtureDef(self, nodeid, name, obj,
                                    marker.scope, marker.params,
                                    yieldctx=marker.yieldctx,
                                    unittest=unittest, ids=marker.ids)
            faclist = self._arg2fixturedefs.setdefault(name, [])
            if fixturedef.has_location:
                faclist.append(fixturedef)
            else:
                # fixturedefs with no location are at the front
                # so this inserts the current fixturedef after the
                # existing fixturedefs from external plugins but
                # before the fixturedefs provided in conftests.
                i = len([f for f in faclist if not f.has_location])
                faclist.insert(i, fixturedef)
            if marker.autouse:
                autousenames.append(name)
        if autousenames:
            self._nodeid_and_autousenames.append((nodeid or '', autousenames))
    def getfixturedefs(self, argname, nodeid):
        """Return the tuple of FixtureDefs for *argname* visible at *nodeid*,
        or None if the name is unknown."""
        try:
            fixturedefs = self._arg2fixturedefs[argname]
        except KeyError:
            return None
        else:
            return tuple(self._matchfactories(fixturedefs, nodeid))
    def _matchfactories(self, fixturedefs, nodeid):
        # a fixturedef is visible when its baseid is a prefix of the nodeid
        for fixturedef in fixturedefs:
            if nodeid.startswith(fixturedef.baseid):
                yield fixturedef
def fail_fixturefunc(fixturefunc, msg):
    """Fail the test run, showing *fixturefunc*'s source and location."""
    filename, lineno = getfslineno(fixturefunc)
    location = "%s:%s" % (filename, lineno+1)
    source = _pytest._code.Source(fixturefunc)
    failure_message = msg + ":\n\n" + str(source.indent()) + "\n" + location
    pytest.fail(failure_message, pytrace=False)
def call_fixture_func(fixturefunc, request, kwargs, yieldctx):
    """Invoke *fixturefunc* with *kwargs* and return its result.

    For yield-fixtures (yieldctx=True) the value produced by the first
    ``yield`` is returned and a finalizer resuming the generator is
    registered on *request*.
    """
    if yieldctx:
        if not is_generator(fixturefunc):
            fail_fixturefunc(fixturefunc,
                msg="yield_fixture requires yield statement in function")
        iter = fixturefunc(**kwargs)
        # support both python3 (__next__) and python2 (next) generators
        next = getattr(iter, "__next__", None)
        if next is None:
            next = getattr(iter, "next")
        res = next()
        def teardown():
            # resuming past the single yield must exhaust the generator
            try:
                next()
            except StopIteration:
                pass
            else:
                fail_fixturefunc(fixturefunc,
                    "yield_fixture function has more than one 'yield'")
        request.addfinalizer(teardown)
    else:
        if is_generator(fixturefunc):
            fail_fixturefunc(fixturefunc,
                msg="pytest.fixture functions cannot use ``yield``. "
                "Instead write and return an inner function/generator "
                "and let the consumer call and iterate over it.")
        res = fixturefunc(**kwargs)
    return res
class FixtureDef:
    """ A container for a factory definition. """
    def __init__(self, fixturemanager, baseid, argname, func, scope, params,
                 yieldctx, unittest=False, ids=None):
        self._fixturemanager = fixturemanager
        # baseid is the nodeid prefix where this fixture is visible;
        # None means "no location" (e.g. plugin-provided)
        self.baseid = baseid or ''
        self.has_location = baseid is not None
        self.func = func
        self.argname = argname
        self.scope = scope
        self.scopenum = scopes.index(scope or "function")
        self.params = params
        # unittest-style fixtures receive self, skip it when introspecting
        startindex = unittest and 1 or None
        self.argnames = getfuncargnames(func, startindex=startindex)
        self.yieldctx = yieldctx
        self.unittest = unittest
        self.ids = ids
        self._finalizer = []
    def addfinalizer(self, finalizer):
        """Register a callable to run when this fixture is torn down."""
        self._finalizer.append(finalizer)
    def finish(self):
        """Run registered finalizers (LIFO) and drop the cached result."""
        try:
            while self._finalizer:
                func = self._finalizer.pop()
                func()
        finally:
            # even if finalization fails, we invalidate
            # the cached fixture value
            if hasattr(self, "cached_result"):
                del self.cached_result
    def execute(self, request):
        """Return the fixture value for *request*, computing and caching it
        (including a raised exception) per parametrization index."""
        # get required arguments and register our own finish()
        # with their finalization
        kwargs = {}
        for argname in self.argnames:
            fixturedef = request._get_active_fixturedef(argname)
            result, arg_cache_key, exc = fixturedef.cached_result
            request._check_scope(argname, request.scope, fixturedef.scope)
            kwargs[argname] = result
            if argname != "request":
                fixturedef.addfinalizer(self.finish)
        my_cache_key = request.param_index
        cached_result = getattr(self, "cached_result", None)
        if cached_result is not None:
            result, cache_key, err = cached_result
            if my_cache_key == cache_key:
                # cache hit: re-raise a cached failure or return the value
                if err is not None:
                    py.builtin._reraise(*err)
                else:
                    return result
            # we have a previous but differently parametrized fixture instance
            # so we need to tear it down before creating a new one
            self.finish()
            assert not hasattr(self, "cached_result")
        fixturefunc = self.func
        if self.unittest:
            if request.instance is not None:
                # bind the unbound method to the TestCase instance
                fixturefunc = self.func.__get__(request.instance)
        else:
            # the fixture function needs to be bound to the actual
            # request.instance so that code working with "self" behaves
            # as expected.
            if request.instance is not None:
                fixturefunc = getimfunc(self.func)
                if fixturefunc != self.func:
                    fixturefunc = fixturefunc.__get__(request.instance)
        try:
            result = call_fixture_func(fixturefunc, request, kwargs,
                                       self.yieldctx)
        except Exception:
            # cache the failure so re-requests within the same param fail fast
            self.cached_result = (None, my_cache_key, sys.exc_info())
            raise
        self.cached_result = (result, my_cache_key, None)
        return result
    def __repr__(self):
        return ("<FixtureDef name=%r scope=%r baseid=%r >" %
                (self.argname, self.scope, self.baseid))
def num_mock_patch_args(function):
    """ return number of arguments used up by mock arguments (if any) """
    patchings = getattr(function, "patchings", None)
    if not patchings:
        # plain function, not decorated by @mock.patch
        return 0
    mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None))
    if mock is None:
        # mock module unavailable: assume every patching injects an argument
        return len(patchings)
    injected = [p for p in patchings
                if not p.attribute_name and p.new is mock.DEFAULT]
    return len(injected)
def getfuncargnames(function, startindex=None):
    """Return the tuple of argument names of *function*, skipping the first
    *startindex* names and any trailing arguments that have defaults.

    Handles wrapped functions, bound methods, mock patchings and
    functools.partial objects.
    """
    # XXX merge with main.py's varnames
    #assert not isclass(function)
    realfunction = function
    while hasattr(realfunction, "__wrapped__"):
        realfunction = realfunction.__wrapped__
    if startindex is None:
        # skip "self" for bound methods
        startindex = inspect.ismethod(function) and 1 or 0
    if realfunction != function:
        # arguments consumed by @mock.patch decorators are not fixture names
        startindex += num_mock_patch_args(function)
        function = realfunction
    if isinstance(function, functools.partial):
        argnames = inspect.getargs(_pytest._code.getrawcode(function.func))[0]
        partial = function
        # drop positional and keyword arguments already bound by the partial
        argnames = argnames[len(partial.args):]
        if partial.keywords:
            for kw in partial.keywords:
                argnames.remove(kw)
    else:
        argnames = inspect.getargs(_pytest._code.getrawcode(function))[0]
    # arguments with defaults are not treated as fixture requests
    defaults = getattr(function, 'func_defaults',
                       getattr(function, '__defaults__', None)) or ()
    numdefaults = len(defaults)
    if numdefaults:
        return tuple(argnames[startindex:-numdefaults])
    return tuple(argnames[startindex:])
# algorithm for sorting on a per-parametrized resource setup basis
# it is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes such as to minimize number of "high scope"
# setups and teardowns
def reorder_items(items):
    """Reorder test items so items sharing parametrized high-scope fixture
    setups run together, minimizing setup/teardown churn."""
    argkeys_cache = {}
    for scopenum in range(scopenum_function):
        per_scope = {}
        for item in items:
            keys = set(get_parametrized_fixture_keys(item, scopenum))
            if keys:
                per_scope[item] = keys
        argkeys_cache[scopenum] = per_scope
    # start the recursive reordering at the widest scope (session == 0)
    return reorder_items_atscope(items, set(), argkeys_cache, 0)
def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
    """Recursively group *items* by their parametrized fixture keys at
    *scopenum*, then within each group by the next narrower scope."""
    # nothing to gain below function scope or with fewer than 3 items
    if scopenum >= scopenum_function or len(items) < 3:
        return items
    items_done = []
    while 1:
        items_before, items_same, items_other, newignore = \
            slice_items(items, ignore, argkeys_cache[scopenum])
        # recurse into the prefix with the next narrower scope
        items_before = reorder_items_atscope(
            items_before, ignore, argkeys_cache,scopenum+1)
        if items_same is None:
            # nothing to reorder in this scope
            assert items_other is None
            return items_done + items_before
        items_done.extend(items_before)
        # group the matching items together and continue with the rest
        items = items_same + items_other
        ignore = newignore
def slice_items(items, ignore, scoped_argkeys_cache):
    """Split *items* around the first not-yet-ignored fixture key.

    Returns (items_before, items_same, items_other, newignore) or
    (items, None, None, None) when no slicing key remains.
    """
    # we pick the first item which uses a fixture instance in the
    # requested scope and which we haven't seen yet.  We slice the input
    # items list into a list of items_nomatch, items_same and
    # items_other
    if scoped_argkeys_cache:  # do we need to do work at all?
        it = iter(items)
        # first find a slicing key
        for i, item in enumerate(it):
            argkeys = scoped_argkeys_cache.get(item)
            if argkeys is not None:
                argkeys = argkeys.difference(ignore)
                if argkeys:  # found a slicing key
                    slicing_argkey = argkeys.pop()
                    items_before = items[:i]
                    items_same = [item]
                    items_other = []
                    # now slice the remainder of the list
                    for item in it:
                        argkeys = scoped_argkeys_cache.get(item)
                        if argkeys and slicing_argkey in argkeys and \
                            slicing_argkey not in ignore:
                            items_same.append(item)
                        else:
                            items_other.append(item)
                    # remember the key so it is not used for slicing again
                    newignore = ignore.copy()
                    newignore.add(slicing_argkey)
                    return (items_before, items_same, items_other, newignore)
    return items, None, None, None
def get_parametrized_fixture_keys(item, scopenum):
    """ return list of keys for all parametrized arguments which match
    the specified scope. """
    assert scopenum < scopenum_function  # function
    try:
        cs = item.callspec
    except AttributeError:
        # item is not parametrized at all
        pass
    else:
        # cs.indices.items() is random order of argnames but
        # then again different functions (items) can change order of
        # arguments so it doesn't matter much probably
        for argname, param_index in cs.indices.items():
            if cs._arg2scopenum[argname] != scopenum:
                continue
            # widen the key with location info as the scope narrows
            if scopenum == 0:    # session
                key = (argname, param_index)
            elif scopenum == 1:  # module
                key = (argname, param_index, item.fspath)
            elif scopenum == 2:  # class
                key = (argname, param_index, item.fspath, item.cls)
            yield key
def xunitsetup(obj, name):
    """Return the attribute *name* of *obj* as an xunit-style setup method,
    or None if it is actually a fixture-marked function."""
    meth = getattr(obj, name, None)
    if getfixturemarker(meth) is not None:
        # fixture-marked callables are handled by the fixture machinery
        return None
    return meth
def getfixturemarker(obj):
    """Return *obj*'s fixture marker, or None when it is absent or the
    attribute lookup raised.

    Attribute access can raise on exotic proxy objects (e.g. flask's
    ``request``); such objects are treated as non-fixtures.
    """
    try:
        marker = getattr(obj, "_pytestfixturefunction", None)
    except KeyboardInterrupt:
        raise
    except Exception:
        # some objects raise errors like request (from flask import request)
        # we don't expect them to be fixture functions
        return None
    return marker
# map scope names to the collection node class implementing that scope;
# "session" is absent on purpose and handled specially in get_scope_node()
scopename2class = {
    'class': Class,
    'module': Module,
    'function': pytest.Item,
}
def get_scope_node(node, scope):
    """Return the ancestor node of *node* matching *scope*; the session
    scope maps to the session object itself."""
    cls = scopename2class.get(scope)
    if cls is not None:
        return node.getparent(cls)
    if scope == "session":
        return node.session
    raise ValueError("unknown scope")
| mpl-2.0 |
jhiswin/idiil-closure-library | closure/bin/labs/code/generate_jsdoc.py | 222 | 4318 | #!/usr/bin/env python
#
# Copyright 2013 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool to insert JsDoc before a function.
This script attempts to find the first function passed in to stdin, generate
JSDoc for it (with argument names and possibly return value), and inject
it in the string. This is intended to be used as a subprocess by editors
such as emacs and vi.
"""
import re
import sys
# Matches a typical Closure-style function definition.
_FUNCTION_REGEX = re.compile(r"""
# Start of line
^
# Indentation
(?P<indentation>[ ]*)
# Identifier (handling split across line)
(?P<identifier>\w+(\s*\.\s*\w+)*)
# "= function"
\s* = \s* function \s*
# opening paren
\(
# Function arguments
(?P<arguments>(?:\s|\w+|,)*)
# closing paren
\)
# opening bracket
\s* {
""", re.MULTILINE | re.VERBOSE)
def _MatchFirstFunction(script):
"""Match the first function seen in the script."""
return _FUNCTION_REGEX.search(script)
def _ParseArgString(arg_string):
"""Parse an argument string (inside parens) into parameter names."""
for arg in arg_string.split(','):
arg = arg.strip()
if arg:
yield arg
def _ExtractFunctionBody(script, indentation=0):
"""Attempt to return the function body."""
# Real extraction would require a token parser and state machines.
# We look for first bracket at the same level of indentation.
regex_str = r'{(.*?)^[ ]{%d}}' % indentation
function_regex = re.compile(regex_str, re.MULTILINE | re.DOTALL)
match = function_regex.search(script)
if match:
return match.group(1)
def _ContainsReturnValue(function_body):
"""Attempt to determine if the function body returns a value."""
return_regex = re.compile(r'\breturn\b[^;]')
# If this matches, we assume they're returning something.
return bool(return_regex.search(function_body))
def _InsertString(original_string, inserted_string, index):
"""Insert a string into another string at a given index."""
return original_string[0:index] + inserted_string + original_string[index:]
def _GenerateJsDoc(args, return_val=False):
"""Generate JSDoc for a function.
Args:
args: A list of names of the argument.
return_val: Whether the function has a return value.
Returns:
The JSDoc as a string.
"""
lines = []
lines.append('/**')
lines += [' * @param {} %s' % arg for arg in args]
if return_val:
lines.append(' * @return')
lines.append(' */')
return '\n'.join(lines) + '\n'
def _IndentString(source_string, indentation):
"""Indent string some number of characters."""
lines = [(indentation * ' ') + line
for line in source_string.splitlines(True)]
return ''.join(lines)
def InsertJsDoc(script):
    """Attempt to insert JSDoc for the first seen function in the script.
    Args:
      script: The script, as a string.
    Returns:
      Returns the new string if function was found and JSDoc inserted. Otherwise
      returns None.
    """
    match = _MatchFirstFunction(script)
    if not match:
        return
    # Add argument flags.
    args_string = match.group('arguments')
    args = _ParseArgString(args_string)
    start_index = match.start(0)
    function_to_end = script[start_index:]
    lvalue_indentation = len(match.group('indentation'))
    # Try to detect a return value so we can emit an @return tag.
    return_val = False
    function_body = _ExtractFunctionBody(function_to_end, lvalue_indentation)
    if function_body:
        return_val = _ContainsReturnValue(function_body)
    jsdoc = _GenerateJsDoc(args, return_val)
    # Align the comment block with the function definition's indentation.
    if lvalue_indentation:
        jsdoc = _IndentString(jsdoc, lvalue_indentation)
    return _InsertString(script, jsdoc, start_index)
if __name__ == '__main__':
    # Filter mode for editor integration: read a script on stdin and write
    # the augmented version to stdout, echoing the input unchanged when no
    # function definition was found.
    stdin_script = sys.stdin.read()
    result = InsertJsDoc(stdin_script)
    if result:
        sys.stdout.write(result)
    else:
        sys.stdout.write(stdin_script)
| apache-2.0 |
aaltinisik/OCBAltinkaya | addons/account_followup/tests/test_account_followup.py | 247 | 10222 | import datetime
from openerp import tools
from openerp.tests.common import TransactionCase
from openerp.osv import fields
class TestAccountFollowup(TransactionCase):
def setUp(self):
""" setUp ***"""
super(TestAccountFollowup, self).setUp()
cr, uid = self.cr, self.uid
self.user = self.registry('res.users')
self.user_id = self.user.browse(cr, uid, uid)
self.partner = self.registry('res.partner')
self.invoice = self.registry('account.invoice')
self.invoice_line = self.registry('account.invoice.line')
self.wizard = self.registry('account_followup.print')
self.followup_id = self.registry('account_followup.followup')
self.partner_id = self.partner.create(cr, uid, {'name':'Test Company',
'email':'test@localhost',
'is_company': True,
},
context=None)
self.followup_id = self.registry("ir.model.data").get_object_reference(cr, uid, "account_followup", "demo_followup1")[1]
self.account_id = self.registry("ir.model.data").get_object_reference(cr, uid, "account", "a_recv")[1]
self.journal_id = self.registry("ir.model.data").get_object_reference(cr, uid, "account", "bank_journal")[1]
self.pay_account_id = self.registry("ir.model.data").get_object_reference(cr, uid, "account", "cash")[1]
self.period_id = self.registry("ir.model.data").get_object_reference(cr, uid, "account", "period_10")[1]
self.first_followup_line_id = self.registry("ir.model.data").get_object_reference(cr, uid, "account_followup", "demo_followup_line1")[1]
self.last_followup_line_id = self.registry("ir.model.data").get_object_reference(cr, uid, "account_followup", "demo_followup_line3")[1]
self.product_id = self.registry("ir.model.data").get_object_reference(cr, uid, "product", "product_product_6")[1]
self.invoice_id = self.invoice.create(cr, uid, {'partner_id': self.partner_id,
'account_id': self.account_id,
'journal_id': self.journal_id,
'invoice_line': [(0, 0, {
'name': "LCD Screen",
'product_id': self.product_id,
'quantity': 5,
'price_unit':200
})]})
self.registry('account.invoice').signal_workflow(cr, uid, [self.invoice_id], 'invoice_open')
self.voucher = self.registry("account.voucher")
self.current_date = datetime.datetime.strptime(fields.date.context_today(self.user, cr, uid, context={}), tools.DEFAULT_SERVER_DATE_FORMAT)
def test_00_send_followup_after_3_days(self):
""" Send follow up after 3 days and check nothing is done (as first follow-up level is only after 15 days)"""
cr, uid = self.cr, self.uid
delta = datetime.timedelta(days=3)
result = self.current_date + delta
self.wizard_id = self.wizard.create(cr, uid, {'date':result.strftime(tools.DEFAULT_SERVER_DATE_FORMAT),
'followup_id': self.followup_id
}, context={"followup_id": self.followup_id})
self.wizard.do_process(cr, uid, [self.wizard_id], context={"followup_id": self.followup_id})
self.assertFalse(self.partner.browse(cr, uid, self.partner_id).latest_followup_level_id)
def run_wizard_three_times(self):
cr, uid = self.cr, self.uid
delta = datetime.timedelta(days=40)
result = self.current_date + delta
result = self.current_date + delta
self.wizard_id = self.wizard.create(cr, uid, {'date':result.strftime(tools.DEFAULT_SERVER_DATE_FORMAT),
'followup_id': self.followup_id
}, context={"followup_id": self.followup_id})
self.wizard.do_process(cr, uid, [self.wizard_id], context={"followup_id": self.followup_id})
self.wizard_id = self.wizard.create(cr, uid, {'date':result.strftime(tools.DEFAULT_SERVER_DATE_FORMAT),
'followup_id': self.followup_id
}, context={"followup_id": self.followup_id})
self.wizard.do_process(cr, uid, [self.wizard_id], context={"followup_id": self.followup_id})
self.wizard_id = self.wizard.create(cr, uid, {'date':result.strftime(tools.DEFAULT_SERVER_DATE_FORMAT),
'followup_id': self.followup_id,
}, context={"followup_id": self.followup_id})
self.wizard.do_process(cr, uid, [self.wizard_id], context={"followup_id": self.followup_id})
def test_01_send_followup_later_for_upgrade(self):
""" Send one follow-up after 15 days to check it upgrades to level 1"""
cr, uid = self.cr, self.uid
delta = datetime.timedelta(days=15)
result = self.current_date + delta
self.wizard_id = self.wizard.create(cr, uid, {
'date':result.strftime(tools.DEFAULT_SERVER_DATE_FORMAT),
'followup_id': self.followup_id
}, context={"followup_id": self.followup_id})
self.wizard.do_process(cr, uid, [self.wizard_id], context={"followup_id": self.followup_id})
self.assertEqual(self.partner.browse(cr, uid, self.partner_id).latest_followup_level_id.id, self.first_followup_line_id,
"Not updated to the correct follow-up level")
def test_02_check_manual_action(self):
""" Check that when running the wizard three times that the manual action is set"""
cr, uid = self.cr, self.uid
self.run_wizard_three_times()
self.assertEqual(self.partner.browse(cr, uid, self.partner_id).payment_next_action,
"Call the customer on the phone! ", "Manual action not set")
self.assertEqual(self.partner.browse(cr, uid, self.partner_id).payment_next_action_date,
self.current_date.strftime(tools.DEFAULT_SERVER_DATE_FORMAT))
def test_03_filter_on_credit(self):
""" Check the partners can be filtered on having credits """
cr, uid = self.cr, self.uid
ids = self.partner.search(cr, uid, [('payment_amount_due', '>', 0.0)])
self.assertIn(self.partner_id, ids)
def test_04_action_done(self):
""" Run the wizard 3 times, mark it as done, check the action fields are empty"""
cr, uid = self.cr, self.uid
partner_rec = self.partner.browse(cr, uid, self.partner_id)
self.run_wizard_three_times()
self.partner.action_done(cr, uid, self.partner_id)
self.assertFalse(partner_rec.payment_next_action, "Manual action not emptied")
self.assertFalse(partner_rec.payment_responsible_id)
self.assertFalse(partner_rec.payment_next_action_date)
def test_05_litigation(self):
""" Set the account move line as litigation, run the wizard 3 times and check nothing happened.
Turn litigation off. Run the wizard 3 times and check it is in the right follow-up level.
"""
cr, uid = self.cr, self.uid
aml_id = self.partner.browse(cr, uid, self.partner_id).unreconciled_aml_ids[0].id
self.registry('account.move.line').write(cr, uid, aml_id, {'blocked': True})
self.run_wizard_three_times()
self.assertFalse(self.partner.browse(cr, uid, self.partner_id).latest_followup_level_id, "Litigation does not work")
self.registry('account.move.line').write(cr, uid, aml_id, {'blocked': False})
self.run_wizard_three_times()
self.assertEqual(self.partner.browse(cr, uid, self.partner_id).latest_followup_level_id.id,
self.last_followup_line_id, "Lines are not equal")
    def test_06_pay_the_invoice(self):
        """Run wizard until manual action, pay the invoice and check that partner has no follow-up level anymore and after running the wizard the action is empty"""
        cr, uid = self.cr, self.uid
        # Reuse test 2 to bring the partner up to the manual-action level.
        self.test_02_check_manual_action()
        delta = datetime.timedelta(days=1)
        result = self.current_date + delta
        # Pay the invoice in full (1000.0 is presumably the fixture's total
        # amount due -- TODO confirm against the setUp data).
        self.invoice.pay_and_reconcile(cr, uid, [self.invoice_id], 1000.0, self.pay_account_id,
                                       self.period_id, self.journal_id, self.pay_account_id,
                                       self.period_id, self.journal_id,
                                       name = "Payment for test customer invoice follow-up")
        # Paying must clear the follow-up level immediately.
        self.assertFalse(self.partner.browse(cr, uid, self.partner_id).latest_followup_level_id, "Level not empty")
        # A new wizard run one day later must find nothing due and clear the
        # next-action date.
        self.wizard_id = self.wizard.create(cr, uid, {'date': result.strftime(tools.DEFAULT_SERVER_DATE_FORMAT),
                                                      'followup_id': self.followup_id
                                                      }, context={"followup_id": self.followup_id})
        self.wizard.do_process(cr, uid, [self.wizard_id], context={"followup_id": self.followup_id})
        self.assertEqual(0, self.partner.browse(cr, uid, self.partner_id).payment_amount_due, "Amount Due != 0")
        self.assertFalse(self.partner.browse(cr, uid, self.partner_id).payment_next_action_date, "Next action date not cleared")
| agpl-3.0 |
ubirch/aws-tools | virtual-env/lib/python2.7/site-packages/boto/elastictranscoder/exceptions.py | 184 | 1595 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import JSONResponseError
class LimitExceededException(JSONResponseError):
    """JSON error response of type ``LimitExceededException`` from the
    Elastic Transcoder service."""
    pass
class ResourceInUseException(JSONResponseError):
    """JSON error response of type ``ResourceInUseException`` from the
    Elastic Transcoder service."""
    pass
class AccessDeniedException(JSONResponseError):
    """JSON error response of type ``AccessDeniedException`` from the
    Elastic Transcoder service."""
    pass
class ResourceNotFoundException(JSONResponseError):
    """JSON error response of type ``ResourceNotFoundException`` from the
    Elastic Transcoder service."""
    pass
class InternalServiceException(JSONResponseError):
    """JSON error response of type ``InternalServiceException`` from the
    Elastic Transcoder service."""
    pass
class ValidationException(JSONResponseError):
    """JSON error response of type ``ValidationException`` from the
    Elastic Transcoder service."""
    pass
class IncompatibleVersionException(JSONResponseError):
    """JSON error response of type ``IncompatibleVersionException`` from the
    Elastic Transcoder service."""
    pass
| apache-2.0 |
bgris/ODL_bgris | lib/python3.5/site-packages/prompt_toolkit/styles/from_dict.py | 23 | 4579 | """
Tool for creating styles from a dictionary.
This is very similar to the Pygments style dictionary, with some additions:
- Support for reverse and blink.
- Support for ANSI color names. (These will map directly to the 16 terminal
colors.)
"""
from collections import Mapping
from .base import Style, DEFAULT_ATTRS, ANSI_COLOR_NAMES
from .defaults import DEFAULT_STYLE_EXTENSIONS
from .utils import merge_attrs, split_token_in_parts
from six.moves import range
# Public API of this module.
__all__ = (
    'style_from_dict',
)
def _colorformat(text):
"""
Parse/validate color format.
Like in Pygments, but also support the ANSI color names.
(These will map to the colors of the 16 color palette.)
"""
if text[0:1] == '#':
col = text[1:]
if col in ANSI_COLOR_NAMES:
return col
elif len(col) == 6:
return col
elif len(col) == 3:
return col[0]*2 + col[1]*2 + col[2]*2
elif text == '':
return text
raise ValueError('Wrong color format %r' % text)
def style_from_dict(style_dict, include_defaults=True):
    """
    Create a ``Style`` instance from a dictionary or other mapping.

    The dictionary is equivalent to the ``Style.styles`` dictionary from
    pygments, with a few additions: it supports 'reverse' and 'blink'.

    Usage::

        style_from_dict({
            Token: '#ff0000 bold underline',
            Token.Title: 'blink',
            Token.SomethingElse: 'reverse',
        })

    :param include_defaults: Include the defaults (built-in) styling for
        selected text, etc...)
    """
    assert isinstance(style_dict, Mapping)

    if include_defaults:
        # Built-in defaults first, so user entries override them.
        s2 = {}
        s2.update(DEFAULT_STYLE_EXTENSIONS)
        s2.update(style_dict)
        style_dict = s2

    # Expand token inheritance and turn style description into Attrs.
    token_to_attrs = {}

    # (Loop through the tokens in order. Sorting makes sure that
    # we process the parent first.)
    for ttype, styledef in sorted(style_dict.items()):
        # Start from parent Attrs or default Attrs.
        attrs = DEFAULT_ATTRS

        if 'noinherit' not in styledef:
            # Walk up the token hierarchy (nearest ancestor first) and take
            # the first ancestor already processed as the starting Attrs.
            for i in range(1, len(ttype) + 1):
                try:
                    attrs = token_to_attrs[ttype[:-i]]
                except KeyError:
                    pass
                else:
                    break

        # Now update with the given attributes.
        for part in styledef.split():
            if part == 'noinherit':
                pass
            elif part == 'bold':
                attrs = attrs._replace(bold=True)
            elif part == 'nobold':
                attrs = attrs._replace(bold=False)
            elif part == 'italic':
                attrs = attrs._replace(italic=True)
            elif part == 'noitalic':
                attrs = attrs._replace(italic=False)
            elif part == 'underline':
                attrs = attrs._replace(underline=True)
            elif part == 'nounderline':
                attrs = attrs._replace(underline=False)

            # prompt_toolkit extensions. Not in Pygments.
            elif part == 'blink':
                attrs = attrs._replace(blink=True)
            elif part == 'noblink':
                attrs = attrs._replace(blink=False)
            elif part == 'reverse':
                attrs = attrs._replace(reverse=True)
            elif part == 'noreverse':
                attrs = attrs._replace(reverse=False)

            # Pygments properties that we ignore.
            elif part in ('roman', 'sans', 'mono'):
                pass
            elif part.startswith('border:'):
                pass

            # Colors.
            elif part.startswith('bg:'):
                attrs = attrs._replace(bgcolor=_colorformat(part[3:]))
            else:
                attrs = attrs._replace(color=_colorformat(part))

        token_to_attrs[ttype] = attrs

    return _StyleFromDict(token_to_attrs)
class _StyleFromDict(Style):
    """
    Turn a dictionary that maps `Token` to `Attrs` into a style class.

    :param token_to_attrs: Dictionary that maps `Token` to `Attrs`.
    """
    def __init__(self, token_to_attrs):
        self.token_to_attrs = token_to_attrs

    def get_attrs_for_token(self, token):
        # Merge the Attrs of each part of the (possibly nested) token;
        # unknown parts fall back to DEFAULT_ATTRS.
        mapping = self.token_to_attrs
        parts = split_token_in_parts(token)
        return merge_attrs([mapping.get(part, DEFAULT_ATTRS) for part in parts])

    def invalidation_hash(self):
        return id(self.token_to_attrs)
| gpl-3.0 |
krader1961/python-mode | pymode/lint.py | 6 | 2211 | """Pylama integration."""
from .environment import env
from .utils import silence_stderr
import os.path
from pylama.lint.extensions import LINTERS
# Register the optional pylint linter with pylama when it is importable;
# otherwise skip it silently (pylint is not a hard dependency).
try:
    from pylama.lint.pylama_pylint import Linter
    LINTERS['pylint'] = Linter()
except Exception: # noqa
    pass
def code_check():
    """Run pylama and check current file.

    Reads the linter configuration from Vim variables, runs pylama on the
    current buffer's contents, sorts the resulting errors and pushes them
    into the PymodeLocList location list.

    :return bool:

    """

    # Everything up to the pylama run happens with stderr silenced so that
    # linter noise does not leak into the editor.
    with silence_stderr():

        from pylama.core import run
        from pylama.main import parse_options

        if not env.curbuf.name:
            # Unnamed buffer: nothing to check.
            return env.stop()

        linters = env.var('g:pymode_lint_checkers')
        env.debug(linters)

        options = parse_options(
            linters=linters, force=1,
            ignore=env.var('g:pymode_lint_ignore'),
            select=env.var('g:pymode_lint_select'),
        )

        # Merge per-linter option overrides from Vim variables.
        for linter in linters:
            opts = env.var('g:pymode_lint_options_%s' % linter, silence=True)
            if opts:
                options.linters_params[linter] = options.linters_params.get(linter, {})
                options.linters_params[linter].update(opts)

        env.debug(options)

        path = os.path.relpath(env.curbuf.name, env.curdir)
        env.debug("Start code check: ", path)

        if getattr(options, 'skip', None) and any(p.match(path) for p in options.skip):  # noqa
            env.message('Skip code checking.')
            env.debug("Skipped")
            return env.stop()

        if env.options.get('debug'):
            from pylama.core import LOGGER, logging
            LOGGER.setLevel(logging.DEBUG)

        # Lint the buffer contents (not the file on disk), so unsaved
        # changes are checked too.
        errors = run(path, code='\n'.join(env.curbuf) + '\n', options=options)

    env.debug("Find errors: ", len(errors))
    sort_rules = env.var('g:pymode_lint_sort')

    def __sort(e):
        # Rank error types by their position in g:pymode_lint_sort; unknown
        # types sort last.
        try:
            return sort_rules.index(e.get('type'))
        except ValueError:
            return 999

    if sort_rules:
        env.debug("Find sorting: ", sort_rules)
        errors = sorted(errors, key=__sort)

    for e in errors:
        # Attach the buffer number and normalize a missing column for the
        # location list.
        e._info['bufnr'] = env.curbuf.number
        if e._info['col'] is None:
            e._info['col'] = 1

    env.run('g:PymodeLocList.current().extend', [e._info for e in errors])
# pylama:ignore=W0212,E1103
| lgpl-3.0 |
ClearCorp/server-tools | base_import_match/tests/test_import.py | 2 | 2691 | # -*- coding: utf-8 -*-
# Copyright 2016 Grupo ESOC Ingeniería de Servicios, S.L.U. - Jairo Llopis
# Copyright 2016 Tecnativa - Vicent Cubells
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from os import path
from openerp.tests.common import TransactionCase
# Template path of the demo CSV files shipped with these tests.
PATH = path.join(path.dirname(__file__), "import_data", "%s.csv")

# Common ``base_import.import`` parsing options used by every test file.
OPTIONS = {
    "headers": True,
    "quoting": '"',
    "separator": ",",
}
class ImportCase(TransactionCase):
    """Check that ``base_import.import`` updates existing records matched on
    a key field (vat, email, name, login, ...) instead of creating
    duplicates."""

    def _base_import_record(self, res_model, file_name):
        """Create and return a ``base_import.import`` record."""
        with open(PATH % file_name) as demo_file:
            return self.env["base_import.import"].create({
                "res_model": res_model,
                "file": demo_file.read(),
                "file_name": "%s.csv" % file_name,
                "file_type": "csv",
            })

    def test_res_partner_vat(self):
        """Change name based on VAT."""
        agrolait = self.env.ref("base.res_partner_2")
        agrolait.vat = "BE0477472701"
        record = self._base_import_record("res.partner", "res_partner_vat")
        record.do(["name", "vat", "is_company"], OPTIONS)
        # Drop the ORM cache so the assertion reads the imported value.
        agrolait.env.invalidate_all()
        self.assertEqual(agrolait.name, "Agrolait Changed")

    def test_res_partner_parent_name_is_company(self):
        """Change email based on parent_id, name and is_company."""
        record = self._base_import_record(
            "res.partner", "res_partner_parent_name_is_company")
        record.do(["name", "is_company", "parent_id/id", "email"], OPTIONS)
        self.assertEqual(
            self.env.ref("base.res_partner_address_4").email,
            "changed@agrolait.example.com")

    def test_res_partner_email(self):
        """Change name based on email."""
        record = self._base_import_record("res.partner", "res_partner_email")
        record.do(["email", "name"], OPTIONS)
        self.assertEqual(
            self.env.ref("base.res_partner_address_4").name,
            "Michel Fletcher Changed")

    def test_res_partner_name(self):
        """Change function based on name."""
        record = self._base_import_record("res.partner", "res_partner_name")
        record.do(["function", "name"], OPTIONS)
        self.assertEqual(
            self.env.ref("base.res_partner_address_4").function,
            "Function Changed")

    def test_res_users_login(self):
        """Change name based on login."""
        record = self._base_import_record("res.users", "res_users_login")
        record.do(["login", "name"], OPTIONS)
        self.assertEqual(
            self.env.ref("base.user_demo").name,
            "Demo User Changed")
| agpl-3.0 |
guschmue/tensorflow | tensorflow/python/tools/optimize_for_inference_test.py | 34 | 13033 | # pylint: disable=g-bad-file-header
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.graph_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import math_ops # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
from tensorflow.python.tools import optimize_for_inference_lib
class OptimizeForInferenceTest(test.TestCase):
  """Exercises the optimize_for_inference rewriting passes: dead-node
  stripping, batch-norm folding and resize/pad + conv fusion."""

  def create_node_def(self, op, name, inputs):
    # Build a minimal NodeDef with the given op type, name and input names.
    new_node = node_def_pb2.NodeDef()
    new_node.op = op
    new_node.name = name
    for input_name in inputs:
      new_node.input.extend([input_name])
    return new_node

  def create_constant_node_def(self, name, value, dtype, shape=None):
    # Const node carrying ``value`` as an embedded tensor attribute.
    node = self.create_node_def("Const", name, [])
    self.set_attr_dtype(node, "dtype", dtype)
    self.set_attr_tensor(node, "value", value, dtype, shape)
    return node

  def set_attr_dtype(self, node, key, value):
    # Store a DataType attribute on the node.
    node.attr[key].CopyFrom(
        attr_value_pb2.AttrValue(type=value.as_datatype_enum))

  def set_attr_tensor(self, node, key, value, dtype, shape=None):
    # Store a TensorProto attribute on the node.
    node.attr[key].CopyFrom(
        attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
            value, dtype=dtype, shape=shape)))

  def testOptimizeForInference(self):
    # Hand-build a graph with unused constants, an unconnected Add,
    # CheckNumerics/Identity debug nodes, and an add whose output is also
    # consumed by an unused node; the optimizer must strip everything
    # except the plain a + b computation.
    self.maxDiff = 1000
    unused_constant_name = "unused_constant"
    unconnected_add_name = "unconnected_add"
    a_constant_name = "a_constant"
    b_constant_name = "b_constant"
    a_check_name = "a_check"
    b_check_name = "b_check"
    a_identity_name = "a_identity"
    b_identity_name = "b_identity"
    add_name = "add"
    unused_output_add_name = "unused_output_add"
    graph_def = graph_pb2.GraphDef()
    unused_constant = self.create_constant_node_def(
        unused_constant_name, value=0, dtype=dtypes.float32, shape=[])
    graph_def.node.extend([unused_constant])
    unconnected_add_node = self.create_node_def(
        "Add", unconnected_add_name,
        [unused_constant_name, unused_constant_name])
    self.set_attr_dtype(unconnected_add_node, "T", dtypes.float32)
    graph_def.node.extend([unconnected_add_node])
    a_constant = self.create_constant_node_def(
        a_constant_name, value=1, dtype=dtypes.float32, shape=[])
    graph_def.node.extend([a_constant])
    a_check_node = self.create_node_def("CheckNumerics", a_check_name,
                                        [a_constant_name])
    graph_def.node.extend([a_check_node])
    # "^name" is a control-dependency input.
    a_identity_node = self.create_node_def(
        "Identity", a_identity_name, [a_constant_name, "^" + a_check_name])
    graph_def.node.extend([a_identity_node])
    b_constant = self.create_constant_node_def(
        b_constant_name, value=1, dtype=dtypes.float32, shape=[])
    graph_def.node.extend([b_constant])
    b_check_node = self.create_node_def("CheckNumerics", b_check_name,
                                        [b_constant_name])
    graph_def.node.extend([b_check_node])
    b_identity_node = self.create_node_def(
        "Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
    graph_def.node.extend([b_identity_node])
    add_node = self.create_node_def("Add", add_name,
                                    [a_identity_name, b_identity_name])
    self.set_attr_dtype(add_node, "T", dtypes.float32)
    graph_def.node.extend([add_node])
    unused_output_add_node = self.create_node_def("Add", unused_output_add_name,
                                                  [add_name, b_constant_name])
    self.set_attr_dtype(unused_output_add_node, "T", dtypes.float32)
    graph_def.node.extend([unused_output_add_node])

    # Expected result: just the two constants feeding one Add.
    expected_output = graph_pb2.GraphDef()
    a_constant = self.create_constant_node_def(
        a_constant_name, value=1, dtype=dtypes.float32, shape=[])
    expected_output.node.extend([a_constant])
    b_constant = self.create_constant_node_def(
        b_constant_name, value=1, dtype=dtypes.float32, shape=[])
    expected_output.node.extend([b_constant])
    add_node = self.create_node_def("Add", add_name,
                                    [a_constant_name, b_constant_name])
    self.set_attr_dtype(add_node, "T", dtypes.float32)
    expected_output.node.extend([add_node])

    output = optimize_for_inference_lib.optimize_for_inference(
        graph_def, [], [add_name], dtypes.float32.as_datatype_enum)
    self.assertProtoEquals(expected_output, output)

  def testFoldBatchNorms(self):
    # Folding BatchNormWithGlobalNormalization into the preceding conv
    # weights must keep numerical results identical.
    with self.test_session() as sess:
      inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
      input_op = constant_op.constant(
          np.array(inputs), shape=[1, 1, 6, 2], dtype=dtypes.float32)
      weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
      weights_op = constant_op.constant(
          np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
      conv_op = nn_ops.conv2d(
          input_op, weights_op, [1, 1, 1, 1], padding="SAME", name="conv_op")
      mean_op = constant_op.constant(
          np.array([10, 20]), shape=[2], dtype=dtypes.float32)
      variance_op = constant_op.constant(
          np.array([0.25, 0.5]), shape=[2], dtype=dtypes.float32)
      beta_op = constant_op.constant(
          np.array([0.1, 0.6]), shape=[2], dtype=dtypes.float32)
      gamma_op = constant_op.constant(
          np.array([1.0, 2.0]), shape=[2], dtype=dtypes.float32)
      # Producer version 8 is required for the legacy batch-norm op.
      ops.get_default_graph().graph_def_versions.producer = 8
      gen_nn_ops._batch_norm_with_global_normalization(
          conv_op,
          mean_op,
          variance_op,
          beta_op,
          gamma_op,
          0.00001,
          False,
          name="output")
      original_graph_def = sess.graph_def
      original_result = sess.run(["output:0"])
    optimized_graph_def = optimize_for_inference_lib.fold_batch_norms(
        original_graph_def)

    with self.test_session() as sess:
      _ = importer.import_graph_def(
          optimized_graph_def, input_map={}, name="optimized")
      optimized_result = sess.run(["optimized/output:0"])

    self.assertAllClose(original_result, optimized_result)

    for node in optimized_graph_def.node:
      self.assertNotEqual("BatchNormWithGlobalNormalization", node.op)

  def testFoldFusedBatchNorms(self):
    # Same as above but for the fused FusedBatchNorm op (producer >= 9).
    with self.test_session() as sess:
      inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
      input_op = constant_op.constant(
          np.array(inputs), shape=[1, 1, 6, 2], dtype=dtypes.float32)
      weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
      weights_op = constant_op.constant(
          np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
      conv_op = nn_ops.conv2d(
          input_op, weights_op, [1, 1, 1, 1], padding="SAME", name="conv_op")
      mean_op = constant_op.constant(
          np.array([10, 20]), shape=[2], dtype=dtypes.float32)
      variance_op = constant_op.constant(
          np.array([0.25, 0.5]), shape=[2], dtype=dtypes.float32)
      beta_op = constant_op.constant(
          np.array([0.1, 0.6]), shape=[2], dtype=dtypes.float32)
      gamma_op = constant_op.constant(
          np.array([1.0, 2.0]), shape=[2], dtype=dtypes.float32)
      ops.get_default_graph().graph_def_versions.producer = 9
      gen_nn_ops._fused_batch_norm(
          conv_op,
          gamma_op,
          beta_op,
          mean_op,
          variance_op,
          0.00001,
          is_training=False,
          name="output")
      original_graph_def = sess.graph_def
      original_result = sess.run(["output:0"])
    optimized_graph_def = optimize_for_inference_lib.fold_batch_norms(
        original_graph_def)

    with self.test_session() as sess:
      _ = importer.import_graph_def(
          optimized_graph_def, input_map={}, name="optimized")
      optimized_result = sess.run(["optimized/output:0"])

    self.assertAllClose(
        original_result, optimized_result, rtol=1e-04, atol=1e-06)

    for node in optimized_graph_def.node:
      self.assertNotEqual("FusedBatchNorm", node.op)

  def testFuseResizePadAndConv(self):
    # ResizeBilinear -> MirrorPad -> Conv2D must be fused into one op.
    with self.test_session() as sess:
      inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
      input_op = constant_op.constant(
          np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
      resize_op = image_ops.resize_bilinear(
          input_op, [12, 4], align_corners=False)
      pad_op = array_ops.pad(resize_op, [[0, 0], [1, 1], [2, 2], [0, 0]],
                             mode="REFLECT")
      weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
      weights_op = constant_op.constant(
          np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
      nn_ops.conv2d(
          pad_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
      original_graph_def = sess.graph_def
      original_result = sess.run(["output:0"])
    optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
        original_graph_def, ["output"])

    with self.test_session() as sess:
      _ = importer.import_graph_def(
          optimized_graph_def, input_map={}, name="optimized")
      optimized_result = sess.run(["optimized/output:0"])

    self.assertAllClose(original_result, optimized_result)

    for node in optimized_graph_def.node:
      self.assertNotEqual("Conv2D", node.op)
      self.assertNotEqual("MirrorPad", node.op)
      self.assertNotEqual("ResizeBilinear", node.op)

  def testFuseResizeAndConv(self):
    # ResizeBilinear -> Conv2D (no pad) must also be fused.
    with self.test_session() as sess:
      inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
      input_op = constant_op.constant(
          np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
      resize_op = image_ops.resize_bilinear(
          input_op, [12, 4], align_corners=False)
      weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
      weights_op = constant_op.constant(
          np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
      nn_ops.conv2d(
          resize_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
      original_graph_def = sess.graph_def
      original_result = sess.run(["output:0"])
    optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
        original_graph_def, ["output"])

    with self.test_session() as sess:
      _ = importer.import_graph_def(
          optimized_graph_def, input_map={}, name="optimized")
      optimized_result = sess.run(["optimized/output:0"])

    self.assertAllClose(original_result, optimized_result)

    for node in optimized_graph_def.node:
      self.assertNotEqual("Conv2D", node.op)
      self.assertNotEqual("MirrorPad", node.op)

  def testFusePadAndConv(self):
    # MirrorPad -> Conv2D (no resize) must also be fused.
    with self.test_session() as sess:
      inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
      input_op = constant_op.constant(
          np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
      pad_op = array_ops.pad(input_op, [[0, 0], [1, 1], [2, 2], [0, 0]],
                             mode="REFLECT")
      weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
      weights_op = constant_op.constant(
          np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
      nn_ops.conv2d(
          pad_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
      original_graph_def = sess.graph_def
      original_result = sess.run(["output:0"])
    optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
        original_graph_def, ["output"])

    with self.test_session() as sess:
      _ = importer.import_graph_def(
          optimized_graph_def, input_map={}, name="optimized")
      optimized_result = sess.run(["optimized/output:0"])

    self.assertAllClose(original_result, optimized_result)

    for node in optimized_graph_def.node:
      self.assertNotEqual("Conv2D", node.op)
      self.assertNotEqual("ResizeBilinear", node.op)
if __name__ == "__main__":
test.main()
| apache-2.0 |
mancoast/CPythonPyc_test | cpython/223_test_descr.py | 1 | 90901 | # Test enhancements related to descriptors and new-style classes
from test_support import verify, vereq, verbose, TestFailed, TESTFN, get_original_stdout
from copy import deepcopy
import warnings
warnings.filterwarnings("ignore",
r'complex divmod\(\), // and % are deprecated$',
DeprecationWarning, r'(<string>|test_descr)$')
def veris(a, b):
if a is not b:
raise TestFailed, "%r is %r" % (a, b)
def testunop(a, res, expr="len(a)", meth="__len__"):
    # Check a unary operation three ways: eval of ``expr``, the unbound
    # method looked up on the class, and the bound method on the instance.
    if verbose: print "checking", expr
    dict = {'a': a}
    vereq(eval(expr, dict), res)
    t = type(a)
    m = getattr(t, meth)
    # Walk up single-inheritance bases to the class that defines ``meth``;
    # the attribute fetched via getattr must be that class's dict entry.
    while meth not in t.__dict__:
        t = t.__bases__[0]
    vereq(m, t.__dict__[meth])
    vereq(m(a), res)
    bm = getattr(a, meth)
    vereq(bm(), res)
def testbinop(a, b, res, expr="a+b", meth="__add__"):
    # Check a binary operation three ways: eval of ``expr``, the unbound
    # method looked up on the class, and the bound method on the instance.
    if verbose: print "checking", expr
    dict = {'a': a, 'b': b}

    # XXX Hack so this passes before 2.3 when -Qnew is specified.
    if meth == "__div__" and 1/2 == 0.5:
        meth = "__truediv__"

    vereq(eval(expr, dict), res)
    t = type(a)
    m = getattr(t, meth)
    # Walk up to the class that actually defines ``meth``.
    while meth not in t.__dict__:
        t = t.__bases__[0]
    vereq(m, t.__dict__[meth])
    vereq(m(a, b), res)
    bm = getattr(a, meth)
    vereq(bm(b), res)
def testternop(a, b, c, res, expr="a[b:c]", meth="__getslice__"):
    # Like testbinop, but for ternary operations (e.g. slicing).
    if verbose: print "checking", expr
    dict = {'a': a, 'b': b, 'c': c}
    vereq(eval(expr, dict), res)
    t = type(a)
    m = getattr(t, meth)
    # Walk up to the class that actually defines ``meth``.
    while meth not in t.__dict__:
        t = t.__bases__[0]
    vereq(m, t.__dict__[meth])
    vereq(m(a, b, c), res)
    bm = getattr(a, meth)
    vereq(bm(b, c), res)
def testsetop(a, b, res, stmt="a+=b", meth="__iadd__"):
    # Check an in-place binary operation three ways, deep-copying ``a``
    # before each so the mutations don't interfere.
    if verbose: print "checking", stmt
    dict = {'a': deepcopy(a), 'b': b}
    exec stmt in dict
    vereq(dict['a'], res)
    t = type(a)
    m = getattr(t, meth)
    # Walk up to the class that actually defines ``meth``.
    while meth not in t.__dict__:
        t = t.__bases__[0]
    vereq(m, t.__dict__[meth])
    dict['a'] = deepcopy(a)
    m(dict['a'], b)
    vereq(dict['a'], res)
    dict['a'] = deepcopy(a)
    bm = getattr(dict['a'], meth)
    bm(b)
    vereq(dict['a'], res)
def testset2op(a, b, c, res, stmt="a[b]=c", meth="__setitem__"):
    # Check a two-argument mutating operation (e.g. item assignment)
    # three ways, deep-copying ``a`` before each.
    if verbose: print "checking", stmt
    dict = {'a': deepcopy(a), 'b': b, 'c': c}
    exec stmt in dict
    vereq(dict['a'], res)
    t = type(a)
    m = getattr(t, meth)
    # Walk up to the class that actually defines ``meth``.
    while meth not in t.__dict__:
        t = t.__bases__[0]
    vereq(m, t.__dict__[meth])
    dict['a'] = deepcopy(a)
    m(dict['a'], b, c)
    vereq(dict['a'], res)
    dict['a'] = deepcopy(a)
    bm = getattr(dict['a'], meth)
    bm(b, c)
    vereq(dict['a'], res)
def testset3op(a, b, c, d, res, stmt="a[b:c]=d", meth="__setslice__"):
    # Check a three-argument mutating operation (e.g. slice assignment).
    # NOTE: unlike the other helpers, ``m`` is looked up *after* walking up
    # to the defining class here.
    if verbose: print "checking", stmt
    dict = {'a': deepcopy(a), 'b': b, 'c': c, 'd': d}
    exec stmt in dict
    vereq(dict['a'], res)
    t = type(a)
    while meth not in t.__dict__:
        t = t.__bases__[0]
    m = getattr(t, meth)
    vereq(m, t.__dict__[meth])
    dict['a'] = deepcopy(a)
    m(dict['a'], b, c, d)
    vereq(dict['a'], res)
    dict['a'] = deepcopy(a)
    bm = getattr(dict['a'], meth)
    bm(b, c, d)
    vereq(dict['a'], res)
def class_docstrings():
    # __doc__ must behave identically for classic and new-style classes:
    # present in the class __dict__ when given, None when omitted.
    class Classic:
        "A classic docstring."
    vereq(Classic.__doc__, "A classic docstring.")
    vereq(Classic.__dict__['__doc__'], "A classic docstring.")

    class Classic2:
        pass
    verify(Classic2.__doc__ is None)

    class NewStatic(object):
        "Another docstring."
    vereq(NewStatic.__doc__, "Another docstring.")
    vereq(NewStatic.__dict__['__doc__'], "Another docstring.")

    class NewStatic2(object):
        pass
    verify(NewStatic2.__doc__ is None)

    class NewDynamic(object):
        "Another docstring."
    vereq(NewDynamic.__doc__, "Another docstring.")
    vereq(NewDynamic.__dict__['__doc__'], "Another docstring.")

    class NewDynamic2(object):
        pass
    verify(NewDynamic2.__doc__ is None)
def lists():
    # Exercise the list operator protocol (slots) via the helpers above.
    if verbose: print "Testing list operations..."
    testbinop([1], [2], [1,2], "a+b", "__add__")
    testbinop([1,2,3], 2, 1, "b in a", "__contains__")
    testbinop([1,2,3], 4, 0, "b in a", "__contains__")
    testbinop([1,2,3], 1, 2, "a[b]", "__getitem__")
    testternop([1,2,3], 0, 2, [1,2], "a[b:c]", "__getslice__")
    testsetop([1], [2], [1,2], "a+=b", "__iadd__")
    testsetop([1,2], 3, [1,2,1,2,1,2], "a*=b", "__imul__")
    testunop([1,2,3], 3, "len(a)", "__len__")
    testbinop([1,2], 3, [1,2,1,2,1,2], "a*b", "__mul__")
    testbinop([1,2], 3, [1,2,1,2,1,2], "b*a", "__rmul__")
    testset2op([1,2], 1, 3, [1,3], "a[b]=c", "__setitem__")
    testset3op([1,2,3,4], 1, 3, [5,6], [1,5,6,4], "a[b:c]=d", "__setslice__")
def dicts():
    # Exercise the dict operator protocol, including the four equivalent
    # ways of iterating a dict (keys(), iter(), __iter__, unbound slot).
    if verbose: print "Testing dict operations..."
    testbinop({1:2}, {2:1}, -1, "cmp(a,b)", "__cmp__")
    testbinop({1:2,3:4}, 1, 1, "b in a", "__contains__")
    testbinop({1:2,3:4}, 2, 0, "b in a", "__contains__")
    testbinop({1:2,3:4}, 1, 2, "a[b]", "__getitem__")

    d = {1:2,3:4}
    l1 = []
    for i in d.keys(): l1.append(i)
    l = []
    for i in iter(d): l.append(i)
    vereq(l, l1)
    l = []
    for i in d.__iter__(): l.append(i)
    vereq(l, l1)
    l = []
    for i in dict.__iter__(d): l.append(i)
    vereq(l, l1)

    d = {1:2, 3:4}
    testunop(d, 2, "len(a)", "__len__")
    # repr() of a dict must round-trip through eval().
    vereq(eval(repr(d), {}), d)
    vereq(eval(d.__repr__(), {}), d)
    testset2op({1:2,3:4}, 2, 3, {1:2,2:3,3:4}, "a[b]=c", "__setitem__")
def dict_constructor():
    # Exercise every accepted (and rejected) form of the dict() constructor.
    if verbose:
        print "Testing dict constructor ..."
    d = dict()
    vereq(d, {})
    d = dict({})
    vereq(d, {})
    d = dict(items={})
    vereq(d, {})
    d = dict({1: 2, 'a': 'b'})
    vereq(d, {1: 2, 'a': 'b'})
    vereq(d, dict(d.items()))
    vereq(d, dict(items=d.iteritems()))

    # Scalars and malformed sequences must be rejected.
    for badarg in 0, 0L, 0j, "0", [0], (0,):
        try:
            dict(badarg)
        except TypeError:
            pass
        except ValueError:
            if badarg == "0":
                # It's a sequence, and its elements are also sequences (gotta
                # love strings <wink>), but they aren't of length 2, so this
                # one seemed better as a ValueError than a TypeError.
                pass
            else:
                raise TestFailed("no TypeError from dict(%r)" % badarg)
        else:
            raise TestFailed("no TypeError from dict(%r)" % badarg)

    # Only the keyword name 'items' is accepted.
    try:
        dict(senseless={})
    except TypeError:
        pass
    else:
        raise TestFailed("no TypeError from dict(senseless={})")

    try:
        dict({}, {})
    except TypeError:
        pass
    else:
        raise TestFailed("no TypeError from dict({}, {})")

    class Mapping:
        # Lacks a .keys() method; will be added later.
        dict = {1:2, 3:4, 'a':1j}

    try:
        dict(Mapping())
    except TypeError:
        pass
    else:
        raise TestFailed("no TypeError from dict(incomplete mapping)")

    # Once keys()/__getitem__ exist, the mapping protocol is enough.
    Mapping.keys = lambda self: self.dict.keys()
    Mapping.__getitem__ = lambda self, i: self.dict[i]
    d = dict(items=Mapping())
    vereq(d, Mapping.dict)

    # Init from sequence of iterable objects, each producing a 2-sequence.
    class AddressBookEntry:
        def __init__(self, first, last):
            self.first = first
            self.last = last
        def __iter__(self):
            return iter([self.first, self.last])

    d = dict([AddressBookEntry('Tim', 'Warsaw'),
              AddressBookEntry('Barry', 'Peters'),
              AddressBookEntry('Tim', 'Peters'),
              AddressBookEntry('Barry', 'Warsaw')])
    vereq(d, {'Barry': 'Warsaw', 'Tim': 'Peters'})

    d = dict(zip(range(4), range(1, 5)))
    vereq(d, dict([(i, i+1) for i in range(4)]))

    # Bad sequence lengths.
    for bad in [('tooshort',)], [('too', 'long', 'by 1')]:
        try:
            dict(bad)
        except ValueError:
            pass
        else:
            raise TestFailed("no ValueError from dict(%r)" % bad)
def test_dir():
    # dir() on locals, builtins, classic classes, new-style classes,
    # module subclasses, and proxied objects.
    if verbose:
        print "Testing dir() ..."
    junk = 12
    vereq(dir(), ['junk'])
    del junk

    # Just make sure these don't blow up!
    for arg in 2, 2L, 2j, 2e0, [2], "2", u"2", (2,), {2:2}, type, test_dir:
        dir(arg)

    # Try classic classes.
    class C:
        Cdata = 1
        def Cmethod(self): pass

    cstuff = ['Cdata', 'Cmethod', '__doc__', '__module__']
    vereq(dir(C), cstuff)
    verify('im_self' in dir(C.Cmethod))

    c = C()  # c.__doc__ is an odd thing to see here; ditto c.__module__.
    vereq(dir(c), cstuff)

    c.cdata = 2
    c.cmethod = lambda self: 0
    vereq(dir(c), cstuff + ['cdata', 'cmethod'])
    verify('im_self' in dir(c.Cmethod))

    class A(C):
        Adata = 1
        def Amethod(self): pass

    astuff = ['Adata', 'Amethod'] + cstuff
    vereq(dir(A), astuff)
    verify('im_self' in dir(A.Amethod))
    a = A()
    vereq(dir(a), astuff)
    verify('im_self' in dir(a.Amethod))
    a.adata = 42
    a.amethod = lambda self: 3
    vereq(dir(a), astuff + ['adata', 'amethod'])

    # The same, but with new-style classes.  Since these have object as a
    # base class, a lot more gets sucked in.
    def interesting(strings):
        return [s for s in strings if not s.startswith('_')]

    class C(object):
        Cdata = 1
        def Cmethod(self): pass

    cstuff = ['Cdata', 'Cmethod']
    vereq(interesting(dir(C)), cstuff)

    c = C()
    vereq(interesting(dir(c)), cstuff)
    verify('im_self' in dir(C.Cmethod))

    c.cdata = 2
    c.cmethod = lambda self: 0
    vereq(interesting(dir(c)), cstuff + ['cdata', 'cmethod'])
    verify('im_self' in dir(c.Cmethod))

    class A(C):
        Adata = 1
        def Amethod(self): pass

    astuff = ['Adata', 'Amethod'] + cstuff
    vereq(interesting(dir(A)), astuff)
    verify('im_self' in dir(A.Amethod))
    a = A()
    vereq(interesting(dir(a)), astuff)
    a.adata = 42
    a.amethod = lambda self: 3
    vereq(interesting(dir(a)), astuff + ['adata', 'amethod'])
    verify('im_self' in dir(a.Amethod))

    # Try a module subclass.
    import sys
    class M(type(sys)):
        pass
    minstance = M()
    minstance.b = 2
    minstance.a = 1
    vereq(dir(minstance), ['a', 'b'])

    # dir() must cope with __dict__ being overridden by a non-dict.
    class M2(M):
        def getdict(self):
            return "Not a dict!"
        __dict__ = property(getdict)

    m2instance = M2()
    m2instance.b = 2
    m2instance.a = 1
    vereq(m2instance.__dict__, "Not a dict!")
    try:
        dir(m2instance)
    except TypeError:
        pass

    # Two essentially featureless objects, just inheriting stuff from
    # object.
    vereq(dir(None), dir(Ellipsis))

    # Nasty test case for proxied objects
    class Wrapper(object):
        def __init__(self, obj):
            self.__obj = obj
        def __repr__(self):
            return "Wrapper(%s)" % repr(self.__obj)
        def __getitem__(self, key):
            return Wrapper(self.__obj[key])
        def __len__(self):
            return len(self.__obj)
        def __getattr__(self, name):
            return Wrapper(getattr(self.__obj, name))

    class C(object):
        def __getclass(self):
            return Wrapper(type(self))
        __class__ = property(__getclass)

    dir(C()) # This used to segfault
# Table of binary operator names -> their symbol or function name.
binops = {
    'add': '+',
    'sub': '-',
    'mul': '*',
    'div': '/',
    'mod': '%',
    'divmod': 'divmod',
    'pow': '**',
    'lshift': '<<',
    'rshift': '>>',
    'and': '&',
    'xor': '^',
    'or': '|',
    'cmp': 'cmp',
    'lt': '<',
    'le': '<=',
    'eq': '==',
    'ne': '!=',
    'gt': '>',
    'ge': '>=',
    }

# Turn the table into evaluatable expressions: function-style names become
# calls ("divmod(a, b)"), symbols become infix expressions ("a + b").
for name, expr in binops.items():
    if expr.islower():
        expr = expr + "(a, b)"
    else:
        expr = 'a %s b' % expr
    binops[name] = expr
# Table of unary operator names -> their symbol or function name.
unops = {
    'pos': '+',
    'neg': '-',
    'abs': 'abs',
    'invert': '~',
    'int': 'int',
    'long': 'long',
    'float': 'float',
    'oct': 'oct',
    'hex': 'hex',
    }

# Same expansion as for binops: "abs(a)" vs. "~ a".
for name, expr in unops.items():
    if expr.islower():
        expr = expr + "(a)"
    else:
        expr = '%s a' % expr
    unops[name] = expr
def numops(a, b, skip=()):
    """Run every applicable binary and unary operator test on a and b.

    For each entry in the module-level binops/unops tables (except names
    listed in *skip*), if type(a) implements the corresponding __xxx__
    slot, evaluate the expression and check it via testbinop/testunop.

    Fixes: the default for *skip* was a mutable list ([]); it is only ever
    tested for membership, so an empty tuple is a safe, backward-compatible
    replacement.  The local previously named ``dict`` shadowed the builtin
    and is renamed ``env``.
    """
    env = {'a': a, 'b': b}      # namespace in which the expressions run
    for name, expr in binops.items():
        if name not in skip:
            name = "__%s__" % name
            if hasattr(a, name):
                res = eval(expr, env)
                testbinop(a, b, res, expr, name)
    for name, expr in unops.items():
        if name not in skip:
            name = "__%s__" % name
            if hasattr(a, name):
                res = eval(expr, env)
                testunop(a, res, expr, name)
def ints():
    """Exercise int: the generic operator protocol, explicit __nonzero__
    calls, and the NotImplemented -> TypeError fallback in a subclass."""
    if verbose: print "Testing int operations..."
    numops(100, 3)
    # The following crashes in Python 2.2
    vereq((1).__nonzero__(), 1)
    vereq((0).__nonzero__(), 0)
    # This returns 'NotImplemented' in Python 2.2
    class C(int):
        def __add__(self, other):
            return NotImplemented
    # Returning NotImplemented (with no __radd__ on str) must surface
    # as a TypeError, not be passed through as a value.
    try:
        C() + ""
    except TypeError:
        pass
    else:
        raise TestFailed, "NotImplemented should have caused TypeError"
def longs():
if verbose: print "Testing long operations..."
numops(100L, 3L)
def floats():
if verbose: print "Testing float operations..."
numops(100.0, 3.0)
def complexes():
    """Exercise complex operands, plus a __slots__ complex subclass that
    carries a per-instance repr precision."""
    if verbose: print "Testing complex operations..."
    # Ordering comparisons and real-number conversions are undefined for
    # complex, so skip those operator checks.
    numops(100.0j, 3.0j, skip=['lt', 'le', 'gt', 'ge', 'int', 'long', 'float'])
    class Number(complex):
        __slots__ = ['prec']
        def __new__(cls, *args, **kwds):
            result = complex.__new__(cls, *args)
            # Extra keyword-only state; defaults to 12 significant digits.
            result.prec = kwds.get('prec', 12)
            return result
        def __repr__(self):
            prec = self.prec
            if self.imag == 0.0:
                return "%.*g" % (prec, self.real)
            if self.real == 0.0:
                return "%.*gj" % (prec, self.imag)
            return "(%.*g+%.*gj)" % (prec, self.real, prec, self.imag)
        __str__ = __repr__
    a = Number(3.14, prec=6)
    vereq(`a`, "3.14")
    vereq(a.prec, 6)
    # Re-wrapping an existing Number: prec comes from the keyword again.
    a = Number(a, prec=2)
    vereq(`a`, "3.1")
    vereq(a.prec, 2)
    a = Number(234.5)
    vereq(`a`, "234.5")
    vereq(a.prec, 12)
def spamlists():
    """Exercise xxsubtype.spamlist (a C-implemented list subtype) through
    the sequence slots, and check Python-level subclassing of it."""
    if verbose: print "Testing spamlist operations..."
    import copy, xxsubtype as spam
    def spamlist(l, memo=None):
        import xxsubtype as spam
        return spam.spamlist(l)
    # This is an ugly hack:
    copy._deepcopy_dispatch[spam.spamlist] = spamlist
    testbinop(spamlist([1]), spamlist([2]), spamlist([1,2]), "a+b", "__add__")
    testbinop(spamlist([1,2,3]), 2, 1, "b in a", "__contains__")
    testbinop(spamlist([1,2,3]), 4, 0, "b in a", "__contains__")
    testbinop(spamlist([1,2,3]), 1, 2, "a[b]", "__getitem__")
    testternop(spamlist([1,2,3]), 0, 2, spamlist([1,2]),
               "a[b:c]", "__getslice__")
    testsetop(spamlist([1]), spamlist([2]), spamlist([1,2]),
              "a+=b", "__iadd__")
    testsetop(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "a*=b", "__imul__")
    testunop(spamlist([1,2,3]), 3, "len(a)", "__len__")
    testbinop(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "a*b", "__mul__")
    testbinop(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "b*a", "__rmul__")
    testset2op(spamlist([1,2]), 1, 3, spamlist([1,3]), "a[b]=c", "__setitem__")
    testset3op(spamlist([1,2,3,4]), 1, 3, spamlist([5,6]),
               spamlist([1,5,6,4]), "a[b:c]=d", "__setslice__")
    # Test subclassing
    class C(spam.spamlist):
        def foo(self): return 1
    a = C()
    vereq(a, [])
    vereq(a.foo(), 1)
    a.append(100)
    vereq(a, [100])
    # getstate/setstate are extra state provided by the C base type.
    vereq(a.getstate(), 0)
    a.setstate(42)
    vereq(a.getstate(), 42)
def spamdicts():
    """Exercise xxsubtype.spamdict (a C-implemented dict subtype) through
    the mapping slots, iteration, and Python-level subclassing."""
    if verbose: print "Testing spamdict operations..."
    import copy, xxsubtype as spam
    def spamdict(d, memo=None):
        import xxsubtype as spam
        sd = spam.spamdict()
        for k, v in d.items(): sd[k] = v
        return sd
    # This is an ugly hack:
    copy._deepcopy_dispatch[spam.spamdict] = spamdict
    testbinop(spamdict({1:2}), spamdict({2:1}), -1, "cmp(a,b)", "__cmp__")
    testbinop(spamdict({1:2,3:4}), 1, 1, "b in a", "__contains__")
    testbinop(spamdict({1:2,3:4}), 2, 0, "b in a", "__contains__")
    testbinop(spamdict({1:2,3:4}), 1, 2, "a[b]", "__getitem__")
    d = spamdict({1:2,3:4})
    # All four iteration spellings must visit the same keys in the same
    # order as .keys().
    l1 = []
    for i in d.keys(): l1.append(i)
    l = []
    for i in iter(d): l.append(i)
    vereq(l, l1)
    l = []
    for i in d.__iter__(): l.append(i)
    vereq(l, l1)
    l = []
    for i in type(spamdict({})).__iter__(d): l.append(i)
    vereq(l, l1)
    straightd = {1:2, 3:4}
    spamd = spamdict(straightd)
    testunop(spamd, 2, "len(a)", "__len__")
    testunop(spamd, repr(straightd), "repr(a)", "__repr__")
    testset2op(spamdict({1:2,3:4}), 2, 3, spamdict({1:2,2:3,3:4}),
               "a[b]=c", "__setitem__")
    # Test subclassing
    class C(spam.spamdict):
        def foo(self): return 1
    a = C()
    vereq(a.items(), [])
    vereq(a.foo(), 1)
    a['foo'] = 'bar'
    vereq(a.items(), [('foo', 'bar')])
    # getstate/setstate are extra state provided by the C base type.
    vereq(a.getstate(), 0)
    a.setstate(100)
    vereq(a.getstate(), 100)
def pydicts():
if verbose: print "Testing Python subclass of dict..."
verify(issubclass(dict, dict))
verify(isinstance({}, dict))
d = dict()
vereq(d, {})
verify(d.__class__ is dict)
verify(isinstance(d, dict))
class C(dict):
state = -1
def __init__(self, *a, **kw):
if a:
vereq(len(a), 1)
self.state = a[0]
if kw:
for k, v in kw.items(): self[v] = k
def __getitem__(self, key):
return self.get(key, 0)
def __setitem__(self, key, value):
verify(isinstance(key, type(0)))
dict.__setitem__(self, key, value)
def setstate(self, state):
self.state = state
def getstate(self):
return self.state
verify(issubclass(C, dict))
a1 = C(12)
vereq(a1.state, 12)
a2 = C(foo=1, bar=2)
vereq(a2[1] == 'foo' and a2[2], 'bar')
a = C()
vereq(a.state, -1)
vereq(a.getstate(), -1)
a.setstate(0)
vereq(a.state, 0)
vereq(a.getstate(), 0)
a.setstate(10)
vereq(a.state, 10)
vereq(a.getstate(), 10)
vereq(a[42], 0)
a[42] = 24
vereq(a[42], 24)
if verbose: print "pydict stress test ..."
N = 50
for i in range(N):
a[i] = C()
for j in range(N):
a[i][j] = i*j
for i in range(N):
for j in range(N):
vereq(a[i][j], i*j)
def pylists():
    """Exercise Python-level subclassing of list: overridden __getitem__
    and (Python 2 only) __getslice__ must be honored by the syntax."""
    if verbose: print "Testing Python subclass of list..."
    class C(list):
        def __getitem__(self, i):
            return list.__getitem__(self, i) + 100
        def __getslice__(self, i, j):
            return (i, j)
    a = C()
    a.extend([0,1,2])
    vereq(a[0], 100)
    vereq(a[1], 101)
    vereq(a[2], 102)
    # Slice syntax routes through __getslice__, returning the raw indices.
    vereq(a[100:200], (100,200))
def metaclass():
if verbose: print "Testing __metaclass__..."
class C:
__metaclass__ = type
def __init__(self):
self.__state = 0
def getstate(self):
return self.__state
def setstate(self, state):
self.__state = state
a = C()
vereq(a.getstate(), 0)
a.setstate(10)
vereq(a.getstate(), 10)
class D:
class __metaclass__(type):
def myself(cls): return cls
vereq(D.myself(), D)
d = D()
verify(d.__class__ is D)
class M1(type):
def __new__(cls, name, bases, dict):
dict['__spam__'] = 1
return type.__new__(cls, name, bases, dict)
class C:
__metaclass__ = M1
vereq(C.__spam__, 1)
c = C()
vereq(c.__spam__, 1)
class _instance(object):
pass
class M2(object):
def __new__(cls, name, bases, dict):
self = object.__new__(cls)
self.name = name
self.bases = bases
self.dict = dict
return self
__new__ = staticmethod(__new__)
def __call__(self):
it = _instance()
# Early binding of methods
for key in self.dict:
if key.startswith("__"):
continue
setattr(it, key, self.dict[key].__get__(it, self))
return it
class C:
__metaclass__ = M2
def spam(self):
return 42
vereq(C.name, 'C')
vereq(C.bases, ())
verify('spam' in C.dict)
c = C()
vereq(c.spam(), 42)
# More metaclass examples
class autosuper(type):
# Automatically add __super to the class
# This trick only works for dynamic classes
def __new__(metaclass, name, bases, dict):
cls = super(autosuper, metaclass).__new__(metaclass,
name, bases, dict)
# Name mangling for __super removes leading underscores
while name[:1] == "_":
name = name[1:]
if name:
name = "_%s__super" % name
else:
name = "__super"
setattr(cls, name, super(cls))
return cls
class A:
__metaclass__ = autosuper
def meth(self):
return "A"
class B(A):
def meth(self):
return "B" + self.__super.meth()
class C(A):
def meth(self):
return "C" + self.__super.meth()
class D(C, B):
def meth(self):
return "D" + self.__super.meth()
vereq(D().meth(), "DCBA")
class E(B, C):
def meth(self):
return "E" + self.__super.meth()
vereq(E().meth(), "EBCA")
class autoproperty(type):
# Automatically create property attributes when methods
# named _get_x and/or _set_x are found
def __new__(metaclass, name, bases, dict):
hits = {}
for key, val in dict.iteritems():
if key.startswith("_get_"):
key = key[5:]
get, set = hits.get(key, (None, None))
get = val
hits[key] = get, set
elif key.startswith("_set_"):
key = key[5:]
get, set = hits.get(key, (None, None))
set = val
hits[key] = get, set
for key, (get, set) in hits.iteritems():
dict[key] = property(get, set)
return super(autoproperty, metaclass).__new__(metaclass,
name, bases, dict)
class A:
__metaclass__ = autoproperty
def _get_x(self):
return -self.__x
def _set_x(self, x):
self.__x = -x
a = A()
verify(not hasattr(a, "x"))
a.x = 12
vereq(a.x, 12)
vereq(a._A__x, -12)
class multimetaclass(autoproperty, autosuper):
# Merge of multiple cooperating metaclasses
pass
class A:
__metaclass__ = multimetaclass
def _get_x(self):
return "A"
class B(A):
def _get_x(self):
return "B" + self.__super._get_x()
class C(A):
def _get_x(self):
return "C" + self.__super._get_x()
class D(C, B):
def _get_x(self):
return "D" + self.__super._get_x()
vereq(D().x, "DCBA")
# Make sure type(x) doesn't call x.__class__.__init__
class T(type):
counter = 0
def __init__(self, *args):
T.counter += 1
class C:
__metaclass__ = T
vereq(T.counter, 1)
a = C()
vereq(type(a), C)
vereq(T.counter, 1)
class C(object): pass
c = C()
try: c()
except TypeError: pass
else: raise TestError, "calling object w/o call method should raise TypeError"
def pymods():
    """Exercise subclassing the module type: attribute get/set/del on an
    instance must route through the overridden hooks, in order."""
    if verbose: print "Testing Python subclass of module..."
    log = []
    import sys
    MT = type(sys)
    class MM(MT):
        def __init__(self):
            MT.__init__(self)
        def __getattribute__(self, name):
            log.append(("getattr", name))
            return MT.__getattribute__(self, name)
        def __setattr__(self, name, value):
            log.append(("setattr", name, value))
            MT.__setattr__(self, name, value)
        def __delattr__(self, name):
            log.append(("delattr", name))
            MT.__delattr__(self, name)
    a = MM()
    a.foo = 12
    x = a.foo
    del a.foo
    # One log entry per operation, in execution order.
    vereq(log, [("setattr", "foo", 12),
                ("getattr", "foo"),
                ("delattr", "foo")])
def multi():
    """Exercise multiple inheritance: builtin+new-style bases, classic
    and new-style MRO mixing, and rejection of all-classic bases."""
    if verbose: print "Testing multiple inheritance..."
    class C(object):
        def __init__(self):
            self.__state = 0
        def getstate(self):
            return self.__state
        def setstate(self, state):
            self.__state = state
    a = C()
    vereq(a.getstate(), 0)
    a.setstate(10)
    vereq(a.getstate(), 10)
    class D(dict, C):
        def __init__(self):
            type({}).__init__(self)
            C.__init__(self)
    d = D()
    vereq(d.keys(), [])
    d["hello"] = "world"
    vereq(d.items(), [("hello", "world")])
    vereq(d["hello"], "world")
    vereq(d.getstate(), 0)
    d.setstate(10)
    vereq(d.getstate(), 10)
    vereq(D.__mro__, (D, dict, C, object))
    # SF bug #442833
    class Node(object):
        def __int__(self):
            return int(self.foo())
        def foo(self):
            return "23"
    class Frag(Node, list):
        def foo(self):
            return "42"
    vereq(Node().__int__(), 23)
    vereq(int(Node()), 23)
    vereq(Frag().__int__(), 42)
    vereq(int(Frag()), 42)
    # MI mixing classic and new-style classes.
    class A:
        x = 1
    class B(A):
        pass
    class C(A):
        x = 2
    class D(B, C):
        pass
    # Classic depth-first lookup finds A.x via B before C.x.
    vereq(D.x, 1)
    # Classic MRO is preserved for a classic base class.
    class E(D, object):
        pass
    vereq(E.__mro__, (E, D, B, A, C, object))
    vereq(E.x, 1)
    # But with a mix of classic bases, their MROs are combined using
    # new-style MRO.
    class F(B, C, object):
        pass
    vereq(F.__mro__, (F, B, C, A, object))
    vereq(F.x, 2)
    # Try something else.
    class C:
        def cmethod(self):
            return "C a"
        def all_method(self):
            return "C b"
    class M1(C, object):
        def m1method(self):
            return "M1 a"
        def all_method(self):
            return "M1 b"
    vereq(M1.__mro__, (M1, C, object))
    m = M1()
    vereq(m.cmethod(), "C a")
    vereq(m.m1method(), "M1 a")
    vereq(m.all_method(), "M1 b")
    class D(C):
        def dmethod(self):
            return "D a"
        def all_method(self):
            return "D b"
    class M2(object, D):
        def m2method(self):
            return "M2 a"
        def all_method(self):
            return "M2 b"
    vereq(M2.__mro__, (M2, object, D, C))
    m = M2()
    vereq(m.cmethod(), "C a")
    vereq(m.dmethod(), "D a")
    vereq(m.m2method(), "M2 a")
    vereq(m.all_method(), "M2 b")
    class M3(M1, object, M2):
        def m3method(self):
            return "M3 a"
        def all_method(self):
            return "M3 b"
    # XXX Expected this (the commented-out result):
    # vereq(M3.__mro__, (M3, M1, M2, object, D, C))
    vereq(M3.__mro__, (M3, M1, M2, D, C, object)) # XXX ?
    m = M3()
    vereq(m.cmethod(), "C a")
    vereq(m.dmethod(), "D a")
    vereq(m.m1method(), "M1 a")
    vereq(m.m2method(), "M2 a")
    vereq(m.m3method(), "M3 a")
    vereq(m.all_method(), "M3 b")
    class Classic:
        pass
    # A new-style metaclass with only classic bases must be rejected.
    try:
        class New(Classic):
            __metaclass__ = type
    except TypeError:
        pass
    else:
        raise TestFailed, "new class with only classic bases - shouldn't be"
def diamond():
    """Exercise diamond inheritance: method resolution must follow the
    documented MRO for every combination of the B/C legs."""
    if verbose: print "Testing multiple inheritance special cases..."
    class A(object):
        def spam(self): return "A"
    vereq(A().spam(), "A")
    class B(A):
        def boo(self): return "B"
        def spam(self): return "B"
    vereq(B().spam(), "B")
    vereq(B().boo(), "B")
    class C(A):
        def boo(self): return "C"
    vereq(C().spam(), "A")
    vereq(C().boo(), "C")
    class D(B, C): pass
    vereq(D().spam(), "B")
    vereq(D().boo(), "B")
    vereq(D.__mro__, (D, B, C, A, object))
    class E(C, B): pass
    vereq(E().spam(), "B")
    vereq(E().boo(), "C")
    vereq(E.__mro__, (E, C, B, A, object))
    # F and G combine D and E in both orders; base order decides ties.
    class F(D, E): pass
    vereq(F().spam(), "B")
    vereq(F().boo(), "B")
    vereq(F.__mro__, (F, D, E, B, C, A, object))
    class G(E, D): pass
    vereq(G().spam(), "B")
    vereq(G().boo(), "C")
    vereq(G.__mro__, (G, E, D, C, B, A, object))
def objects():
    """Exercise the base object class: no instance dict, attribute
    assignment rejected; a trivial subclass gains a __dict__."""
    if verbose: print "Testing object class..."
    a = object()
    vereq(a.__class__, object)
    vereq(type(a), object)
    b = object()
    verify(a is not b)
    verify(not hasattr(a, "foo"))
    # Plain object() instances have no __dict__, so setattr must fail.
    try:
        a.foo = 12
    except (AttributeError, TypeError):
        pass
    else:
        verify(0, "object() should not allow setting a foo attribute")
    verify(not hasattr(object(), "__dict__"))
    class Cdict(object):
        pass
    x = Cdict()
    vereq(x.__dict__, {})
    x.foo = 1
    vereq(x.foo, 1)
    vereq(x.__dict__, {'foo': 1})
def slots():
    """Exercise __slots__: no __dict__, get/set/del of slot values,
    instance leaks (via a __del__ counter), cycles, and lookup leaks."""
    if verbose: print "Testing __slots__..."
    class C0(object):
        __slots__ = []
    x = C0()
    verify(not hasattr(x, "__dict__"))
    verify(not hasattr(x, "foo"))
    class C1(object):
        __slots__ = ['a']
    x = C1()
    verify(not hasattr(x, "__dict__"))
    # A slot is unset until first assignment.
    verify(not hasattr(x, "a"))
    x.a = 1
    vereq(x.a, 1)
    x.a = None
    veris(x.a, None)
    del x.a
    verify(not hasattr(x, "a"))
    class C3(object):
        __slots__ = ['a', 'b', 'c']
    x = C3()
    verify(not hasattr(x, "__dict__"))
    verify(not hasattr(x, 'a'))
    verify(not hasattr(x, 'b'))
    verify(not hasattr(x, 'c'))
    x.a = 1
    x.b = 2
    x.c = 3
    vereq(x.a, 1)
    vereq(x.b, 2)
    vereq(x.c, 3)
    # Test leaks
    class Counted(object):
        counter = 0 # counts the number of instances alive
        def __init__(self):
            Counted.counter += 1
        def __del__(self):
            Counted.counter -= 1
    class C(object):
        __slots__ = ['a', 'b', 'c']
    x = C()
    x.a = Counted()
    x.b = Counted()
    x.c = Counted()
    vereq(Counted.counter, 3)
    # Deleting x must drop all three slot references.
    del x
    vereq(Counted.counter, 0)
    class D(C):
        pass
    x = D()
    x.a = Counted()
    x.z = Counted()     # D (unlike C) has a __dict__, so .z works
    vereq(Counted.counter, 2)
    del x
    vereq(Counted.counter, 0)
    class E(D):
        __slots__ = ['e']
    x = E()
    x.a = Counted()
    x.z = Counted()
    x.e = Counted()
    vereq(Counted.counter, 3)
    del x
    vereq(Counted.counter, 0)
    # Test cyclical leaks [SF bug 519621]
    class F(object):
        __slots__ = ['a', 'b']
    log = []
    s = F()
    s.a = [Counted(), s]
    vereq(Counted.counter, 1)
    s = None
    import gc
    gc.collect()
    vereq(Counted.counter, 0)
    # Test lookup leaks [SF bug 572567]
    import sys,gc
    class G(object):
        def __cmp__(self, other):
            return 0
    g = G()
    orig_objects = len(gc.get_objects())
    for i in xrange(10):
        g==g
    new_objects = len(gc.get_objects())
    # Repeated comparisons must not grow the object population.
    vereq(orig_objects, new_objects)
def dynamics():
    """Exercise post-creation class mutation: attributes/methods/dunders
    added to a class must be visible through existing instances, plus
    int/long subclass repetition and dynamic-metaclass comparison."""
    if verbose: print "Testing class attribute propagation..."
    class D(object):
        pass
    class E(D):
        pass
    class F(D):
        pass
    D.foo = 1
    vereq(D.foo, 1)
    # Test that dynamic attributes are inherited
    vereq(E.foo, 1)
    vereq(F.foo, 1)
    # Test dynamic instances
    class C(object):
        pass
    a = C()
    verify(not hasattr(a, "foobar"))
    C.foobar = 2
    vereq(a.foobar, 2)
    C.method = lambda self: 42
    vereq(a.method(), 42)
    C.__repr__ = lambda self: "C()"
    vereq(repr(a), "C()")
    C.__int__ = lambda self: 100
    vereq(int(a), 100)
    vereq(a.foobar, 2)
    verify(not hasattr(a, "spam"))
    def mygetattr(self, name):
        if name == "spam":
            return "spam"
        raise AttributeError
    C.__getattr__ = mygetattr
    vereq(a.spam, "spam")
    a.new = 12
    vereq(a.new, 12)
    def mysetattr(self, name, value):
        if name == "spam":
            raise AttributeError
        return object.__setattr__(self, name, value)
    C.__setattr__ = mysetattr
    try:
        a.spam = "not spam"
    except AttributeError:
        pass
    else:
        verify(0, "expected AttributeError")
    vereq(a.spam, "spam")
    class D(C):
        pass
    d = D()
    d.foo = 1
    vereq(d.foo, 1)
    # Test handling of int*seq and seq*int
    class I(int):
        pass
    vereq("a"*I(2), "aa")
    vereq(I(2)*"a", "aa")
    vereq(2*I(3), 6)
    vereq(I(3)*2, 6)
    vereq(I(3)*I(2), 6)
    # Test handling of long*seq and seq*long
    class L(long):
        pass
    vereq("a"*L(2L), "aa")
    vereq(L(2L)*"a", "aa")
    vereq(2*L(3), 6)
    vereq(L(3)*2, 6)
    vereq(L(3)*L(2), 6)
    # Test comparison of classes with dynamic metaclasses
    class dynamicmetaclass(type):
        pass
    class someclass:
        __metaclass__ = dynamicmetaclass
    verify(someclass != object)
def errors():
    """Exercise class-creation errors: incompatible builtin bases,
    non-type bases, non-subclassable types, and malformed __slots__."""
    if verbose: print "Testing errors..."
    # list and dict have conflicting instance layouts.
    try:
        class C(list, dict):
            pass
    except TypeError:
        pass
    else:
        verify(0, "inheritance from both list and dict should be illegal")
    try:
        class C(object, None):
            pass
    except TypeError:
        pass
    else:
        verify(0, "inheritance from non-type should be illegal")
    class Classic:
        pass
    # type(len) is the builtin-function type, which is not subclassable.
    try:
        class C(type(len)):
            pass
    except TypeError:
        pass
    else:
        verify(0, "inheritance from CFunction should be illegal")
    try:
        class C(object):
            __slots__ = 1
    except TypeError:
        pass
    else:
        verify(0, "__slots__ = 1 should be illegal")
    try:
        class C(object):
            __slots__ = [1]
    except TypeError:
        pass
    else:
        verify(0, "__slots__ = [1] should be illegal")
def classmethods():
    """Exercise classmethod: implicit class argument, inheritance,
    direct __get__ use (SF bug 528132), and super() access (SF 535444)."""
    if verbose: print "Testing class methods..."
    class C(object):
        def foo(*a): return a
        goo = classmethod(foo)
    c = C()
    vereq(C.goo(1), (C, 1))
    vereq(c.goo(1), (C, 1))
    vereq(c.foo(1), (c, 1))
    class D(C):
        pass
    d = D()
    # Inherited classmethod binds to the subclass, not the defining class.
    vereq(D.goo(1), (D, 1))
    vereq(d.goo(1), (D, 1))
    vereq(d.foo(1), (d, 1))
    vereq(D.foo(d, 1), (d, 1))
    # Test for a specific crash (SF bug 528132)
    def f(cls, arg): return (cls, arg)
    ff = classmethod(f)
    vereq(ff.__get__(0, int)(42), (int, 42))
    vereq(ff.__get__(0)(42), (int, 42))
    # Test super() with classmethods (SF bug 535444)
    veris(C.goo.im_self, C)
    veris(D.goo.im_self, D)
    veris(super(D,D).goo.im_self, D)
    veris(super(D,d).goo.im_self, D)
    vereq(super(D,D).goo(), (D,))
    vereq(super(D,d).goo(), (D,))
def staticmethods():
if verbose: print "Testing static methods..."
class C(object):
def foo(*a): return a
goo = staticmethod(foo)
c = C()
vereq(C.goo(1), (1,))
vereq(c.goo(1), (1,))
vereq(c.foo(1), (c, 1,))
class D(C):
pass
d = D()
vereq(D.goo(1), (1,))
vereq(d.goo(1), (1,))
vereq(d.foo(1), (d, 1))
vereq(D.foo(d, 1), (d, 1))
def classic():
    """Exercise classmethod on classic (old-style) classes, plus unbound
    and bound method behavior of a classic method."""
    if verbose: print "Testing classic classes..."
    class C:
        def foo(*a): return a
        goo = classmethod(foo)
    c = C()
    vereq(C.goo(1), (C, 1))
    vereq(c.goo(1), (C, 1))
    vereq(c.foo(1), (c, 1))
    class D(C):
        pass
    d = D()
    vereq(D.goo(1), (D, 1))
    vereq(d.goo(1), (D, 1))
    vereq(d.foo(1), (d, 1))
    vereq(D.foo(d, 1), (d, 1))
    class E: # *not* subclassing from C
        foo = C.foo
    vereq(E().foo, C.foo) # i.e., unbound
    verify(repr(C.foo.__get__(C())).startswith("<bound method "))
def compattr():
    """Exercise a hand-written descriptor class (get/set/delete hooks)
    attached as a computed attribute; the getter auto-increments."""
    if verbose: print "Testing computed attributes..."
    class C(object):
        class computed_attribute(object):
            def __init__(self, get, set=None, delete=None):
                self.__get = get
                self.__set = set
                self.__delete = delete
            def __get__(self, obj, type=None):
                return self.__get(obj)
            def __set__(self, obj, value):
                return self.__set(obj, value)
            def __delete__(self, obj):
                return self.__delete(obj)
        def __init__(self):
            self.__x = 0
        def __get_x(self):
            # Reading x returns the current value and bumps it by one.
            x = self.__x
            self.__x = x+1
            return x
        def __set_x(self, x):
            self.__x = x
        def __delete_x(self):
            del self.__x
        x = computed_attribute(__get_x, __set_x, __delete_x)
    a = C()
    vereq(a.x, 0)
    vereq(a.x, 1)
    a.x = 10
    vereq(a.x, 10)
    vereq(a.x, 11)
    del a.x
    vereq(hasattr(a, 'x'), 0)
def newslot():
if verbose: print "Testing __new__ slot override..."
class C(list):
def __new__(cls):
self = list.__new__(cls)
self.foo = 1
return self
def __init__(self):
self.foo = self.foo + 2
a = C()
vereq(a.foo, 3)
verify(a.__class__ is C)
class D(C):
pass
b = D()
vereq(b.foo, 3)
verify(b.__class__ is D)
def altmro():
    """Exercise type.mro() and a metaclass that overrides mro() to return
    the reversed order (so lookups start at object's side)."""
    if verbose: print "Testing mro() and overriding it..."
    class A(object):
        def f(self): return "A"
    class B(A):
        pass
    class C(A):
        def f(self): return "C"
    class D(B, C):
        pass
    vereq(D.mro(), [D, B, C, A, object])
    vereq(D.__mro__, (D, B, C, A, object))
    vereq(D().f(), "C")
    class PerverseMetaType(type):
        def mro(cls):
            L = type.mro(cls)
            L.reverse()
            return L
    class X(A,B,C,D):
        __metaclass__ = PerverseMetaType
    # The custom mro() result is what __mro__ records and lookup uses.
    vereq(X.__mro__, (object, A, C, B, D, X))
    vereq(X().f(), "A")
def overloading():
    """Exercise attribute and item/slice operator overloading on a
    new-style class; "foo" is special-cased to record the operation."""
    if verbose: print "Testing operator overloading..."
    class B(object):
        "Intermediate class because object doesn't have a __setattr__"
    class C(B):
        def __getattr__(self, name):
            if name == "foo":
                return ("getattr", name)
            else:
                raise AttributeError
        def __setattr__(self, name, value):
            if name == "foo":
                self.setattr = (name, value)
            else:
                return B.__setattr__(self, name, value)
        def __delattr__(self, name):
            if name == "foo":
                self.delattr = name
            else:
                return B.__delattr__(self, name)
        def __getitem__(self, key):
            return ("getitem", key)
        def __setitem__(self, key, value):
            self.setitem = (key, value)
        def __delitem__(self, key):
            self.delitem = key
        # __*slice__ hooks are Python 2 only.
        def __getslice__(self, i, j):
            return ("getslice", i, j)
        def __setslice__(self, i, j, value):
            self.setslice = (i, j, value)
        def __delslice__(self, i, j):
            self.delslice = (i, j)
    a = C()
    vereq(a.foo, ("getattr", "foo"))
    a.foo = 12
    vereq(a.setattr, ("foo", 12))
    del a.foo
    vereq(a.delattr, "foo")
    vereq(a[12], ("getitem", 12))
    a[12] = 21
    vereq(a.setitem, (12, 21))
    del a[12]
    vereq(a.delitem, 12)
    vereq(a[0:10], ("getslice", 0, 10))
    a[0:10] = "foo"
    vereq(a.setslice, (0, 10, "foo"))
    del a[0:10]
    vereq(a.delslice, (0, 10))
def methods():
    """Exercise method objects on new-style classes: rebinding a method
    and a bound method as class attributes, and unbound-method identity."""
    if verbose: print "Testing methods..."
    class C(object):
        def __init__(self, x):
            self.x = x
        def foo(self):
            return self.x
    c1 = C(1)
    vereq(c1.foo(), 1)
    class D(C):
        boo = C.foo     # unbound: rebinds to the D instance
        goo = c1.foo    # bound: stays attached to c1
    d2 = D(2)
    vereq(d2.foo(), 2)
    vereq(d2.boo(), 2)
    vereq(d2.goo(), 1)
    class E(object):
        foo = C.foo
    vereq(E().foo, C.foo) # i.e., unbound
    verify(repr(C.foo.__get__(C(1))).startswith("<bound method "))
def specials():
    # Test operators like __hash__ for which a built-in default exists
    """Exercise the default and overridden behavior of hash/cmp/eq/str/
    repr/contains on new-style classes, cross-type __cmp__ safety, and a
    str subclass whose __str__ returns self (print recursion guard)."""
    if verbose: print "Testing special operators..."
    # Test the default behavior for static classes
    class C(object):
        def __getitem__(self, i):
            if 0 <= i < 10: return i
            raise IndexError
    c1 = C()
    c2 = C()
    verify(not not c1)
    # Defaults: hash is id, cmp/eq are identity-based.
    vereq(hash(c1), id(c1))
    vereq(cmp(c1, c2), cmp(id(c1), id(c2)))
    vereq(c1, c1)
    verify(c1 != c2)
    verify(not c1 != c1)
    verify(not c1 == c2)
    # Note that the module name appears in str/repr, and that varies
    # depending on whether this test is run standalone or from a framework.
    verify(str(c1).find('C object at ') >= 0)
    vereq(str(c1), repr(c1))
    # With only __getitem__, "in" falls back to sequential probing.
    verify(-1 not in c1)
    for i in range(10):
        verify(i in c1)
    verify(10 not in c1)
    # Test the default behavior for dynamic classes
    class D(object):
        def __getitem__(self, i):
            if 0 <= i < 10: return i
            raise IndexError
    d1 = D()
    d2 = D()
    verify(not not d1)
    vereq(hash(d1), id(d1))
    vereq(cmp(d1, d2), cmp(id(d1), id(d2)))
    vereq(d1, d1)
    verify(d1 != d2)
    verify(not d1 != d1)
    verify(not d1 == d2)
    # Note that the module name appears in str/repr, and that varies
    # depending on whether this test is run standalone or from a framework.
    verify(str(d1).find('D object at ') >= 0)
    vereq(str(d1), repr(d1))
    verify(-1 not in d1)
    for i in range(10):
        verify(i in d1)
    verify(10 not in d1)
    # Test overridden behavior for static classes
    class Proxy(object):
        def __init__(self, x):
            self.x = x
        def __nonzero__(self):
            return not not self.x
        def __hash__(self):
            return hash(self.x)
        def __eq__(self, other):
            return self.x == other
        def __ne__(self, other):
            return self.x != other
        def __cmp__(self, other):
            return cmp(self.x, other.x)
        def __str__(self):
            return "Proxy:%s" % self.x
        def __repr__(self):
            return "Proxy(%r)" % self.x
        def __contains__(self, value):
            return value in self.x
    p0 = Proxy(0)
    p1 = Proxy(1)
    p_1 = Proxy(-1)
    verify(not p0)
    verify(not not p1)
    vereq(hash(p0), hash(0))
    vereq(p0, p0)
    verify(p0 != p1)
    verify(not p0 != p0)
    vereq(not p0, p1)
    vereq(cmp(p0, p1), -1)
    vereq(cmp(p0, p0), 0)
    vereq(cmp(p0, p_1), 1)
    vereq(str(p0), "Proxy:0")
    vereq(repr(p0), "Proxy(0)")
    p10 = Proxy(range(10))
    verify(-1 not in p10)
    for i in range(10):
        verify(i in p10)
    verify(10 not in p10)
    # Test overridden behavior for dynamic classes
    class DProxy(object):
        def __init__(self, x):
            self.x = x
        def __nonzero__(self):
            return not not self.x
        def __hash__(self):
            return hash(self.x)
        def __eq__(self, other):
            return self.x == other
        def __ne__(self, other):
            return self.x != other
        def __cmp__(self, other):
            return cmp(self.x, other.x)
        def __str__(self):
            return "DProxy:%s" % self.x
        def __repr__(self):
            return "DProxy(%r)" % self.x
        def __contains__(self, value):
            return value in self.x
    p0 = DProxy(0)
    p1 = DProxy(1)
    p_1 = DProxy(-1)
    verify(not p0)
    verify(not not p1)
    vereq(hash(p0), hash(0))
    vereq(p0, p0)
    verify(p0 != p1)
    verify(not p0 != p0)
    vereq(not p0, p1)
    vereq(cmp(p0, p1), -1)
    vereq(cmp(p0, p0), 0)
    vereq(cmp(p0, p_1), 1)
    vereq(str(p0), "DProxy:0")
    vereq(repr(p0), "DProxy(0)")
    p10 = DProxy(range(10))
    verify(-1 not in p10)
    for i in range(10):
        verify(i in p10)
    verify(10 not in p10)
    # Safety test for __cmp__
    def unsafecmp(a, b):
        # Calling the slot cross-type directly must raise TypeError.
        try:
            a.__class__.__cmp__(a, b)
        except TypeError:
            pass
        else:
            raise TestFailed, "shouldn't allow %s.__cmp__(%r, %r)" % (
                a.__class__, a, b)
    unsafecmp(u"123", "123")
    unsafecmp("123", u"123")
    unsafecmp(1, 1.0)
    unsafecmp(1.0, 1)
    unsafecmp(1, 1L)
    unsafecmp(1L, 1)
    class Letter(str):
        def __new__(cls, letter):
            if letter == 'EPS':
                return str.__new__(cls)
            return str.__new__(cls, letter)
        def __str__(self):
            if not self:
                return 'EPS'
            # Returning self (a str subclass) used to recurse in print.
            return self
    # sys.stdout needs to be the original to trigger the recursion bug
    import sys
    test_stdout = sys.stdout
    sys.stdout = get_original_stdout()
    try:
        # nothing should actually be printed, this should raise an exception
        print Letter('w')
    except RuntimeError:
        pass
    else:
        raise TestFailed, "expected a RuntimeError for print recursion"
    sys.stdout = test_stdout
def weakrefs():
    """Exercise weak references: a plain new-style class is weak-
    referenceable; a __slots__ class is only if '__weakref__' is listed."""
    if verbose: print "Testing weak references..."
    import weakref
    class C(object):
        pass
    c = C()
    r = weakref.ref(c)
    verify(r() is c)
    del c
    verify(r() is None)     # referent collected -> ref goes dead
    del r
    class NoWeak(object):
        __slots__ = ['foo']
    no = NoWeak()
    # Without a __weakref__ slot there is nowhere to hang the ref.
    try:
        weakref.ref(no)
    except TypeError, msg:
        verify(str(msg).find("weak reference") >= 0)
    else:
        verify(0, "weakref.ref(no) should be illegal")
    class Weak(object):
        __slots__ = ['foo', '__weakref__']
    yes = Weak()
    r = weakref.ref(yes)
    verify(r() is yes)
    del yes
    verify(r() is None)
    del r
def properties():
    """Exercise property(): attribute access through the property, direct
    descriptor calls, introspection of fget/fset/fdel/__doc__ (readonly),
    and error propagation from a broken property."""
    if verbose: print "Testing property..."
    class C(object):
        def getx(self):
            return self.__x
        def setx(self, value):
            self.__x = value
        def delx(self):
            del self.__x
        x = property(getx, setx, delx, doc="I'm the x property.")
    a = C()
    verify(not hasattr(a, "x"))
    a.x = 42
    vereq(a._C__x, 42)      # stored under the mangled private name
    vereq(a.x, 42)
    del a.x
    verify(not hasattr(a, "x"))
    verify(not hasattr(a, "_C__x"))
    # The descriptor protocol can also be driven explicitly.
    C.x.__set__(a, 100)
    vereq(C.x.__get__(a), 100)
    C.x.__delete__(a)
    verify(not hasattr(a, "x"))
    raw = C.__dict__['x']
    verify(isinstance(raw, property))
    attrs = dir(raw)
    verify("__doc__" in attrs)
    verify("fget" in attrs)
    verify("fset" in attrs)
    verify("fdel" in attrs)
    vereq(raw.__doc__, "I'm the x property.")
    verify(raw.fget is C.__dict__['getx'])
    verify(raw.fset is C.__dict__['setx'])
    verify(raw.fdel is C.__dict__['delx'])
    # All four exposed attributes must be read-only.
    for attr in "__doc__", "fget", "fset", "fdel":
        try:
            setattr(raw, attr, 42)
        except TypeError, msg:
            if str(msg).find('readonly') < 0:
                raise TestFailed("when setting readonly attr %r on a "
                                 "property, got unexpected TypeError "
                                 "msg %r" % (attr, str(msg)))
        else:
            raise TestFailed("expected TypeError from trying to set "
                             "readonly %r attr on a property" % attr)
    class D(object):
        __getitem__ = property(lambda s: 1/0)
    d = D()
    # The ZeroDivisionError from the property getter must propagate,
    # not be swallowed by the iteration machinery.
    try:
        for i in d:
            str(i)
    except ZeroDivisionError:
        pass
    else:
        raise TestFailed, "expected ZeroDivisionError from bad property"
def supers():
    """Exercise super(): two-argument form, the unbound/class-attribute
    trick (via name mangling), a super subclass, and argument checking."""
    if verbose: print "Testing super..."
    class A(object):
        def meth(self, a):
            return "A(%r)" % a
    vereq(A().meth(1), "A(1)")
    class B(A):
        def __init__(self):
            self.__super = super(B, self)
        def meth(self, a):
            return "B(%r)" % a + self.__super.meth(a)
    vereq(B().meth(2), "B(2)A(2)")
    class C(A):
        def meth(self, a):
            return "C(%r)" % a + self.__super.meth(a)
    # Unbound super stored under the mangled name; it binds on access.
    C._C__super = super(C)
    vereq(C().meth(3), "C(3)A(3)")
    class D(C, B):
        def meth(self, a):
            return "D(%r)" % a + super(D, self).meth(a)
    vereq(D().meth(4), "D(4)C(4)B(4)A(4)")
    # Test for subclassing super
    class mysuper(super):
        def __init__(self, *args):
            return super(mysuper, self).__init__(*args)
    class E(D):
        def meth(self, a):
            return "E(%r)" % a + mysuper(E, self).meth(a)
    vereq(E().meth(5), "E(5)D(5)C(5)B(5)A(5)")
    class F(E):
        def meth(self, a):
            s = self.__super
            return "F(%r)[%s]" % (a, s.__class__.__name__) + s.meth(a)
    F._F__super = mysuper(F)
    vereq(F().meth(6), "F(6)[mysuper]E(6)D(6)C(6)B(6)A(6)")
    # Make sure certain errors are raised
    try:
        super(D, 42)
    except TypeError:
        pass
    else:
        raise TestFailed, "shouldn't allow super(D, 42)"
    try:
        super(D, C())
    except TypeError:
        pass
    else:
        raise TestFailed, "shouldn't allow super(D, C())"
    try:
        super(D).__get__(12)
    except TypeError:
        pass
    else:
        raise TestFailed, "shouldn't allow super(D).__get__(12)"
    try:
        super(D).__get__(C())
    except TypeError:
        pass
    else:
        raise TestFailed, "shouldn't allow super(D).__get__(C())"
def inherits():
if verbose: print "Testing inheritance from basic types..."
class hexint(int):
def __repr__(self):
return hex(self)
def __add__(self, other):
return hexint(int.__add__(self, other))
# (Note that overriding __radd__ doesn't work,
# because the int type gets first dibs.)
vereq(repr(hexint(7) + 9), "0x10")
vereq(repr(hexint(1000) + 7), "0x3ef")
a = hexint(12345)
vereq(a, 12345)
vereq(int(a), 12345)
verify(int(a).__class__ is int)
vereq(hash(a), hash(12345))
verify((+a).__class__ is int)
verify((a >> 0).__class__ is int)
verify((a << 0).__class__ is int)
verify((hexint(0) << 12).__class__ is int)
verify((hexint(0) >> 12).__class__ is int)
class octlong(long):
__slots__ = []
def __str__(self):
s = oct(self)
if s[-1] == 'L':
s = s[:-1]
return s
def __add__(self, other):
return self.__class__(super(octlong, self).__add__(other))
__radd__ = __add__
vereq(str(octlong(3) + 5), "010")
# (Note that overriding __radd__ here only seems to work
# because the example uses a short int left argument.)
vereq(str(5 + octlong(3000)), "05675")
a = octlong(12345)
vereq(a, 12345L)
vereq(long(a), 12345L)
vereq(hash(a), hash(12345L))
verify(long(a).__class__ is long)
verify((+a).__class__ is long)
verify((-a).__class__ is long)
verify((-octlong(0)).__class__ is long)
verify((a >> 0).__class__ is long)
verify((a << 0).__class__ is long)
verify((a - 0).__class__ is long)
verify((a * 1).__class__ is long)
verify((a ** 1).__class__ is long)
verify((a // 1).__class__ is long)
verify((1 * a).__class__ is long)
verify((a | 0).__class__ is long)
verify((a ^ 0).__class__ is long)
verify((a & -1L).__class__ is long)
verify((octlong(0) << 12).__class__ is long)
verify((octlong(0) >> 12).__class__ is long)
verify(abs(octlong(0)).__class__ is long)
# Because octlong overrides __add__, we can't check the absence of +0
# optimizations using octlong.
class longclone(long):
pass
a = longclone(1)
verify((a + 0).__class__ is long)
verify((0 + a).__class__ is long)
# Check that negative clones don't segfault
a = longclone(-1)
vereq(a.__dict__, {})
vereq(long(a), -1) # verify PyNumber_Long() copies the sign bit
class precfloat(float):
__slots__ = ['prec']
def __init__(self, value=0.0, prec=12):
self.prec = int(prec)
float.__init__(value)
def __repr__(self):
return "%.*g" % (self.prec, self)
vereq(repr(precfloat(1.1)), "1.1")
a = precfloat(12345)
vereq(a, 12345.0)
vereq(float(a), 12345.0)
verify(float(a).__class__ is float)
vereq(hash(a), hash(12345.0))
verify((+a).__class__ is float)
class madcomplex(complex):
def __repr__(self):
return "%.17gj%+.17g" % (self.imag, self.real)
a = madcomplex(-3, 4)
vereq(repr(a), "4j-3")
base = complex(-3, 4)
veris(base.__class__, complex)
vereq(a, base)
vereq(complex(a), base)
veris(complex(a).__class__, complex)
a = madcomplex(a) # just trying another form of the constructor
vereq(repr(a), "4j-3")
vereq(a, base)
vereq(complex(a), base)
veris(complex(a).__class__, complex)
vereq(hash(a), hash(base))
veris((+a).__class__, complex)
veris((a + 0).__class__, complex)
vereq(a + 0, base)
veris((a - 0).__class__, complex)
vereq(a - 0, base)
veris((a * 1).__class__, complex)
vereq(a * 1, base)
veris((a / 1).__class__, complex)
vereq(a / 1, base)
class madtuple(tuple):
_rev = None
def rev(self):
if self._rev is not None:
return self._rev
L = list(self)
L.reverse()
self._rev = self.__class__(L)
return self._rev
a = madtuple((1,2,3,4,5,6,7,8,9,0))
vereq(a, (1,2,3,4,5,6,7,8,9,0))
vereq(a.rev(), madtuple((0,9,8,7,6,5,4,3,2,1)))
vereq(a.rev().rev(), madtuple((1,2,3,4,5,6,7,8,9,0)))
for i in range(512):
t = madtuple(range(i))
u = t.rev()
v = u.rev()
vereq(v, t)
a = madtuple((1,2,3,4,5))
vereq(tuple(a), (1,2,3,4,5))
verify(tuple(a).__class__ is tuple)
vereq(hash(a), hash((1,2,3,4,5)))
verify(a[:].__class__ is tuple)
verify((a * 1).__class__ is tuple)
verify((a * 0).__class__ is tuple)
verify((a + ()).__class__ is tuple)
a = madtuple(())
vereq(tuple(a), ())
verify(tuple(a).__class__ is tuple)
verify((a + a).__class__ is tuple)
verify((a * 0).__class__ is tuple)
verify((a * 1).__class__ is tuple)
verify((a * 2).__class__ is tuple)
verify(a[:].__class__ is tuple)
class madstring(str):
_rev = None
def rev(self):
if self._rev is not None:
return self._rev
L = list(self)
L.reverse()
self._rev = self.__class__("".join(L))
return self._rev
s = madstring("abcdefghijklmnopqrstuvwxyz")
vereq(s, "abcdefghijklmnopqrstuvwxyz")
vereq(s.rev(), madstring("zyxwvutsrqponmlkjihgfedcba"))
vereq(s.rev().rev(), madstring("abcdefghijklmnopqrstuvwxyz"))
for i in range(256):
s = madstring("".join(map(chr, range(i))))
t = s.rev()
u = t.rev()
vereq(u, s)
s = madstring("12345")
vereq(str(s), "12345")
verify(str(s).__class__ is str)
base = "\x00" * 5
s = madstring(base)
vereq(s, base)
vereq(str(s), base)
verify(str(s).__class__ is str)
vereq(hash(s), hash(base))
vereq({s: 1}[base], 1)
vereq({base: 1}[s], 1)
verify((s + "").__class__ is str)
vereq(s + "", base)
verify(("" + s).__class__ is str)
vereq("" + s, base)
verify((s * 0).__class__ is str)
vereq(s * 0, "")
verify((s * 1).__class__ is str)
vereq(s * 1, base)
verify((s * 2).__class__ is str)
vereq(s * 2, base + base)
verify(s[:].__class__ is str)
vereq(s[:], base)
verify(s[0:0].__class__ is str)
vereq(s[0:0], "")
verify(s.strip().__class__ is str)
vereq(s.strip(), base)
verify(s.lstrip().__class__ is str)
vereq(s.lstrip(), base)
verify(s.rstrip().__class__ is str)
vereq(s.rstrip(), base)
identitytab = ''.join([chr(i) for i in range(256)])
verify(s.translate(identitytab).__class__ is str)
vereq(s.translate(identitytab), base)
verify(s.translate(identitytab, "x").__class__ is str)
vereq(s.translate(identitytab, "x"), base)
vereq(s.translate(identitytab, "\x00"), "")
verify(s.replace("x", "x").__class__ is str)
vereq(s.replace("x", "x"), base)
verify(s.ljust(len(s)).__class__ is str)
vereq(s.ljust(len(s)), base)
verify(s.rjust(len(s)).__class__ is str)
vereq(s.rjust(len(s)), base)
verify(s.center(len(s)).__class__ is str)
vereq(s.center(len(s)), base)
verify(s.lower().__class__ is str)
vereq(s.lower(), base)
s = madstring("x y")
vereq(s, "x y")
verify(intern(s).__class__ is str)
verify(intern(s) is intern("x y"))
vereq(intern(s), "x y")
i = intern("y x")
s = madstring("y x")
vereq(s, i)
verify(intern(s).__class__ is str)
verify(intern(s) is i)
s = madstring(i)
verify(intern(s).__class__ is str)
verify(intern(s) is i)
class madunicode(unicode):
_rev = None
def rev(self):
if self._rev is not None:
return self._rev
L = list(self)
L.reverse()
self._rev = self.__class__(u"".join(L))
return self._rev
u = madunicode("ABCDEF")
vereq(u, u"ABCDEF")
vereq(u.rev(), madunicode(u"FEDCBA"))
vereq(u.rev().rev(), madunicode(u"ABCDEF"))
base = u"12345"
u = madunicode(base)
vereq(unicode(u), base)
verify(unicode(u).__class__ is unicode)
vereq(hash(u), hash(base))
vereq({u: 1}[base], 1)
vereq({base: 1}[u], 1)
verify(u.strip().__class__ is unicode)
vereq(u.strip(), base)
verify(u.lstrip().__class__ is unicode)
vereq(u.lstrip(), base)
verify(u.rstrip().__class__ is unicode)
vereq(u.rstrip(), base)
verify(u.replace(u"x", u"x").__class__ is unicode)
vereq(u.replace(u"x", u"x"), base)
verify(u.replace(u"xy", u"xy").__class__ is unicode)
vereq(u.replace(u"xy", u"xy"), base)
verify(u.center(len(u)).__class__ is unicode)
vereq(u.center(len(u)), base)
verify(u.ljust(len(u)).__class__ is unicode)
vereq(u.ljust(len(u)), base)
verify(u.rjust(len(u)).__class__ is unicode)
vereq(u.rjust(len(u)), base)
verify(u.lower().__class__ is unicode)
vereq(u.lower(), base)
verify(u.upper().__class__ is unicode)
vereq(u.upper(), base)
verify(u.capitalize().__class__ is unicode)
vereq(u.capitalize(), base)
verify(u.title().__class__ is unicode)
vereq(u.title(), base)
verify((u + u"").__class__ is unicode)
vereq(u + u"", base)
verify((u"" + u).__class__ is unicode)
vereq(u"" + u, base)
verify((u * 0).__class__ is unicode)
vereq(u * 0, u"")
verify((u * 1).__class__ is unicode)
vereq(u * 1, base)
verify((u * 2).__class__ is unicode)
vereq(u * 2, base + base)
verify(u[:].__class__ is unicode)
vereq(u[:], base)
verify(u[0:0].__class__ is unicode)
vereq(u[0:0], u"")
class sublist(list):
pass
a = sublist(range(5))
vereq(a, range(5))
a.append("hello")
vereq(a, range(5) + ["hello"])
a[5] = 5
vereq(a, range(6))
a.extend(range(6, 20))
vereq(a, range(20))
a[-5:] = []
vereq(a, range(15))
del a[10:15]
vereq(len(a), 10)
vereq(a, range(10))
vereq(list(a), range(10))
vereq(a[0], 0)
vereq(a[9], 9)
vereq(a[-10], 0)
vereq(a[-1], 9)
vereq(a[:5], range(5))
class CountedInput(file):
"""Counts lines read by self.readline().
self.lineno is the 0-based ordinal of the last line read, up to
a maximum of one greater than the number of lines in the file.
self.ateof is true if and only if the final "" line has been read,
at which point self.lineno stops incrementing, and further calls
to readline() continue to return "".
"""
lineno = 0
ateof = 0
def readline(self):
if self.ateof:
return ""
s = file.readline(self)
# Next line works too.
# s = super(CountedInput, self).readline()
self.lineno += 1
if s == "":
self.ateof = 1
return s
f = file(name=TESTFN, mode='w')
lines = ['a\n', 'b\n', 'c\n']
try:
f.writelines(lines)
f.close()
f = CountedInput(TESTFN)
for (i, expected) in zip(range(1, 5) + [4], lines + 2 * [""]):
got = f.readline()
vereq(expected, got)
vereq(f.lineno, i)
vereq(f.ateof, (i > len(lines)))
f.close()
finally:
try:
f.close()
except:
pass
try:
import os
os.unlink(TESTFN)
except:
pass
def keywords():
    # Verify that the basic type constructors accept their documented
    # (pre-Python-3) keyword argument names, and reject unknown ones.
    if verbose:
        print "Testing keyword args to basic type constructors ..."
    vereq(int(x=1), 1)
    vereq(float(x=2), 2.0)
    vereq(long(x=3), 3L)
    vereq(complex(imag=42, real=666), complex(666, 42))
    vereq(str(object=500), '500')
    vereq(unicode(string='abc', errors='strict'), u'abc')
    vereq(tuple(sequence=range(3)), (0, 1, 2))
    vereq(list(sequence=(0, 1, 2)), range(3))
    vereq(dict(items={1: 2}), {1: 2})

    # Every constructor must raise TypeError for an unrecognized keyword.
    for constructor in (int, float, long, complex, str, unicode,
                        tuple, list, dict, file):
        try:
            constructor(bogus_keyword_arg=1)
        except TypeError:
            pass
        else:
            raise TestFailed("expected TypeError from bogus keyword "
                             "argument to %r" % constructor)
def restricted():
    # Check that file() cannot be used to escape rexec's restrictions.
    # XXX This test is disabled because rexec is not deemed safe
    return
    import rexec
    if verbose:
        print "Testing interaction with restricted execution ..."

    sandbox = rexec.RExec()

    code1 = """f = open(%r, 'w')""" % TESTFN
    code2 = """f = file(%r, 'w')""" % TESTFN
    code3 = """\
f = open(%r)
t = type(f) # a sneaky way to get the file() constructor
f.close()
f = t(%r, 'w') # rexec can't catch this by itself
""" % (TESTFN, TESTFN)

    f = open(TESTFN, 'w')  # Create the file so code3 can find it.
    f.close()

    try:
        # Each snippet must be stopped by a "restricted" IOError.
        for code in code1, code2, code3:
            try:
                sandbox.r_exec(code)
            except IOError, msg:
                if str(msg).find("restricted") >= 0:
                    outcome = "OK"
                else:
                    outcome = "got an exception, but not an expected one"
            else:
                outcome = "expected a restricted-execution exception"

            if outcome != "OK":
                raise TestFailed("%s, in %r" % (outcome, code))
    finally:
        try:
            import os
            os.unlink(TESTFN)
        except:
            pass
def str_subclass_as_dict_key():
    # A str subclass that overrides __eq__ and __hash__ must be usable
    # as a dict key, interchangeably with equal-comparing plain strings.
    if verbose:
        print "Testing a str subclass used as dict key .."

    class cistr(str):
        """Subclass of str that computes __eq__ case-insensitively.

        Also computes a hash code of the string in canonical form.
        """

        def __init__(self, value):
            self.canonical = value.lower()
            self.hashcode = hash(self.canonical)

        def __eq__(self, other):
            if not isinstance(other, cistr):
                other = cistr(other)
            return self.canonical == other.canonical

        def __hash__(self):
            return self.hashcode

    vereq(cistr('ABC'), 'abc')
    vereq('aBc', cistr('ABC'))
    vereq(str(cistr('ABC')), 'ABC')

    d = {cistr('one'): 1, cistr('two'): 2, cistr('tHree'): 3}
    vereq(d[cistr('one')], 1)
    vereq(d[cistr('tWo')], 2)
    vereq(d[cistr('THrEE')], 3)
    verify(cistr('ONe') in d)
    vereq(d.get(cistr('thrEE')), 3)
def classic_comparisons():
    # A __cmp__ defined on a class (classic base or builtin base) must
    # drive all comparison operators, including mixed int comparisons.
    if verbose: print "Testing classic comparisons..."
    class classic:
        pass

    for base in (classic, int, object):
        if verbose: print " (base = %s)" % base

        class C(base):
            def __init__(self, value):
                self.value = int(value)
            def __cmp__(self, other):
                if isinstance(other, C):
                    return cmp(self.value, other.value)
                if isinstance(other, int) or isinstance(other, long):
                    return cmp(self.value, other)
                return NotImplemented

        c1 = C(1)
        c2 = C(2)
        c3 = C(3)
        vereq(c1, 1)
        c = {1: c1, 2: c2, 3: c3}
        for x in 1, 2, 3:
            for y in 1, 2, 3:
                # cmp() and all six operators must agree with plain ints,
                # for C-vs-C, C-vs-int and int-vs-C.
                verify(cmp(c[x], c[y]) == cmp(x, y), "x=%d, y=%d" % (x, y))
                for op in "<", "<=", "==", "!=", ">", ">=":
                    verify(eval("c[x] %s c[y]" % op) == eval("x %s y" % op),
                           "x=%d, y=%d" % (x, y))
                verify(cmp(c[x], y) == cmp(x, y), "x=%d, y=%d" % (x, y))
                verify(cmp(x, c[y]) == cmp(x, y), "x=%d, y=%d" % (x, y))
def rich_comparisons():
    # Rich comparison methods must be honored on builtin subclasses and
    # must take precedence over __cmp__ (which must never be reached).
    if verbose:
        print "Testing rich comparisons..."
    class Z(complex):
        pass
    z = Z(1)
    vereq(z, 1+0j)
    vereq(1+0j, z)
    class ZZ(complex):
        def __eq__(self, other):
            try:
                return abs(self - other) <= 1e-6
            except:
                return NotImplemented
    zz = ZZ(1.0000003)
    vereq(zz, 1+0j)
    vereq(1+0j, zz)

    class classic:
        pass
    for base in (classic, int, object, list):
        if verbose: print " (base = %s)" % base

        class C(base):
            def __init__(self, value):
                self.value = int(value)
            def __cmp__(self, other):
                # Rich comparisons must win; reaching __cmp__ is a failure.
                raise TestFailed, "shouldn't call __cmp__"
            def __eq__(self, other):
                if isinstance(other, C):
                    return self.value == other.value
                if isinstance(other, int) or isinstance(other, long):
                    return self.value == other
                return NotImplemented
            def __ne__(self, other):
                if isinstance(other, C):
                    return self.value != other.value
                if isinstance(other, int) or isinstance(other, long):
                    return self.value != other
                return NotImplemented
            def __lt__(self, other):
                if isinstance(other, C):
                    return self.value < other.value
                if isinstance(other, int) or isinstance(other, long):
                    return self.value < other
                return NotImplemented
            def __le__(self, other):
                if isinstance(other, C):
                    return self.value <= other.value
                if isinstance(other, int) or isinstance(other, long):
                    return self.value <= other
                return NotImplemented
            def __gt__(self, other):
                if isinstance(other, C):
                    return self.value > other.value
                if isinstance(other, int) or isinstance(other, long):
                    return self.value > other
                return NotImplemented
            def __ge__(self, other):
                if isinstance(other, C):
                    return self.value >= other.value
                if isinstance(other, int) or isinstance(other, long):
                    return self.value >= other
                return NotImplemented

        c1 = C(1)
        c2 = C(2)
        c3 = C(3)
        vereq(c1, 1)
        c = {1: c1, 2: c2, 3: c3}
        for x in 1, 2, 3:
            for y in 1, 2, 3:
                # Every operator must agree with plain-int semantics for
                # C-vs-C, C-vs-int and int-vs-C operand combinations.
                for op in "<", "<=", "==", "!=", ">", ">=":
                    verify(eval("c[x] %s c[y]" % op) == eval("x %s y" % op),
                           "x=%d, y=%d" % (x, y))
                    verify(eval("c[x] %s y" % op) == eval("x %s y" % op),
                           "x=%d, y=%d" % (x, y))
                    verify(eval("x %s c[y]" % op) == eval("x %s y" % op),
                           "x=%d, y=%d" % (x, y))
def coercions():
if verbose: print "Testing coercions..."
class I(int): pass
coerce(I(0), 0)
coerce(0, I(0))
class L(long): pass
coerce(L(0), 0)
coerce(L(0), 0L)
coerce(0, L(0))
coerce(0L, L(0))
class F(float): pass
coerce(F(0), 0)
coerce(F(0), 0L)
coerce(F(0), 0.)
coerce(0, F(0))
coerce(0L, F(0))
coerce(0., F(0))
class C(complex): pass
coerce(C(0), 0)
coerce(C(0), 0L)
coerce(C(0), 0.)
coerce(C(0), 0j)
coerce(0, C(0))
coerce(0L, C(0))
coerce(0., C(0))
coerce(0j, C(0))
def descrdoc():
    # Built-in descriptors on the file type must expose their C-level
    # doc strings via __doc__.
    if verbose: print "Testing descriptor doc strings..."
    def check(descr, what):
        vereq(descr.__doc__, what)
    check(file.closed, "flag set if the file is closed") # getset descriptor
    check(file.name, "file name") # member descriptor
def setclass():
    # __class__ assignment is allowed only between compatible heap types
    # (same instance layout); everything else must raise TypeError, and
    # deleting __class__ must always raise TypeError.
    if verbose: print "Testing __class__ assignment..."
    class C(object): pass
    class D(object): pass
    class E(object): pass
    class F(D, E): pass
    # All four classes share object's layout, so any pairwise assignment
    # must succeed and be observable immediately.
    for cls in C, D, E, F:
        for cls2 in C, D, E, F:
            x = cls()
            x.__class__ = cls2
            verify(x.__class__ is cls2)
            x.__class__ = cls
            verify(x.__class__ is cls)
    def cant(x, C):
        # Assert that both "x.__class__ = C" and "del x.__class__" fail.
        try:
            x.__class__ = C
        except TypeError:
            pass
        else:
            raise TestFailed, "shouldn't allow %r.__class__ = %r" % (x, C)
        try:
            delattr(x, "__class__")
        except TypeError:
            pass
        else:
            raise TestFailed, "shouldn't allow del %r.__class__" % x
    cant(C(), list)
    cant(list(), C)
    cant(C(), 1)
    cant(C(), object)
    cant(object(), list)
    cant(list(), object)
    o = object()
    cant(o, type(1))
    cant(o, type(None))
    del o
def setdict():
    # Instance __dict__ may be replaced by a real dict (or deleted); any
    # other value must raise TypeError, and classes reject it entirely.
    if verbose: print "Testing __dict__ assignment..."
    class C(object): pass
    a = C()
    a.__dict__ = {'b': 1}
    vereq(a.b, 1)
    def cant(x, dict):
        # Assert that "x.__dict__ = dict" raises TypeError.
        try:
            x.__dict__ = dict
        except TypeError:
            pass
        else:
            raise TestFailed, "shouldn't allow %r.__dict__ = %r" % (x, dict)
    cant(a, None)
    cant(a, [])
    cant(a, 1)
    del a.__dict__ # Deleting __dict__ is allowed
    # Classes don't allow __dict__ assignment
    cant(C, {})
def pickles():
    # New-style classes must round-trip through pickle, cPickle and
    # copy.deepcopy: classes are pickled by reference (identity is
    # preserved), instances by state.  The classes are made global so
    # the pickle machinery can find them by name.
    if verbose:
        print "Testing pickling and copying new-style classes and objects..."
    import pickle, cPickle

    def sorteditems(d):
        # Return d's items in sorted order for deterministic comparison.
        L = d.items()
        L.sort()
        return L

    global C
    class C(object):
        def __init__(self, a, b):
            super(C, self).__init__()
            self.a = a
            self.b = b
        def __repr__(self):
            return "C(%r, %r)" % (self.a, self.b)

    global C1
    class C1(list):
        def __new__(cls, a, b):
            return super(C1, cls).__new__(cls)
        def __init__(self, a, b):
            self.a = a
            self.b = b
        def __repr__(self):
            return "C1(%r, %r)<%r>" % (self.a, self.b, list(self))

    global C2
    class C2(int):
        def __new__(cls, a, b, val=0):
            return super(C2, cls).__new__(cls, val)
        def __init__(self, a, b, val=0):
            self.a = a
            self.b = b
        def __repr__(self):
            return "C2(%r, %r)<%r>" % (self.a, self.b, int(self))

    global C3
    class C3(object):
        def __init__(self, foo):
            self.foo = foo
        def __getstate__(self):
            return self.foo
        def __setstate__(self, foo):
            self.foo = foo

    global C4classic, C4
    class C4classic: # classic
        pass
    class C4(C4classic, object): # mixed inheritance
        pass

    # Exercise both pickle implementations in text and binary mode.
    for p in pickle, cPickle:
        for bin in 0, 1:
            if verbose:
                print p.__name__, ["text", "binary"][bin]

            for cls in C, C1, C2:
                s = p.dumps(cls, bin)
                cls2 = p.loads(s)
                verify(cls2 is cls)

            a = C1(1, 2); a.append(42); a.append(24)
            b = C2("hello", "world", 42)
            s = p.dumps((a, b), bin)
            x, y = p.loads(s)
            vereq(x.__class__, a.__class__)
            vereq(sorteditems(x.__dict__), sorteditems(a.__dict__))
            vereq(y.__class__, b.__class__)
            vereq(sorteditems(y.__dict__), sorteditems(b.__dict__))
            vereq(`x`, `a`)
            vereq(`y`, `b`)
            if verbose:
                print "a = x =", a
                print "b = y =", b
            # Test for __getstate__ and __setstate__ on new style class
            u = C3(42)
            s = p.dumps(u, bin)
            v = p.loads(s)
            veris(u.__class__, v.__class__)
            vereq(u.foo, v.foo)
            # Test for picklability of hybrid class
            u = C4()
            u.foo = 42
            s = p.dumps(u, bin)
            v = p.loads(s)
            veris(u.__class__, v.__class__)
            vereq(u.foo, v.foo)

    # Testing copy.deepcopy()
    if verbose:
        print "deepcopy"
    import copy
    for cls in C, C1, C2:
        cls2 = copy.deepcopy(cls)
        verify(cls2 is cls)

    a = C1(1, 2); a.append(42); a.append(24)
    b = C2("hello", "world", 42)
    x, y = copy.deepcopy((a, b))
    vereq(x.__class__, a.__class__)
    vereq(sorteditems(x.__dict__), sorteditems(a.__dict__))
    vereq(y.__class__, b.__class__)
    vereq(sorteditems(y.__dict__), sorteditems(b.__dict__))
    vereq(`x`, `a`)
    vereq(`y`, `b`)
    if verbose:
        print "a = x =", a
        print "b = y =", b
def pickleslots():
if verbose: print "Testing pickling of classes with __slots__ ..."
import pickle, cPickle
# Pickling of classes with __slots__ but without __getstate__ should fail
global B, C, D, E
class B(object):
pass
for base in [object, B]:
class C(base):
__slots__ = ['a']
class D(C):
pass
try:
pickle.dumps(C())
except TypeError:
pass
else:
raise TestFailed, "should fail: pickle C instance - %s" % base
try:
cPickle.dumps(C())
except TypeError:
pass
else:
raise TestFailed, "should fail: cPickle C instance - %s" % base
try:
pickle.dumps(C())
except TypeError:
pass
else:
raise TestFailed, "should fail: pickle D instance - %s" % base
try:
cPickle.dumps(D())
except TypeError:
pass
else:
raise TestFailed, "should fail: cPickle D instance - %s" % base
# Give C a __getstate__ and __setstate__
class C(base):
__slots__ = ['a']
def __getstate__(self):
try:
d = self.__dict__.copy()
except AttributeError:
d = {}
try:
d['a'] = self.a
except AttributeError:
pass
return d
def __setstate__(self, d):
for k, v in d.items():
setattr(self, k, v)
class D(C):
pass
# Now it should work
x = C()
y = pickle.loads(pickle.dumps(x))
vereq(hasattr(y, 'a'), 0)
y = cPickle.loads(cPickle.dumps(x))
vereq(hasattr(y, 'a'), 0)
x.a = 42
y = pickle.loads(pickle.dumps(x))
vereq(y.a, 42)
y = cPickle.loads(cPickle.dumps(x))
vereq(y.a, 42)
x = D()
x.a = 42
x.b = 100
y = pickle.loads(pickle.dumps(x))
vereq(y.a + y.b, 142)
y = cPickle.loads(cPickle.dumps(x))
vereq(y.a + y.b, 142)
# But a subclass that adds a slot should not work
class E(C):
__slots__ = ['b']
try:
pickle.dumps(E())
except TypeError:
pass
else:
raise TestFailed, "should fail: pickle E instance - %s" % base
try:
cPickle.dumps(E())
except TypeError:
pass
else:
raise TestFailed, "should fail: cPickle E instance - %s" % base
def copies():
    # copy.copy() must produce a shallow copy (mutable attributes are
    # shared); copy.deepcopy() must duplicate them.
    if verbose: print "Testing copy.copy() and copy.deepcopy()..."
    import copy
    class C(object):
        pass

    a = C()
    a.foo = 12
    b = copy.copy(a)
    vereq(b.__dict__, a.__dict__)

    a.bar = [1,2,3]
    c = copy.copy(a)
    vereq(c.bar, a.bar)
    verify(c.bar is a.bar)          # shallow: same list object

    d = copy.deepcopy(a)
    vereq(d.__dict__, a.__dict__)
    a.bar.append(4)
    vereq(d.bar, [1,2,3])           # deep: mutation not visible
def binopoverride():
    # Overridden binary operators on an int subclass must be used for
    # both normal and reflected operands, including three-argument pow().
    if verbose: print "Testing overrides of binary operations..."
    class I(int):
        def __repr__(self):
            return "I(%r)" % int(self)
        def __add__(self, other):
            return I(int(self) + int(other))
        __radd__ = __add__
        def __pow__(self, other, mod=None):
            if mod is None:
                return I(pow(int(self), int(other)))
            else:
                return I(pow(int(self), int(other), int(mod)))
        def __rpow__(self, other, mod=None):
            if mod is None:
                return I(pow(int(other), int(self), mod))
            else:
                return I(pow(int(other), int(self), int(mod)))

    vereq(`I(1) + I(2)`, "I(3)")
    vereq(`I(1) + 2`, "I(3)")
    vereq(`1 + I(2)`, "I(3)")
    vereq(`I(2) ** I(3)`, "I(8)")
    vereq(`2 ** I(3)`, "I(8)")
    vereq(`I(2) ** 3`, "I(8)")
    vereq(`pow(I(2), I(3), I(5))`, "I(3)")
    class S(str):
        def __eq__(self, other):
            return self.lower() == other.lower()
def subclasspropagation():
    # Assigning or deleting slot functions (__hash__, __getattribute__,
    # __getattr__, ...) on a base class must propagate to already-created
    # subclasses and their live instances immediately.
    if verbose: print "Testing propagation of slot functions to subclasses..."
    class A(object):
        pass
    class B(A):
        pass
    class C(A):
        pass
    class D(B, C):
        pass
    d = D()
    vereq(hash(d), id(d))
    # MRO of D is D, B, C, A: each assignment below shadows the previous
    # winner; each deletion re-exposes it.
    A.__hash__ = lambda self: 42
    vereq(hash(d), 42)
    C.__hash__ = lambda self: 314
    vereq(hash(d), 314)
    B.__hash__ = lambda self: 144
    vereq(hash(d), 144)
    D.__hash__ = lambda self: 100
    vereq(hash(d), 100)
    del D.__hash__
    vereq(hash(d), 144)
    del B.__hash__
    vereq(hash(d), 314)
    del C.__hash__
    vereq(hash(d), 42)
    del A.__hash__
    vereq(hash(d), id(d))
    d.foo = 42
    d.bar = 42
    vereq(d.foo, 42)
    vereq(d.bar, 42)
    # __getattribute__ intercepts every lookup; __getattr__ only misses.
    def __getattribute__(self, name):
        if name == "foo":
            return 24
        return object.__getattribute__(self, name)
    A.__getattribute__ = __getattribute__
    vereq(d.foo, 24)
    vereq(d.bar, 42)
    def __getattr__(self, name):
        if name in ("spam", "foo", "bar"):
            return "hello"
        raise AttributeError, name
    B.__getattr__ = __getattr__
    vereq(d.spam, "hello")
    vereq(d.foo, 24)
    vereq(d.bar, 42)
    del A.__getattribute__
    vereq(d.foo, 42)
    del d.foo
    vereq(d.foo, "hello")
    vereq(d.bar, 42)
    del B.__getattr__
    try:
        d.foo
    except AttributeError:
        pass
    else:
        raise TestFailed, "d.foo should be undefined now"

    # Test a nasty bug in recurse_down_subclasses()
    import gc
    class A(object):
        pass
    class B(A):
        pass
    del B
    gc.collect()
    A.__setitem__ = lambda *a: None # crash
def buffer_inherit():
    import binascii
    # SF bug [#470040] ParseTuple t# vs subclasses.
    # str/unicode subclasses must inherit the buffer interface; an int
    # subclass must not gain one.
    if verbose:
        print "Testing that buffer interface is inherited ..."

    class MyStr(str):
        pass
    base = 'abc'
    m = MyStr(base)
    # b2a_hex uses the buffer interface to get its argument's value, via
    # PyArg_ParseTuple 't#' code.
    vereq(binascii.b2a_hex(m), binascii.b2a_hex(base))

    # It's not clear that unicode will continue to support the character
    # buffer interface, and this test will fail if that's taken away.
    class MyUni(unicode):
        pass
    base = u'abc'
    m = MyUni(base)
    vereq(binascii.b2a_hex(m), binascii.b2a_hex(base))

    class MyInt(int):
        pass
    m = MyInt(42)
    try:
        binascii.b2a_hex(m)
        raise TestFailed('subclass of int should not have a buffer interface')
    except TypeError:
        pass
def str_of_str_subclass():
    import binascii
    import cStringIO
    # A __str__ overridden on a str subclass must be honored by str(),
    # print, and both internal print paths.
    if verbose:
        print "Testing __str__ defined in subclass of str ..."

    class octetstring(str):
        def __str__(self):
            return binascii.b2a_hex(self)
        def __repr__(self):
            return self + " repr"

    o = octetstring('A')
    vereq(type(o), octetstring)
    vereq(type(str(o)), str)
    vereq(type(repr(o)), str)
    vereq(ord(o), 0x41)
    vereq(str(o), '41')
    vereq(repr(o), 'A repr')
    vereq(o.__str__(), '41')
    vereq(o.__repr__(), 'A repr')

    capture = cStringIO.StringIO()

    # Calling str() or not exercises different internal paths.
    print >> capture, o
    print >> capture, str(o)
    vereq(capture.getvalue(), '41\n41\n')
    capture.close()
def kwdargs():
    # Explicit __call__ and unbound __init__ must accept keyword args.
    if verbose: print "Testing keyword arguments to __init__, __call__..."
    def f(a): return a
    vereq(f.__call__(a=42), 42)
    a = []
    list.__init__(a, sequence=[0, 1, 2])
    vereq(a, [0, 1, 2])
def delhook():
    # __del__ on a new-style class must run when the instance dies; an
    # unrelated "del obj[i]" must raise TypeError, not invoke __del__.
    if verbose: print "Testing __del__ hook..."
    log = []
    class C(object):
        def __del__(self):
            log.append(1)
    c = C()
    vereq(log, [])
    del c
    vereq(log, [1])

    class D(object): pass
    d = D()
    try: del d[0]
    except TypeError: pass
    else: raise TestFailed, "invalid del() didn't raise TypeError"
def hashinherit():
if verbose: print "Testing hash of mutable subclasses..."
class mydict(dict):
pass
d = mydict()
try:
hash(d)
except TypeError:
pass
else:
raise TestFailed, "hash() of dict subclass should fail"
class mylist(list):
pass
d = mylist()
try:
hash(d)
except TypeError:
pass
else:
raise TestFailed, "hash() of list subclass should fail"
def strops():
    # Error cases of str operations must raise the documented exception
    # types rather than succeeding or crashing.
    try: 'a' + 5
    except TypeError: pass
    else: raise TestFailed, "'' + 5 doesn't raise TypeError"

    try: ''.split('')
    except ValueError: pass
    else: raise TestFailed, "''.split('') doesn't raise ValueError"

    try: ''.join([0])
    except TypeError: pass
    else: raise TestFailed, "''.join([0]) doesn't raise TypeError"

    try: ''.rindex('5')
    except ValueError: pass
    else: raise TestFailed, "''.rindex('5') doesn't raise ValueError"

    try: ''.replace('', '')
    except ValueError: pass
    else: raise TestFailed, "''.replace('', '') doesn't raise ValueError"

    try: '%(n)s' % None
    except TypeError: pass
    else: raise TestFailed, "'%(n)s' % None doesn't raise TypeError"

    try: '%(n' % {}
    except ValueError: pass
    else: raise TestFailed, "'%(n' % {} '' doesn't raise ValueError"

    try: '%*s' % ('abc')
    except TypeError: pass
    else: raise TestFailed, "'%*s' % ('abc') doesn't raise TypeError"

    try: '%*.*s' % ('abc', 5)
    except TypeError: pass
    else: raise TestFailed, "'%*.*s' % ('abc', 5) doesn't raise TypeError"

    try: '%s' % (1, 2)
    except TypeError: pass
    else: raise TestFailed, "'%s' % (1, 2) doesn't raise TypeError"

    try: '%' % None
    except ValueError: pass
    else: raise TestFailed, "'%' % None doesn't raise ValueError"

    # Sanity checks on the success paths.
    vereq('534253'.isdigit(), 1)
    vereq('534253x'.isdigit(), 0)
    vereq('%c' % 5, '\x05')
    vereq('%c' % '5', '5')
def deepcopyrecursive():
    # deepcopy of mutually-referencing objects must terminate.
    if verbose: print "Testing deepcopy of recursive objects..."
    class Node:
        pass
    a = Node()
    b = Node()
    a.b = b
    b.a = a
    z = deepcopy(a) # This blew up before
def modules():
    # A module created via M.__new__ (bypassing __init__) must be safe
    # to inspect: no attributes, and __dict__ is None until the first
    # attribute assignment creates it.
    if verbose: print "Testing uninitialized module objects..."
    from types import ModuleType as M
    m = M.__new__(M)
    str(m)
    vereq(hasattr(m, "__name__"), 0)
    vereq(hasattr(m, "__file__"), 0)
    vereq(hasattr(m, "foo"), 0)
    vereq(m.__dict__, None)
    m.foo = 1
    vereq(m.__dict__, {"foo": 1})
def docdescriptor():
    # SF bug 542984
    # __doc__ implemented as a descriptor must work on both classic and
    # new-style classes, for the class itself and its instances.
    if verbose: print "Testing __doc__ descriptor..."
    class DocDescr(object):
        def __get__(self, object, otype):
            if object:
                object = object.__class__.__name__ + ' instance'
            if otype:
                otype = otype.__name__
            return 'object=%s; type=%s' % (object, otype)
    class OldClass:
        __doc__ = DocDescr()
    class NewClass(object):
        __doc__ = DocDescr()
    vereq(OldClass.__doc__, 'object=None; type=OldClass')
    vereq(OldClass().__doc__, 'object=OldClass instance; type=OldClass')
    vereq(NewClass.__doc__, 'object=None; type=NewClass')
    vereq(NewClass().__doc__, 'object=NewClass instance; type=NewClass')
def imulbug():
# SF bug 544647
if verbose: print "Testing for __imul__ problems..."
class C(object):
def __imul__(self, other):
return (self, other)
x = C()
y = x
y *= 1.0
vereq(y, (x, 1.0))
y = x
y *= 2
vereq(y, (x, 2))
y = x
y *= 3L
vereq(y, (x, 3L))
y = x
y *= 1L<<100
vereq(y, (x, 1L<<100))
y = x
y *= None
vereq(y, (x, None))
y = x
y *= "foo"
vereq(y, (x, "foo"))
def copy_setstate():
    # copy.copy() and copy.deepcopy() must route instance state through
    # __getstate__/__setstate__ when both are defined.
    if verbose:
        print "Testing that copy.*copy() correctly uses __setstate__..."
    import copy
    class C(object):
        def __init__(self, foo=None):
            self.foo = foo
            self.__foo = foo
        def setfoo(self, foo=None):
            self.foo = foo
        def getfoo(self):
            return self.__foo
        def __getstate__(self):
            # State deliberately excludes the private attribute ...
            return [self.foo]
        def __setstate__(self, lst):
            # ... so after a copy both attributes equal the public one.
            assert len(lst) == 1
            self.__foo = self.foo = lst[0]
    a = C(42)
    a.setfoo(24)
    vereq(a.foo, 24)
    vereq(a.getfoo(), 42)
    b = copy.copy(a)
    vereq(b.foo, 24)
    vereq(b.getfoo(), 24)
    b = copy.deepcopy(a)
    vereq(b.foo, 24)
    vereq(b.getfoo(), 24)
def subtype_resurrection():
    # A __del__ that resurrects the dying instance must not break GC
    # finalization of new-style instances.
    if verbose:
        print "Testing resurrection of new-style instance..."
    class C(object):
        container = []
        def __del__(self):
            # resurrect the instance
            C.container.append(self)
    c = C()
    c.attr = 42
    # The most interesting thing here is whether this blows up, due to flawed
    # GC tracking logic in typeobject.c's call_finalizer() (a 2.2.1 bug).
    del c
    # If that didn't blow up, it's also interesting to see whether clearing
    # the last container slot works: that will attempt to delete c again,
    # which will cause c to get appended back to the container again "during"
    # the del.
    del C.container[-1]
    vereq(len(C.container), 1)
    vereq(C.container[-1].attr, 42)
    # Make c mortal again, so that the test framework with -l doesn't report
    # it as a leak.
    del C.__del__
def funnynew():
    # __new__ may legally return an object of a different type; __init__
    # must then be invoked only when the result is an instance of cls.
    if verbose: print "Testing __new__ returning something unexpected..."
    class C(object):
        def __new__(cls, arg):
            if isinstance(arg, str): return [1, 2, 3]
            elif isinstance(arg, int): return object.__new__(D)
            else: return object.__new__(cls)
    class D(C):
        def __init__(self, arg):
            self.foo = arg
    vereq(C("1"), [1, 2, 3])
    vereq(D("1"), [1, 2, 3])
    d = D(None)
    veris(d.foo, None)
    # C(1) returns a D, so D.__init__ runs and sets foo.
    d = C(1)
    vereq(isinstance(d, D), True)
    vereq(d.foo, 1)
    d = D(1)
    vereq(isinstance(d, D), True)
    vereq(d.foo, 1)
def subclass_right_op():
    # When the right operand's class is a proper subclass of the left
    # operand's class, the subclass's reflected method must be tried
    # first.
    if verbose:
        print "Testing correct dispatch of subclass overloading __r<op>__..."

    # This code tests various cases where right-dispatch of a subclass
    # should be preferred over left-dispatch of a base class.

    # Case 1: subclass of int; this tests code in abstract.c::binary_op1()

    class B(int):
        def __div__(self, other):
            return "B.__div__"
        def __rdiv__(self, other):
            return "B.__rdiv__"

    vereq(B(1) / 1, "B.__div__")
    vereq(1 / B(1), "B.__rdiv__")

    # Case 2: subclass of object; this is just the baseline for case 3

    class C(object):
        def __div__(self, other):
            return "C.__div__"
        def __rdiv__(self, other):
            return "C.__rdiv__"

    vereq(C(1) / 1, "C.__div__")
    vereq(1 / C(1), "C.__rdiv__")

    # Case 3: subclass of new-style class; here it gets interesting

    class D(C):
        def __div__(self, other):
            return "D.__div__"
        def __rdiv__(self, other):
            return "D.__rdiv__"

    vereq(D(1) / C(1), "D.__div__")
    vereq(C(1) / D(1), "D.__rdiv__")

    # Case 4: this didn't work right in 2.2.2 and 2.3a1

    class E(C):
        pass

    vereq(E.__rdiv__, C.__rdiv__)

    # E inherits C's methods unchanged, so no subclass preference applies.
    vereq(E(1) / 1, "C.__div__")
    vereq(1 / E(1), "C.__rdiv__")
    vereq(E(1) / C(1), "C.__div__")
    vereq(C(1) / E(1), "C.__div__") # This one would fail
def dict_type_with_metaclass():
    # A class built through __metaclass__ must still get the same kind
    # of __dict__ (a dict proxy) as any new-style class.
    if verbose:
        print "Testing type of __dict__ when __metaclass__ set..."
    class B(object):
        pass
    class M(type):
        pass
    class C:
        # In 2.3a1, C.__dict__ was a real dict rather than a dict proxy
        __metaclass__ = M
    veris(type(C.__dict__), type(B.__dict__))
def weakref_segfault():
    # SF 742911
    # A __del__ that dereferences a weakref to the object being torn
    # down must not segfault.
    if verbose:
        print "Testing weakref segfault..."

    import weakref

    class Provoker:
        def __init__(self, referrent):
            self.ref = weakref.ref(referrent)

        def __del__(self):
            x = self.ref()

    class Oops(object):
        pass

    o = Oops()
    o.whatever = Provoker(o)
    del o
def crash_in_get_sf736892():
    # SF 736892: func.__get__(None) without an owner class used to
    # produce a bogus bound method whose call crashed the interpreter.
    def func():
        pass

    try:
        f = func.__get__(None)
    except TypeError:
        pass
    else:
        # should not get here
        f(1) # crash
def test_main():
    # Run every sub-test in sequence; each raises TestFailed on error.
    weakref_segfault() # Must be first, somehow
    class_docstrings()
    lists()
    dicts()
    dict_constructor()
    test_dir()
    ints()
    longs()
    floats()
    complexes()
    spamlists()
    spamdicts()
    pydicts()
    pylists()
    metaclass()
    pymods()
    multi()
    diamond()
    objects()
    slots()
    dynamics()
    errors()
    classmethods()
    staticmethods()
    classic()
    compattr()
    newslot()
    altmro()
    overloading()
    methods()
    specials()
    weakrefs()
    properties()
    supers()
    inherits()
    keywords()
    restricted()
    str_subclass_as_dict_key()
    classic_comparisons()
    rich_comparisons()
    coercions()
    descrdoc()
    setclass()
    setdict()
    pickles()
    copies()
    binopoverride()
    subclasspropagation()
    buffer_inherit()
    str_of_str_subclass()
    kwdargs()
    delhook()
    hashinherit()
    strops()
    deepcopyrecursive()
    modules()
    pickleslots()
    docdescriptor()
    imulbug()
    copy_setstate()
    subtype_resurrection()
    funnynew()
    subclass_right_op()
    dict_type_with_metaclass()
    crash_in_get_sf736892()
    if verbose: print "All OK"
# Run the full suite when executed as a script.
if __name__ == "__main__":
    test_main()
| gpl-3.0 |
hsuantien/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too much correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <gael.varoquaux@inria.fr>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
#
# Draw a sparse ground-truth precision matrix, invert it to obtain the
# covariance, then rescale both so the covariance is a correlation matrix
# (unit diagonal).  Finally sample observations and standardise them.
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
                              smallest_coef=.4,
                              largest_coef=.7,
                              random_state=prng)
cov = linalg.inv(prec)
# d holds the per-feature standard deviations; dividing rows and columns
# of `cov` by it turns the covariance into a correlation matrix, and the
# matching row/column multiplication keeps `prec` its exact inverse.
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
#
# Compare three estimators: the raw empirical covariance, the l1-penalised
# GraphLasso (alpha chosen by internal cross-validation), and Ledoit-Wolf
# l2 shrinkage.
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
        ('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
    plt.subplot(2, 4, i + 1)
    plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
               cmap=plt.cm.RdBu_r)
    plt.xticks(())
    plt.yticks(())
    plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
         ('GraphLasso', prec_), ('True', prec)]
# clip the colour range so the (ill-conditioned) empirical precision does
# not wash out the other panels
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
    ax = plt.subplot(2, 4, i + 5)
    # masking exact zeros makes the recovered sparsity pattern visible
    plt.imshow(np.ma.masked_equal(this_prec, 0),
               interpolation='nearest', vmin=-vmax, vmax=vmax,
               cmap=plt.cm.RdBu_r)
    plt.xticks(())
    plt.yticks(())
    plt.title('%s precision' % name)
    # NOTE(review): set_axis_bgcolor was deprecated in matplotlib 2.0 in
    # favour of set_facecolor — confirm against the pinned version.
    ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
# NOTE(review): `grid_scores` matches old scikit-learn (<0.19); newer
# releases renamed the attribute to `grid_scores_` — confirm version.
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/matplotlib/tests/test_collections.py | 2 | 21231 | """
Tests specific to the collections module.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import io
import numpy as np
from numpy.testing import (
assert_array_equal, assert_array_almost_equal, assert_equal)
import pytest
import matplotlib.pyplot as plt
import matplotlib.collections as mcollections
import matplotlib.transforms as mtransforms
from matplotlib.collections import Collection, EventCollection
from matplotlib.testing.decorators import image_comparison
def generate_EventCollection_plot():
    '''
    Build an EventCollection with known properties, add it to a fresh
    axes, and return ``(axes, collection, properties_dict)`` so the
    individual tests can compare against the construction inputs.
    '''
    # All construction inputs live in one dict; 'extra_positions' is not
    # passed to the constructor, it is spare data for the add/extend tests.
    props = {'positions': np.array([0., 1., 2., 3., 5., 8., 13., 21.]),
             'extra_positions': np.array([34., 55., 89.]),
             'orientation': 'horizontal',
             'lineoffset': 1,
             'linelength': .5,
             'linewidth': 2,
             'color': [1, 0, 0, 1],
             'linestyle': 'solid',
             'antialiased': True
             }
    coll = EventCollection(props['positions'],
                           orientation=props['orientation'],
                           lineoffset=props['lineoffset'],
                           linelength=props['linelength'],
                           linewidth=props['linewidth'],
                           color=props['color'],
                           linestyle=props['linestyle'],
                           antialiased=props['antialiased'])
    fig = plt.figure()
    splt = fig.add_subplot(1, 1, 1)
    splt.add_collection(coll)
    splt.set_title('EventCollection: default')
    splt.set_xlim(-1, 22)
    splt.set_ylim(0, 2)
    return splt, coll, props
@image_comparison(baseline_images=['EventCollection_plot__default'])
def test__EventCollection__get_segments():
    '''
    check to make sure the default segments have the correct coordinates
    '''
    # The image comparison checks the default rendering; the call below
    # verifies the segment geometry numerically as well.
    _, coll, props = generate_EventCollection_plot()
    check_segments(coll,
                   props['positions'],
                   props['linelength'],
                   props['lineoffset'],
                   props['orientation'])
def test__EventCollection__get_positions():
    '''
    the positions reported by a default collection must equal the
    positions it was constructed from
    '''
    _, collection, expected = generate_EventCollection_plot()
    np.testing.assert_array_equal(expected['positions'],
                                  collection.get_positions())
def test__EventCollection__get_orientation():
    '''
    the orientation reported by a default collection must equal the
    orientation it was constructed with
    '''
    _, collection, expected = generate_EventCollection_plot()
    assert_equal(expected['orientation'], collection.get_orientation())
def test__EventCollection__is_horizontal():
    '''
    a default collection is created horizontal, so is_horizontal()
    must report True
    '''
    _, collection, _ = generate_EventCollection_plot()
    assert_equal(True, collection.is_horizontal())
def test__EventCollection__get_linelength():
    '''
    the linelength reported by a default collection must equal the
    input linelength
    '''
    _, collection, expected = generate_EventCollection_plot()
    assert_equal(expected['linelength'], collection.get_linelength())
def test__EventCollection__get_lineoffset():
    '''
    the lineoffset reported by a default collection must equal the
    input lineoffset
    '''
    _, collection, expected = generate_EventCollection_plot()
    assert_equal(expected['lineoffset'], collection.get_lineoffset())
def test__EventCollection__get_linestyle():
    '''
    a 'solid' input linestyle is normalized by the collection to the
    (offset, dash-pattern) form (None, None)
    '''
    _, collection, _ = generate_EventCollection_plot()
    assert_equal(collection.get_linestyle(), [(None, None)])
def test__EventCollection__get_color():
    '''
    the color reported by a default collection must equal the input
    color, both via get_color() and via every entry of get_colors()
    '''
    _, collection, expected = generate_EventCollection_plot()
    input_color = expected['color']
    np.testing.assert_array_equal(input_color, collection.get_color())
    check_allprop_array(collection.get_colors(), input_color)
@image_comparison(baseline_images=['EventCollection_plot__set_positions'])
def test__EventCollection__set_positions():
    '''
    check to make sure set_positions works properly
    '''
    splt, coll, props = generate_EventCollection_plot()
    # replace the positions wholesale with the original + extra ones
    new_positions = np.hstack([props['positions'], props['extra_positions']])
    coll.set_positions(new_positions)
    np.testing.assert_array_equal(new_positions, coll.get_positions())
    check_segments(coll, new_positions,
                   props['linelength'],
                   props['lineoffset'],
                   props['orientation'])
    splt.set_title('EventCollection: set_positions')
    splt.set_xlim(-1, 90)
@image_comparison(baseline_images=['EventCollection_plot__add_positions'])
def test__EventCollection__add_positions():
    '''
    check to make sure add_positions works properly
    '''
    splt, coll, props = generate_EventCollection_plot()
    # add_positions with a scalar appends a single event
    new_positions = np.hstack([props['positions'],
                               props['extra_positions'][0]])
    coll.add_positions(props['extra_positions'][0])
    np.testing.assert_array_equal(new_positions, coll.get_positions())
    check_segments(coll,
                   new_positions,
                   props['linelength'],
                   props['lineoffset'],
                   props['orientation'])
    splt.set_title('EventCollection: add_positions')
    splt.set_xlim(-1, 35)
@image_comparison(baseline_images=['EventCollection_plot__append_positions'])
def test__EventCollection__append_positions():
    '''
    check to make sure append_positions works properly
    '''
    splt, coll, props = generate_EventCollection_plot()
    # append_positions is an alias-style entry point for a single value
    new_positions = np.hstack([props['positions'],
                               props['extra_positions'][2]])
    coll.append_positions(props['extra_positions'][2])
    np.testing.assert_array_equal(new_positions, coll.get_positions())
    check_segments(coll,
                   new_positions,
                   props['linelength'],
                   props['lineoffset'],
                   props['orientation'])
    splt.set_title('EventCollection: append_positions')
    splt.set_xlim(-1, 90)
@image_comparison(baseline_images=['EventCollection_plot__extend_positions'])
def test__EventCollection__extend_positions():
    '''
    check to make sure extend_positions works properly
    '''
    splt, coll, props = generate_EventCollection_plot()
    # extend_positions accepts a sequence of new events
    new_positions = np.hstack([props['positions'],
                               props['extra_positions'][1:]])
    coll.extend_positions(props['extra_positions'][1:])
    np.testing.assert_array_equal(new_positions, coll.get_positions())
    check_segments(coll,
                   new_positions,
                   props['linelength'],
                   props['lineoffset'],
                   props['orientation'])
    splt.set_title('EventCollection: extend_positions')
    splt.set_xlim(-1, 90)
@image_comparison(baseline_images=['EventCollection_plot__switch_orientation'])
def test__EventCollection__switch_orientation():
    '''
    check to make sure switch_orientation works properly
    '''
    splt, coll, props = generate_EventCollection_plot()
    new_orientation = 'vertical'
    coll.switch_orientation()
    assert_equal(new_orientation, coll.get_orientation())
    assert_equal(False, coll.is_horizontal())
    new_positions = coll.get_positions()
    check_segments(coll,
                   new_positions,
                   props['linelength'],
                   props['lineoffset'], new_orientation)
    splt.set_title('EventCollection: switch_orientation')
    # axes limits are swapped to match the new vertical orientation
    splt.set_ylim(-1, 22)
    splt.set_xlim(0, 2)
@image_comparison(
    baseline_images=['EventCollection_plot__switch_orientation__2x'])
def test__EventCollection__switch_orientation_2x():
    '''
    check to make sure calling switch_orientation twice sets the
    orientation back to the default
    '''
    splt, coll, props = generate_EventCollection_plot()
    coll.switch_orientation()
    coll.switch_orientation()
    # after a double switch everything must match the original state
    new_positions = coll.get_positions()
    assert_equal(props['orientation'], coll.get_orientation())
    assert_equal(True, coll.is_horizontal())
    np.testing.assert_array_equal(props['positions'], new_positions)
    check_segments(coll,
                   new_positions,
                   props['linelength'],
                   props['lineoffset'],
                   props['orientation'])
    splt.set_title('EventCollection: switch_orientation 2x')
@image_comparison(baseline_images=['EventCollection_plot__set_orientation'])
def test__EventCollection__set_orientation():
    '''
    check to make sure set_orientation works properly
    '''
    splt, coll, props = generate_EventCollection_plot()
    new_orientation = 'vertical'
    coll.set_orientation(new_orientation)
    assert_equal(new_orientation, coll.get_orientation())
    assert_equal(False, coll.is_horizontal())
    check_segments(coll,
                   props['positions'],
                   props['linelength'],
                   props['lineoffset'],
                   new_orientation)
    splt.set_title('EventCollection: set_orientation')
    # axes limits are swapped to match the new vertical orientation
    splt.set_ylim(-1, 22)
    splt.set_xlim(0, 2)
@image_comparison(baseline_images=['EventCollection_plot__set_linelength'])
def test__EventCollection__set_linelength():
    '''
    check to make sure set_linelength works properly
    '''
    splt, coll, props = generate_EventCollection_plot()
    new_linelength = 15
    coll.set_linelength(new_linelength)
    assert_equal(new_linelength, coll.get_linelength())
    check_segments(coll,
                   props['positions'],
                   new_linelength,
                   props['lineoffset'],
                   props['orientation'])
    splt.set_title('EventCollection: set_linelength')
    # widen the y-range so the longer event lines stay visible
    splt.set_ylim(-20, 20)
@image_comparison(baseline_images=['EventCollection_plot__set_lineoffset'])
def test__EventCollection__set_lineoffset():
    '''
    check to make sure set_lineoffset works properly
    '''
    splt, coll, props = generate_EventCollection_plot()
    new_lineoffset = -5.
    coll.set_lineoffset(new_lineoffset)
    assert_equal(new_lineoffset, coll.get_lineoffset())
    check_segments(coll,
                   props['positions'],
                   props['linelength'],
                   new_lineoffset,
                   props['orientation'])
    splt.set_title('EventCollection: set_lineoffset')
    # re-centre the y-range around the new offset
    splt.set_ylim(-6, -4)
@image_comparison(baseline_images=['EventCollection_plot__set_linestyle'])
def test__EventCollection__set_linestyle():
    '''
    check to make sure set_linestyle works properly
    '''
    splt, coll, _ = generate_EventCollection_plot()
    new_linestyle = 'dashed'
    coll.set_linestyle(new_linestyle)
    # 'dashed' is normalized to an (offset, on/off-sequence) pair
    assert_equal(coll.get_linestyle(), [(0, (6.0, 6.0))])
    splt.set_title('EventCollection: set_linestyle')
@image_comparison(baseline_images=['EventCollection_plot__set_ls_dash'],
                  remove_text=True)
def test__EventCollection__set_linestyle_single_dash():
    '''
    check to make sure set_linestyle accepts a single dash pattern
    '''
    splt, coll, _ = generate_EventCollection_plot()
    # an explicit (offset, on/off-sequence) pair is stored unchanged
    new_linestyle = (0, (6., 6.))
    coll.set_linestyle(new_linestyle)
    assert_equal(coll.get_linestyle(), [(0, (6.0, 6.0))])
    splt.set_title('EventCollection: set_linestyle')
@image_comparison(baseline_images=['EventCollection_plot__set_linewidth'])
def test__EventCollection__set_linewidth():
    '''
    check to make sure set_linewidth works properly
    '''
    splt, coll, _ = generate_EventCollection_plot()
    new_linewidth = 5
    coll.set_linewidth(new_linewidth)
    assert_equal(coll.get_linewidth(), new_linewidth)
    splt.set_title('EventCollection: set_linewidth')
@image_comparison(baseline_images=['EventCollection_plot__set_color'])
def test__EventCollection__set_color():
    '''
    check to make sure set_color works properly
    '''
    splt, coll, _ = generate_EventCollection_plot()
    new_color = np.array([0, 1, 1, 1])
    coll.set_color(new_color)
    # both the scalar accessor and the per-event list must reflect it
    np.testing.assert_array_equal(new_color, coll.get_color())
    check_allprop_array(coll.get_colors(), new_color)
    splt.set_title('EventCollection: set_color')
def check_segments(coll, positions, linelength, lineoffset, orientation):
    '''
    check to make sure all values in the segment are correct, given a
    particular set of inputs

    note: this is not a test, it is used by tests
    '''
    segments = coll.get_segments()
    # Check for None *before* calling .lower(): the previous ordering
    # evaluated orientation.lower() first, so orientation=None raised
    # AttributeError instead of being treated as horizontal.
    if (orientation is None
            or orientation.lower() == 'horizontal'
            or orientation.lower() == 'none'):
        # if horizontal, the position is in the y-axis
        pos1 = 1
        pos2 = 0
    elif orientation.lower() == 'vertical':
        # if vertical, the position is in the x-axis
        pos1 = 0
        pos2 = 1
    else:
        raise ValueError("orientation must be 'horizontal' or 'vertical'")
    # each segment spans linelength centred on lineoffset, at positions[i]
    for i, segment in enumerate(segments):
        assert_equal(segment[0, pos1], lineoffset + linelength / 2.)
        assert_equal(segment[1, pos1], lineoffset - linelength / 2.)
        assert_equal(segment[0, pos2], positions[i])
        assert_equal(segment[1, pos2], positions[i])
def check_allprop_array(values, target):
    '''
    assert that every entry of *values* equals *target*, element-wise

    note: this is not a test, it is used by tests
    '''
    for entry in values:
        np.testing.assert_array_equal(entry, target)
def test_null_collection_datalim():
    # An empty PathCollection must report a null bounding box.
    empty = mcollections.PathCollection([])
    datalim = empty.get_datalim(mtransforms.IdentityTransform())
    null_points = mtransforms.Bbox.null().get_points()
    assert_array_equal(datalim.get_points(), null_points)
def test_add_collection():
    # Test if data limits are unchanged by adding an empty collection.
    # Github issue #1490, pull #1497.
    plt.figure()
    ax = plt.axes()
    coll = ax.scatter([0, 1], [0, 1])
    ax.add_collection(coll)
    bounds = ax.dataLim.bounds
    # an empty scatter must leave the previously computed limits intact
    coll = ax.scatter([], [])
    assert_equal(ax.dataLim.bounds, bounds)
def test_quiver_limits():
    ax = plt.axes()
    x, y = np.arange(8), np.arange(10)
    u = v = np.linspace(0, 10, 80).reshape(10, 8)
    q = plt.quiver(x, y, u, v)
    # limits follow the arrow tail grid (0..7 by 0..9)
    assert_equal(q.get_datalim(ax.transData).bounds, (0., 0., 7., 9.))
    plt.figure()
    ax = plt.axes()
    x = np.linspace(-5, 10, 20)
    y = np.linspace(-2, 4, 10)
    y, x = np.meshgrid(y, x)
    # an offset transform must shift the reported data limits
    trans = mtransforms.Affine2D().translate(25, 32) + ax.transData
    plt.quiver(x, y, np.sin(x), np.cos(y), transform=trans)
    assert_equal(ax.dataLim.bounds, (20.0, 30.0, 15.0, 6.0))
def test_barb_limits():
    ax = plt.axes()
    x = np.linspace(-5, 10, 20)
    y = np.linspace(-2, 4, 10)
    y, x = np.meshgrid(y, x)
    trans = mtransforms.Affine2D().translate(25, 32) + ax.transData
    plt.barbs(x, y, np.sin(x), np.cos(y), transform=trans)
    # The calculated bounds are approximately the bounds of the original data,
    # this is because the entire path is taken into account when updating the
    # datalim.
    assert_array_almost_equal(ax.dataLim.bounds, (20, 30, 15, 6),
                              decimal=1)
@image_comparison(baseline_images=['EllipseCollection_test_image'],
                  extensions=['png'],
                  remove_text=True)
def test_EllipseCollection():
    # Test basic functionality
    fig, ax = plt.subplots()
    x = np.arange(4)
    y = np.arange(3)
    X, Y = np.meshgrid(x, y)
    XY = np.vstack((X.ravel(), Y.ravel())).T
    # widths/heights grow linearly across the grid, normalised to [0, 1]
    ww = X/float(x[-1])
    hh = Y/float(y[-1])
    aa = np.ones_like(ww) * 20  # first axis is 20 degrees CCW from x axis
    ec = mcollections.EllipseCollection(ww, hh, aa,
                                        units='x',
                                        offsets=XY,
                                        transOffset=ax.transData,
                                        facecolors='none')
    ax.add_collection(ec)
    ax.autoscale_view()
@image_comparison(baseline_images=['polycollection_close'],
                  extensions=['png'], remove_text=True)
def test_polycollection_close():
    from mpl_toolkits.mplot3d import Axes3D
    vertsQuad = [
        [[0., 0.], [0., 1.], [1., 1.], [1., 0.]],
        [[0., 1.], [2., 3.], [2., 2.], [1., 1.]],
        [[2., 2.], [2., 3.], [4., 1.], [3., 1.]],
        [[3., 0.], [3., 1.], [4., 1.], [4., 0.]]]
    fig = plt.figure()
    ax = Axes3D(fig)
    colors = ['r', 'g', 'b', 'y', 'k']
    zpos = list(range(5))
    # the same four quads repeated at each z layer
    poly = mcollections.PolyCollection(
        vertsQuad * len(zpos), linewidth=0.25)
    poly.set_alpha(0.7)
    # need to have a z-value for *each* polygon = element!
    zs = []
    cs = []
    for z, c in zip(zpos, colors):
        zs.extend([z] * len(vertsQuad))
        cs.extend([c] * len(vertsQuad))
    poly.set_color(cs)
    ax.add_collection3d(poly, zs=zs, zdir='y')
    # axis limit settings:
    ax.set_xlim3d(0, 4)
    ax.set_zlim3d(0, 3)
    ax.set_ylim3d(0, 4)
@image_comparison(baseline_images=['regularpolycollection_rotate'],
                  extensions=['png'], remove_text=True)
def test_regularpolycollection_rotate():
    xx, yy = np.mgrid[:10, :10]
    xy_points = np.transpose([xx.flatten(), yy.flatten()])
    # one full turn spread over the grid of squares
    rotations = np.linspace(0, 2*np.pi, len(xy_points))
    fig, ax = plt.subplots()
    for xy, alpha in zip(xy_points, rotations):
        col = mcollections.RegularPolyCollection(
            4, sizes=(100,), rotation=alpha,
            offsets=[xy], transOffset=ax.transData)
        ax.add_collection(col, autolim=True)
    ax.autoscale_view()
@image_comparison(baseline_images=['regularpolycollection_scale'],
                  extensions=['png'], remove_text=True)
def test_regularpolycollection_scale():
    # See issue #3860
    class SquareCollection(mcollections.RegularPolyCollection):
        def __init__(self, **kwargs):
            super(SquareCollection, self).__init__(
                4, rotation=np.pi/4., **kwargs)
        def get_transform(self):
            """Return transform scaling circle areas to data space."""
            ax = self.axes
            # points-per-pixel at the current figure dpi
            pts2pixels = 72.0 / ax.figure.dpi
            scale_x = pts2pixels * ax.bbox.width / ax.viewLim.width
            scale_y = pts2pixels * ax.bbox.height / ax.viewLim.height
            return mtransforms.Affine2D().scale(scale_x, scale_y)
    fig, ax = plt.subplots()
    xy = [(0, 0)]
    # Unit square has a half-diagonal of `1 / sqrt(2)`, so `pi * r**2`
    # equals...
    circle_areas = [np.pi / 2]
    squares = SquareCollection(sizes=circle_areas, offsets=xy,
                               transOffset=ax.transData)
    ax.add_collection(squares, autolim=True)
    ax.axis([-1, 1, -1, 1])
def test_picking():
    fig, ax = plt.subplots()
    col = ax.scatter([0], [0], [1000], picker=True)
    # rendering to an in-memory buffer forces a draw so the transforms
    # queried by contains() are up to date
    fig.savefig(io.BytesIO(), dpi=fig.dpi)
    class MouseEvent(object):
        pass
    # fake a mouse event at fixed pixel coordinates over the point
    event = MouseEvent()
    event.x = 325
    event.y = 240
    found, indices = col.contains(event)
    assert found
    assert_array_equal(indices['ind'], [0])
def test_linestyle_single_dashes():
    # A single (offset, on/off-sequence) dash spec must be accepted.
    dash_spec = (0., [2., 2.])
    plt.scatter([0, 1, 2], [0, 1, 2], linestyle=dash_spec)
    plt.draw()
@image_comparison(baseline_images=['size_in_xy'], remove_text=True,
                  extensions=['png'])
def test_size_in_xy():
    # With units='xy' the ellipse sizes are interpreted in data units.
    fig, ax = plt.subplots()
    # (the previous version redundantly re-assigned `widths = 10, 10`
    # immediately after this line with the identical value)
    widths, heights, angles = (10, 10), 10, 0
    coords = [(10, 10), (15, 15)]
    e = mcollections.EllipseCollection(
        widths, heights, angles,
        units='xy',
        offsets=coords,
        transOffset=ax.transData)
    ax.add_collection(e)
    ax.set_xlim(0, 30)
    ax.set_ylim(0, 30)
def test_pandas_indexing():
    pd = pytest.importorskip('pandas')
    # Should not break when faced with a
    # non-zero indexed series
    index = [11, 12, 13]
    ec = fc = pd.Series(['red', 'blue', 'green'], index=index)
    lw = pd.Series([1, 2, 3], index=index)
    ls = pd.Series(['solid', 'dashed', 'dashdot'], index=index)
    aa = pd.Series([True, False, True], index=index)
    # constructing a Collection must not assume zero-based positions
    Collection(edgecolors=ec)
    Collection(facecolors=fc)
    Collection(linewidths=lw)
    Collection(linestyles=ls)
    Collection(antialiaseds=aa)
@pytest.mark.style('default')
def test_lslw_bcast():
    # linestyles and linewidths of different lengths are broadcast
    # against each other up to a common length (2 and 3 -> 6)
    col = mcollections.PathCollection([])
    col.set_linestyles(['-', '-'])
    col.set_linewidths([1, 2, 3])
    assert_equal(col.get_linestyles(), [(None, None)] * 6)
    assert_equal(col.get_linewidths(), [1, 2, 3] * 2)
    # equal lengths collapse back to the unbroadcast values
    col.set_linestyles(['-', '-', '-'])
    assert_equal(col.get_linestyles(), [(None, None)] * 3)
    assert_equal(col.get_linewidths(), [1, 2, 3])
@image_comparison(baseline_images=['scatter_post_alpha'],
                  extensions=['png'], remove_text=True,
                  style='default')
def test_scatter_post_alpha():
    fig, ax = plt.subplots()
    sc = ax.scatter(range(5), range(5), c=range(5))
    # this needs to be here to update internal state
    fig.canvas.draw()
    # alpha applied after the first draw must be honoured on re-draw
    sc.set_alpha(.1)
| mit |
sthyme/ZFSchizophrenia | BehaviorAnalysis/Alternative_Analyses/Correlation_between_genes/correlations_DISTANCE_betweengenes.py | 1 | 5605 | import matplotlib
matplotlib.use('Agg')

import matplotlib.pylab as plt
import matplotlib.colors as mat_col
from matplotlib.colors import LinearSegmentedColormap
import scipy
import scipy.cluster.hierarchy as sch
from scipy.cluster.hierarchy import set_link_color_palette
import numpy as np
import pandas as pd
import glob
from scipy.stats import pearsonr
from scipy.stats import spearmanr
from scipy.spatial import distance

# Pairwise correlation and euclidean distance between the columns of a
# behavior matrix (columns = genes, rows = behavioral measures).
# Writes two square matrices, one per metric, as CSV files.
Db = pd.read_csv("AUG16_12_dectest.csv")

# Binarize the non-significant entries only: signed p-values in
# (-0.05, 0.05) are kept as-is, everything else is collapsed to 1.
Db = Db.apply(lambda x: [y if -0.05 < y < 0.05 else 1 for y in x])

# One row per column: corr_by_col[a][i] / dist_by_col[a][i] hold the
# metric between column a and the i-th column of the frame.
# (The previous version also built an unused pd.concat frame and tuple
# list for every pair, and tested membership via `in dict.keys()`.)
corr_by_col = {}
dist_by_col = {}
columns = list(Db.columns)
for col_a in Db:
    for col_b in Db:
        # Pearson correlation; require at least 6 overlapping values.
        corr = Db[col_a].corr(Db[col_b], min_periods=6)
        dist = distance.euclidean(Db[col_a], Db[col_b])
        corr_by_col.setdefault(col_a, []).append(corr)
        dist_by_col.setdefault(col_a, []).append(dist)

dfcor = pd.DataFrame.from_dict(corr_by_col, orient='index')
dfcor.columns = columns
dfdist = pd.DataFrame.from_dict(dist_by_col, orient='index')
dfdist.columns = columns

# Sort rows alphabetically so the output is stable, then write.
dfcor = dfcor.sort_index()
dfdist = dfdist.sort_index()
dfcor.to_csv("dec_correlation_sort1.csv")
dfdist.to_csv("dec_distance_sort1.csv")
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.