repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
phlizik/xbmc | lib/libUPnP/Platinum/Build/Tools/Scripts/GenSvnVersionHeader.py | 263 | 1526 | #! /usr/bin/python
#############################################################
# This tool is used to generate the version info file       #
#############################################################
import sys
import os

# Ensure that PLATINUM_KIT_HOME has been set (Python 2: dict.has_key).
if not os.environ.has_key('PLATINUM_KIT_HOME'):
    print 'ERROR: PLATINUM_KIT_HOME not set'
    sys.exit(1)
PLATINUM_KIT_HOME = os.environ['PLATINUM_KIT_HOME']

# Ensure that the PLATINUM_KIT_HOME directory exists.
if not os.path.exists(PLATINUM_KIT_HOME) :
    print 'ERROR: PLATINUM_KIT_HOME ('+PLATINUM_KIT_HOME+') does not exist'
    sys.exit(1)
else :
    print 'PLATINUM_KIT_HOME = ' + PLATINUM_KIT_HOME

# Get the SVN repo version; svnversion appends suffix letters for
# modified/partial working copies.
version = os.popen('svnversion -n').readlines()[0]
print 'current VERSION =',version
# NOTE(review): the 'MP' test runs after a trailing 'P' has already been
# stripped, so it can never match a raw "....MP" string — likely the second
# check was meant to be 'M'. Confirm against svnversion output before changing.
if version.endswith('P'):
    version = version[0:-1]
if version.endswith('MP'):
    version = version[0:-2]
try:
    version_int = int(version)+1 ## add one, because when we check it in, the rev will be incremented by one
except:
    print 'ERROR: you cannot run this on a modified working copy'
    sys.exit(1)

# Emit the generated C header with the numeric and string forms.
output = open(PLATINUM_KIT_HOME+'/Platinum/Source/Platinum/PltSvnVersion.h', 'w+')
output.write('/* DO NOT EDIT. This file was automatically generated by GenSvnVersionHeader.py */\n')
output.write('#define PLT_SVN_VERSION '+str(version_int)+'\n')
output.write('#define PLT_SVN_VERSION_STRING "'+str(version_int)+'"\n')
output.close()
print 'upon check-in, version will be', str(version_int)
| gpl-2.0 |
joshfriend/sqlalchemy-utils | tests/types/test_json.py | 2 | 1521 | # -*- coding: utf-8 -*-
from pytest import mark
import sqlalchemy as sa
from sqlalchemy_utils.types import json
from tests import TestCase
class JSONTestCase(TestCase):
    """Shared round-trip tests for the sqlalchemy_utils JSONType column."""

    def create_models(self):
        # One model with a single JSONType column, built on the declarative
        # base supplied by the inherited TestCase fixture.
        class Document(self.Base):
            __tablename__ = 'document'
            id = sa.Column(sa.Integer, primary_key=True)
            json = sa.Column(json.JSONType)
        self.Document = Document

    def test_list(self):
        # JSON lists survive a commit/reload cycle unchanged.
        document = self.Document(
            json=[1, 2, 3]
        )
        self.session.add(document)
        self.session.commit()
        document = self.session.query(self.Document).first()
        assert document.json == [1, 2, 3]

    def test_parameter_processing(self):
        # Dict values are stored and returned as dicts.
        document = self.Document(
            json={'something': 12}
        )
        self.session.add(document)
        self.session.commit()
        document = self.session.query(self.Document).first()
        assert document.json == {'something': 12}

    def test_non_ascii_chars(self):
        # Non-ASCII text must round-trip without mangling the encoding.
        document = self.Document(
            json={'something': u'äääööö'}
        )
        self.session.add(document)
        self.session.commit()
        document = self.session.query(self.Document).first()
        assert document.json == {'something': u'äääööö'}
@mark.skipif('json.json is None')
class TestSqliteJSONType(JSONTestCase):
    """Run the shared JSON tests against the default (SQLite) backend."""
    pass
@mark.skipif('json.json is None')
class TestPostgresJSONType(JSONTestCase):
    """Run the shared JSON tests against a PostgreSQL database."""
    # NOTE(review): attribute is spelled 'dns' — looks like a typo for 'dsn'.
    # Confirm which name the inherited TestCase actually reads before renaming.
    dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
| bsd-3-clause |
837468220/python-for-android | python3-alpha/python3-src/Lib/test/testcodec.py | 203 | 1046 | """ Test Codecs (used by test_charmapcodec)
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Charmap codec backed by the module-level encoding/decoding maps."""

    def encode(self,input,errors='strict'):
        # Delegates to the C-level charmap encoder with this module's map.
        return codecs.charmap_encode(input,errors,encoding_map)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer using this module's charmap Codec."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader using this module's charmap Codec."""
    pass
### encodings module API
def getregentry():
    """Return the codecs registry tuple: (encode, decode, reader, writer)."""
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
# Start from the identity mapping over all 256 byte values, then add the
# special cases exercised by test_charmapcodec.
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x78: "abc", # 1-n decoding mapping
    b"abc": 0x0078,# 1-n encoding mapping
    0x01: None, # decoding mapping to <undefined>
    0x79: "", # decoding mapping to <remove character>
})

### Encoding Map
# Inverse of the decoding map (last writer wins on duplicate values).
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k
| apache-2.0 |
openprocurement/openprocurement.search | openprocurement/search/source/plan.py | 2 | 9860 | # -*- coding: utf-8 -*-
from time import time, mktime
from datetime import datetime, timedelta
from iso8601 import parse_date
from socket import setdefaulttimeout
from retrying import retry
from openprocurement.search.source import BaseSource, TendersClient
from openprocurement.search.source.orgs import OrgsDecoder
from openprocurement.search.utils import restkit_error
from logging import getLogger
logger = getLogger(__name__)
class PlanSource(BaseSource):
    """Plan Source from open openprocurement.API.plans
    """
    __doc_type__ = 'plan'

    # Default configuration; overridden by the dict passed to __init__.
    config = {
        'plan_api_key': '',
        'plan_api_url': "",
        'plan_api_version': '0',
        'plan_resource': 'plans',
        'plan_api_mode': '',
        'plan_skip_after': None,
        'plan_skip_until': None,
        'plan_limit': 1000,
        'plan_preload': 10000,
        'plan_reseteach': 3,
        'plan_resethour': 23,
        'plan_decode_orgs': False,
        'plan_fast_client': False,
        'plan_fast_stepsback': 10,
        'plan_user_agent': '',
        'plan_file_cache': '',
        'plan_cache_minage': 15,
        'timeout': 30,
    }

    def __init__(self, config={}, use_cache=False):
        # NOTE: self.config is a class-level dict shared by instances;
        # update() mutates it in place (pre-existing behavior, left as-is).
        if config:
            self.config.update(config)
        self.config['plan_limit'] = int(self.config['plan_limit'] or 0) or 100
        self.config['plan_preload'] = int(self.config['plan_preload'] or 0) or 100
        self.config['plan_reseteach'] = int(self.config['plan_reseteach'] or 3)
        self.config['plan_resethour'] = int(self.config['plan_resethour'] or 0)
        self.client_user_agent += " (plans) " + self.config['plan_user_agent']
        if use_cache:
            self.cache_setpath(self.config['plan_file_cache'], self.config['plan_api_url'],
                self.config['plan_api_version'], 'plans')
        self.fast_client = None
        self.client = None
        self.orgs_db = None

    def procuring_entity(self, item):
        """Return the procuringEntity of an item, or None if absent."""
        return item.data.get('procuringEntity', None)

    def patch_version(self, item):
        """Convert dateModified to long version
        """
        item['doc_type'] = self.__doc_type__
        dt = parse_date(item['dateModified'])
        # Microsecond-resolution monotonic version number (Python 2 long).
        version = 1e6 * mktime(dt.timetuple()) + dt.microsecond
        item['version'] = long(version)
        return item

    def patch_plan(self, plan):
        """Fill in a missing 'date' field and optionally decode the
        procuring entity's official name from the EDRPOU registry."""
        if 'date' not in plan['data']:
            if 'datePublished' in plan['data']:
                plan['data']['date'] = plan['data']['datePublished']
            else:
                # Fall back to the date embedded in the planID
                # (format ...-20YY-MM-DD...).
                planID = plan['data']['planID']
                pos = planID.find('-20')
                plan['data']['date'] = planID[pos+1:pos+11]
        # decode official org name from EDRPOU registry
        if self.config['plan_decode_orgs'] and self.orgs_db:
            if 'procuringEntity' in plan['data']:
                self.orgs_db.patch_entity(plan['data']['procuringEntity'])
        return plan

    def need_reset(self):
        """Decide whether the API clients should be re-created.

        Returns True on explicit request or after plan_reseteach hours;
        once per hour it also triggers at the configured reset hour.
        (Implicitly returns None otherwise.)
        """
        if self.should_reset:
            return True
        if time() - self.last_reset_time > 3600 * int(self.config['plan_reseteach']):
            return True
        if time() - self.last_reset_time > 3600:
            return datetime.now().hour == int(self.config['plan_resethour'])

    @retry(stop_max_attempt_number=5, wait_fixed=5000)
    def reset(self):
        """(Re)create the API clients, cache window and skip bounds."""
        logger.info("Reset plans, plan_skip_until=%s plan_skip_after=%s",
            self.config['plan_skip_until'], self.config['plan_skip_after'])
        self.stat_resets += 1
        if self.config['plan_decode_orgs']:
            self.orgs_db = OrgsDecoder(self.config)
        if self.config.get('timeout', None):
            setdefaulttimeout(float(self.config['timeout']))
        params = {}
        if self.config['plan_api_mode']:
            params['mode'] = self.config['plan_api_mode']
        if self.config['plan_limit']:
            params['limit'] = self.config['plan_limit']
        self.client = TendersClient(
            key=self.config['plan_api_key'],
            host_url=self.config['plan_api_url'],
            api_version=self.config['plan_api_version'],
            resource=self.config['plan_resource'],
            params=params,
            timeout=float(self.config['timeout']),
            user_agent=self.client_user_agent)
        logger.info("PlansClient %s", self.client.headers)
        if self.config['plan_fast_client']:
            # The "fast" client first walks backwards from the feed head
            # (descending), then continues forward from that point.
            fast_params = dict(params)
            fast_params['descending'] = 1
            self.fast_client = TendersClient(
                key=self.config['plan_api_key'],
                host_url=self.config['plan_api_url'],
                api_version=self.config['plan_api_version'],
                resource=self.config['plan_resource'],
                params=fast_params,
                timeout=float(self.config['timeout']),
                user_agent=self.client_user_agent+" fast_client")
            for i in range(int(self.config['plan_fast_stepsback'])):
                self.fast_client.get_tenders()
            self.fast_client.params.pop('descending')
            logger.info("PlansClient (fast) %s", self.fast_client.headers)
        else:
            self.fast_client = None
        if self.config['plan_file_cache'] and self.cache_path:
            # Only documents older than plan_cache_minage days may be
            # served from the file cache.
            cache_minage = int(self.config['plan_cache_minage'])
            cache_date = datetime.now() - timedelta(days=cache_minage)
            self.cache_allow_dateModified = cache_date.isoformat()
            logger.info("[plan] Cache allow dateModified before %s",
                self.cache_allow_dateModified)
        # Skip bounds must look like ISO dates (start with "20").
        self.skip_until = self.config.get('plan_skip_until', None)
        if self.skip_until and self.skip_until[:2] != '20':
            self.skip_until = None
        self.skip_after = self.config.get('plan_skip_after', None)
        if self.skip_after and self.skip_after[:2] != '20':
            self.skip_after = None
        self.last_reset_time = time()
        self.should_reset = False

    def preload(self):
        """Fetch a batch of plan stubs from the feed, retrying on errors."""
        preload_items = []
        # try prelaod last plans first
        if self.fast_client:
            try:
                items = self.fast_client.get_tenders()
                self.stat_queries += 1
                if not len(items):
                    logger.debug("Preload fast 0 plans")
                    raise ValueError()
                preload_items.extend(items)
                logger.info("Preload fast %d plans, last %s",
                    len(preload_items), items[-1]['dateModified'])
            except:
                # Best-effort: any failure falls through to the main client.
                pass
        retry_count = 0
        while True:
            if retry_count > 3 or self.should_exit:
                break
            try:
                items = self.client.get_tenders()
                self.stat_queries += 1
            except Exception as e:
                retry_count += 1
                logger.error("GET %s retry %d count %d error %s", self.client.prefix_path,
                    retry_count, len(preload_items), restkit_error(e, self.client))
                self.sleep(5 * retry_count)
                self.reset()
                continue
            if not items:
                break
            preload_items.extend(items)
            if len(preload_items) >= 100:
                logger.info("Preload %d plans, last %s",
                    len(preload_items), items[-1]['dateModified'])
            if len(items) < 10:
                # Near the feed head; drop the fast client.
                self.fast_client = None
                break
            if len(preload_items) >= self.config['plan_preload']:
                break
        return preload_items

    def items(self):
        """Yield version-patched plan stubs, honoring the skip bounds."""
        if not self.client:
            self.reset()
        self.last_skipped = None
        for plan in self.preload():
            if self.should_exit:
                raise StopIteration()
            if self.skip_until and self.skip_until > plan['dateModified']:
                self.last_skipped = plan['dateModified']
                self.stat_skipped += 1
                continue
            if self.skip_after and self.skip_after < plan['dateModified']:
                self.last_skipped = plan['dateModified']
                self.stat_skipped += 1
                continue
            self.stat_fetched += 1
            yield self.patch_version(plan)

    def cache_allow(self, data):
        """Allow caching only for documents older than the cache window."""
        if data and data['data']['dateModified'] < self.cache_allow_dateModified:
            return True
        return False

    def get(self, item):
        """Fetch the full plan document for a stub, with retries and cache."""
        plan = {}
        retry_count = 0
        if self.cache_path:
            plan = self.cache_get(item)
        while not plan:
            if self.should_exit:
                break
            try:
                plan = self.client.get_tender(item['id'])
                assert plan['data']['id'] == item['id'], "plan.id"
                assert plan['data']['dateModified'] >= item['dateModified'], "plan.dateModified"
            except Exception as e:
                if retry_count > 7:
                    raise e
                retry_count += 1
                logger.error("GET %s/%s retry %d error %s", self.client.prefix_path,
                    str(item['id']), retry_count, restkit_error(e, self.client))
                self.sleep(10 * retry_count)
                if retry_count > 5:
                    self.reset()
                plan = {}
        # save to cache
        if plan and self.cache_path:
            self.cache_put(plan)
        if item['dateModified'] != plan['data']['dateModified']:
            # Feed stub is stale; trust the full document's timestamp.
            logger.debug("Plan dateModified mismatch %s %s %s",
                item['id'], item['dateModified'],
                plan['data']['dateModified'])
            item['dateModified'] = plan['data']['dateModified']
            item = self.patch_version(item)
        plan['meta'] = item
        self.stat_getitem += 1
        return self.patch_plan(plan)
| apache-2.0 |
AndrewBMartin/pygurobi | pygurobi/pygurobi.py | 1 | 31972 | """
Functions to support rapid interactive modification of Gurobi models.
For reference on Gurobi objects such as Models, Variables, and Constraints, see
http://www.gurobi.com/documentation/7.0/refman/py_python_api_overview.html.
"""
import csv
import json
try:
import gurobipy as gp
except ImportError:
raise ImportError("gurobipy not installed. Please see {0} to download".format(
"https://www.gurobi.com/documentation/6.5/quickstart_mac/the_gurobi_python_interfac.html"))
# Assuming that constraints are of the form:
# constraintName(index1,index2,...,indexN).
# Assuming that variables are of the form:
# variableName[index1,index2,...,indexN]
CON_BRACKET_L = "("
CON_BRACKET_R = ")"
VAR_BRACKET_L = "["
VAR_BRACKET_R = "]"

# 13 July 2016 - Need to sort out capitalization here for attributes

# Attributes of a Gurobi variable
VAR_ATTRS = ["LB", "UB", "Obj", "VType", "VarName", "X", "Xn", "RC",
             "BarX", "Start", "VarHintVal", "VarHintPri", "BranchPriority",
             "VBasis", "PStart", "IISLB", "IISUB", "PWLObjCvx",
             "SAObjLow", "SAObjUp", "SALBLow", "SALBUp",
             "SAUBLow", "SAUBUp", "UnbdRay"]

# Attributes of a Gurobi constraint
CON_ATTRS = ["Sense", "RHS", "ConstrName", "Pi", "Slack",
             "CBasis", "DStart", "Lazy", "IISConstr",
             "SARHSLow", "SARHSUp", "FarkasDual"]
def read_model(filename):
    """Load and return a Gurobi model from the given file path."""
    return gp.read(filename)
def reoptimize(m):
    """
    Update, reset, and optimize a model, in that order.
    """
    for step in (m.update, m.reset, m.optimize):
        step()
def get_variable_attrs():
    """
    Return a list of variable attributes.

    Details of attributes found at the Gurobi
    website:
    http://www.gurobi.com/documentation/6.5/refman/attributes.html
    """
    return VAR_ATTRS
def get_constraint_attrs():
    """
    Return a list of constraint attributes.

    Details of attributes found at the Gurobi
    website:
    http://www.gurobi.com/documentation/6.5/refman/attributes.html
    """
    return CON_ATTRS
def list_constraints(model):
    """
    Print to screen the constraint sets in the model.

    Show the name of each constraint set along with the
    number of constraints in that set.

    A constraint set is composed of all constraints
    sharing the same string identifier before the indices:

    A(2,3,4) and A(1,2,3) are in the same constraint set, A;
    A(2,3,4) and B(2,3,4) are in constraint sets A and B, respectively
    """
    sets = {}
    constraints = model.getConstrs()
    # Set name is the text before the opening bracket CON_BRACKET_L.
    for c in constraints:
        name = c.constrName
        split_name = name.split(CON_BRACKET_L)
        set_name = split_name[0]
        if set_name not in sets:
            sets[set_name] = 1
        else:
            sets[set_name] += 1
    # Python 2 print statements.
    print "Constraint set, Number of constraints"
    print "\n".join(["{0}, {1}".format(name, number) for name, number
                     in sorted(sets.items())])
def list_variables(model):
    """
    Print to screen the variable sets in the model.

    Show the name of each variable set along with the
    number of variables in that set.

    A variable set is composed of all variables
    sharing the same string identifier before the indices:

    A[2,3,4] and A[1,2,3] are in the same variable set, A;
    A[2,3,4] and B[2,3,4] are in variable sets A and B, respectively
    """
    sets = {}
    variables = model.getVars()
    # Set name is the text before the opening bracket VAR_BRACKET_L.
    for v in variables:
        name = v.varName
        split_name = name.split(VAR_BRACKET_L)
        set_name = split_name[0]
        if set_name not in sets:
            sets[set_name] = 1
        else:
            sets[set_name] += 1
    # Python 2 print statements.
    print "Variable set, Number of variables"
    print "\n".join(["{0}, {1}".format(name, number) for name, number
                     in sorted(sets.items())])
def get_variables(model, name="", approx=False, filter_values={}, exclude=False):
    """
    Return a list of variables from the model
    selected by variable set name.

    A variable set is composed of all variables
    sharing the same string identifier before the indices:

    A[2,3,4] and A[1,2,3] are in the same variable set, A;
    A[2,3,4] and B[2,3,4] are in variable sets A and B, respectively

    If name is empty, all model variables are candidates.
    If approx is True, match set names containing name as a substring.
    filter_values (index position -> required value) further restricts the
    result via filter_variables; exclude inverts that filter.

    Variable names are assumed to be separated from indices by square
    brackets, e.g. x[i,j] has set name "x" and indices i, j.
    """
    if not name:
        # No set name given: every variable qualifies. (Previously the
        # name filter still ran with name == "" and discarded everything.)
        variables = model.getVars()
    elif not approx:
        variables = [v for v in model.getVars()
                     if v.varName.split(VAR_BRACKET_L)[0] == name]
    else:
        variables = [v for v in model.getVars()
                     if name in v.varName.split(VAR_BRACKET_L)[0]]
    if filter_values:
        variables = filter_variables(variables, filter_values,
                                     exclude=exclude)
    return variables
def check_attr(attr, attributes):
    """
    Return True if attr matches one of the given Gurobi attribute
    names, comparing case-insensitively.
    """
    lowered = attr.lower()
    for candidate in attributes:
        if attr == candidate or lowered == candidate.lower():
            return True
    return False
def check_variable_attr(attr):
    """
    Check if a string corresponds to a variable attribute.

    Case-insensitive.
    """
    var_attrs = get_variable_attrs()
    return check_attr(attr, var_attrs)
def check_constraint_attr(attr):
    """
    Check if a string corresponds to a constraint attribute.

    Attributes are case-insensitive.
    """
    con_attrs = get_constraint_attrs()
    return check_attr(attr, con_attrs)
def get_variables_attr(attr, model="", name="", variables=""):
    """
    Return a dictionary mapping variable names to the value of
    the requested attribute.

    Specifiy either model and name parameters or supply a list of variables
    """
    if not attr:
        raise AttributeError("No attributes specified")
    if not check_variable_attr(attr):
        message = "{0}\n{1}\n{2}".format(
            "Attribute: {0} not a variable attribute.".format(attr),
            "Get list of all variables attributes with the",
            "get_variable_attrs() method.")
        raise AttributeError(message)
    if not model and not variables:
        raise ValueError("No model or variable list given")
    selected = variables_check(model, name, variables)
    result = {}
    for var in selected:
        result[var.varName] = getattr(var, attr)
    return result
def print_variables_attr(attr, model="", name="", variables=""):
    """
    Print to screen a dictionary of variables names and their
    corresponding attribute value.

    Specifiy either model and name parameters or supply a list of variables
    """
    var_dict = get_variables_attr(attr, model=model,
                                  name=name, variables=variables)
    # Python 2 print statement; one "name, value" line per variable.
    print "\n".join(["{0}, {1}".format(v, k) for v, k in
                     sorted(var_dict.items())])
def set_variables_attr(attr, val, model="", name="", variables=""):
    """
    Set an attribute of a model variable set.

    Specifiy either model and name parameters or supply a list of variables

    Raises AttributeError for a missing/unknown attribute or a missing
    value, ValueError when neither a model nor variables are supplied.
    """
    if not attr:
        raise AttributeError("No attribute specified")
    # Explicit check so legitimate falsy values (e.g. Obj = 0) are allowed;
    # the old `not val` test rejected them. Also removed an unreachable
    # `return` that followed the raise.
    if val is None or val == "":
        raise AttributeError("No value specified")
    if not check_variable_attr(attr):
        raise AttributeError("{0}\n{1}\n{2}".format(
            "Attribute: {0} not a variable attribute.".format(attr),
            "Get list of all variables attributes with the",
            "get_variable_attrs() method."))
    if not model and not variables:
        raise ValueError("No model or variables specified")
    variables = variables_check(model, name, variables)
    for v in variables:
        setattr(v, attr, val)
def zero_all_objective_coeffs(model):
    """
    Set every objective coefficient in the model to zero.
    """
    if not model:
        raise ValueError("No model given")
    for variable in model.getVars():
        variable.Obj = 0
def set_variables_bounds(lb="", ub="", model="", name="", variables=""):
    """
    Set the lower bound and/or upper bound for a variable set.

    Specifiy either model and name parameters or supply a list of variables
    """
    for attr_name, bound in (("lb", lb), ("ub", ub)):
        if bound:
            set_variables_attr(attr_name, val=bound, model=model,
                               name=name, variables=variables)
def remove_variables_from_model(model, name="", variables=""):
    """
    Remove the given variables from the model.

    Specifiy either model and name parameters or supply a list of variables
    """
    if not model and not variables:
        raise ValueError("No model or variables given")
    if not model:
        # Removal happens through the model, so it is always required.
        raise ValueError("No model given")
    for variable in variables_check(model, name, variables):
        model.remove(variable)
def variables_check(model, name, variables):
    """
    Return the appropriate
    variables based on the information supplied.

    An explicit variables list wins; otherwise fetch by set name from
    the model, or fall back to all model variables.
    """
    if variables:
        return variables
    if model and name:
        variables = get_variables(model, name)
    if model and not name:
        variables = model.getVars()
    if not variables:
        # Python 2 print statement; informational only, still returns.
        print "No variables found for\nmodel: {0},\nname: {1}".format(
            model, name)
    return variables
def get_variable_index_value(variable, index):
    """
    Return the value of the given index
    for a given variable.

    Variable names are assumed to be given
    as A[a,c,d, ....,f]

    Numeric index values are returned as int, everything else as str.
    """
    value = variable.varName.split(",")[index].strip()
    if VAR_BRACKET_R in value:
        value = value[:-1]
    elif VAR_BRACKET_L in value:
        value = value.split(VAR_BRACKET_L)[1]
    # The old guard `if value.isdigit:` referenced the method object
    # (always truthy), so int() conversion was effectively attempted
    # unconditionally. Keep that behavior explicitly: try the conversion
    # and fall back to the string form.
    try:
        value = int(value)
    except ValueError:
        pass
    return value
def get_linexp_from_variables(variables):
    """
    Build and return a Gurobi linear expression summing the
    supplied variables.
    """
    expression = gp.LinExpr()
    for variable in variables:
        expression += variable
    return expression
def sum_variables_by_index(index, model="", name="", variables=""):
    """
    Return a dictionary mapping index values to the sum of the
    solution values (X) of all matching variables.

    Specifiy either model and name parameters or supply a list of variables
    """
    grouped = get_variables_by_index(index, model=model, name=name,
                                     variables=variables)
    if not grouped:
        raise ValueError("No variables found".format(index))
    totals = {}
    for index_name, index_vars in sorted(grouped.items()):
        totals[index_name] = sum([v.X for v in index_vars])
    return totals
def print_dict(dictionary):
    """
    Print a dictionary to screen, one "key, value" line per entry,
    sorted by key.
    """
    # Python 2 print statement.
    print "\n".join(["{0}, {1}".format(index_name, index_value)
                     for index_name, index_value in
                     sorted(dictionary.items())])
def print_variables_sum_by_index(index, model="", name="", variables=""):
    """
    Print a dictionary of variables, summed by index.

    Specifiy either model and name parameters or supply a list of variables
    """
    var_dict = sum_variables_by_index(index, model=model,
                                      name=name, variables=variables)
    print_dict(var_dict)
def get_variables_by_index(index, model="", name="", variables=""):
    """
    Return a dictionary mapping index values to lists of
    matching variables.

    Specifiy either model and name parameters or supply a list of variables
    """
    if index != 0 and not index:
        raise IndexError("No index given")
    if not model and not variables:
        raise ValueError("No model or variables given")
    if not (name and model) and not variables:
        raise ValueError("No variables specified")
    selected = variables_check(model, name, variables)
    grouped = {}
    for variable in selected:
        key = get_variable_index_value(variable, index)
        grouped.setdefault(key, []).append(variable)
    return grouped
def filter_variables(variables, filter_values, exclude=False):
    """
    Return a new list of variables that match the filter values
    from the given variables list.

    filter_values maps index positions to required index values.
    If exclude is True, return the variables that do NOT match instead.
    """
    if not variables:
        raise ValueError("variables not given")
    if not filter_values:
        raise ValueError("Dictionary of filter values not given")
    new_vars = []
    for v in variables:
        add = True
        # Python 2: dict.iteritems(). A variable must match every filter.
        for index, value in filter_values.iteritems():
            key = get_variable_index_value(v, index)
            if key != value:
                add = False
                break
        if add:
            new_vars.append(v)
    if exclude:
        # Set difference loses the original ordering.
        new_vars = [v for v in (set(variables)-set(new_vars))]
    return new_vars
def get_variables_by_index_values(model, name, index_values, exclude=False):
    """
    Return a list of variables filtered by index values.

    If exclude is False return the variables that match the filters;
    if exclude is True return the variables that do not match.
    """
    # Pass by keyword: positionally, index_values used to land in
    # get_variables' `approx` parameter and exclude in `filter_values`,
    # so the filters were never applied.
    return get_variables(model, name, filter_values=index_values,
                         exclude=exclude)
def get_variables_by_two_indices(index1, index2, model="", name="", variables=""):
    """
    Return a dictionary of variables mapping index1 values
    to dictionaries mapping
    index2 values to matching variables.

    Specifiy either model and name parameters or supply a list of variables
    """
    two_indices_dict = {}
    index1_dict = get_variables_by_index(index1, model=model, name=name,
                                         variables=variables)
    # Python 2: dict.iteritems().
    for key, value in index1_dict.iteritems():
        two_indices_dict[key] = get_variables_by_index(index2, variables=value)
    return two_indices_dict
def print_variables(variables):
    """
    Print a list of variables to look good (one name per line).
    """
    # Python 2 print statement.
    print "\n".join([v.varName for v in variables])
def sum_variables_by_two_indices(index1, index2, model="", name="", variables=""):
    """
    Return a dictionary mapping index1 values
    to dictionaries of the given variables summed over index2.

    Specifiy either model and name parameters or supply a list of variables
    """
    two_indices_dict = get_variables_by_two_indices(index1, index2,
        model=model, name=name, variables=variables)
    if not two_indices_dict:
        raise ValueError("Inputs did not match with model variables")
    new_dict = {}
    # Python 2: dict.iteritems(). Inner dicts map index2 value -> sum of X.
    for key, var_dict in two_indices_dict.iteritems():
        new_dict[key] = {index_name: sum([v.X for v in index_vars])
                         for index_name, index_vars in
                         sorted(var_dict.items())}
    return new_dict
def print_two_indices_dict(indices_dict):
    """
    Print to screen a two level nested dictionary.
    """
    # Python 2: print statement and dict.iteritems().
    for key, value in indices_dict.iteritems():
        print "\n{0}".format(key)
        print_dict(value)
def get_linexp_by_index(index, model="", name="", variables=""):
    """
    Return a dictionary of index values to Gurobi linear expressions
    corresponding to the summation of variables that match the index
    value for the given index number.

    Specifiy either model and name parameters or supply a list of variables.
    """
    linexps = {}
    variables = variables_check(model, name, variables)
    for v in variables:
        value = get_variable_index_value(v, index)
        if value not in linexps:
            # Seed a new expression with the first matching variable.
            linexps[value] = gp.LinExpr(v)
        else:
            linexps[value] += v
    return linexps
def print_constraints(constraints):
    """
    Print constraints in an aesthetically pleasing way (one per line).
    """
    # Python 2 print statement.
    print "\n".join([c.constrName for c in constraints])
def get_constraints_multiple(model, names_list, approx=False):
    """
    Return a flat list of constraints drawn from each constraint
    set name in names_list.
    """
    matched = []
    for set_name in names_list:
        matched += get_constraints(model, set_name, approx)
    return matched
def filter_constraints(constraints, filter_values, exclude=False):
    """
    Return a new list of constraints that match the filter values from
    the given constraints list.

    filter_values maps index positions to required index values.
    If exclude is True, return the constraints that do NOT match instead.
    """
    if not constraints:
        raise ValueError("constraints not given")
    if not filter_values:
        raise ValueError("Dictionary of filter values not given")
    new_cons = []
    for c in constraints:
        add = True
        # Python 2: dict.iteritems().
        for index, value in filter_values.iteritems():
            key = get_constraint_index_value(c, index)
            # NOTE(review): str.replace returns a new string and the result
            # is discarded, so quotes are never actually stripped from key.
            # Confirm intent before changing (likely meant key = key.replace(...)).
            try:
                key.replace('"', "")
            except AttributeError:
                pass
            if key != value:
                add = False
                break
        if add:
            new_cons.append(c)
    if exclude:
        # May want to add sorting by varName here
        new_cons = [c for c in (set(constraints)-set(new_cons))]
    return new_cons
def get_constraints(model, name="", approx=False, filter_values={},
                    exclude=False):
    """
    Return a list of constraints from the model
    selected by constraint set name.

    A constraint set is composed of all constraints
    sharing the same string identifier before the indices:

    A(2,3,4) and A(1,2,3) are in the same constraint set, A;
    A(2,3,4) and B(2,3,4) are in constraint sets A and B, respectively

    Constraint set names are assumed to be separated from indices by
    round brackets, e.g. env(r,t) has set name "env" and indices r, t.
    """
    if not name:
        return model.getConstrs()
    if approx:
        matches = lambda set_name: name in set_name
    else:
        matches = lambda set_name: set_name == name
    constraints = [c for c in model.getConstrs()
                   if matches(c.constrName.split(CON_BRACKET_L)[0])]
    if filter_values:
        constraints = filter_constraints(constraints, filter_values, exclude)
    return constraints
def constraints_check(model, name, constraints):
    """
    Resolve which constraints to operate on: an explicit list wins,
    otherwise fetch by set name from the model, otherwise all of the
    model's constraints.
    """
    if constraints:
        return constraints
    if model:
        if name:
            constraints = get_constraints(model, name)
        else:
            constraints = model.getConstrs()
    return constraints
def get_constraints_attr(attr, model="", name="", constraints=""):
    """
    Return a dictionary of constraint names and their
    corresponding attribute value.

    Specifiy either model and name parameters or supply a list of constraints
    """
    if not attr:
        raise AttributeError("No attributes specified")
    # Reject anything that is not a known constraint attribute.
    if not check_constraint_attr(attr):
        raise AttributeError("{0}\n{1}\n{2}".format(
            "Attribute: {0} not a constraint attribute.".format(attr),
            "Get list of all variables attributes with the",
            "get_constraint_attrs() method."))
    if not model and not constraints:
        raise ValueError("No model or constraint list given")
    constraints = constraints_check(model, name, constraints)
    return {c.constrName: getattr(c, attr) for c in constraints}
def print_constraints_attr(attr, model="", name="", constraints=""):
    """
    Print to screen a list of constraint attribute values
    given by the constraints specified in the names parameter.

    Specifiy either model and name parameters or supply a list of constraints
    """
    constraints = get_constraints_attr(attr, model=model,
                                       name=name, constraints=constraints)
    # Python 2 print statement; one "name, value" line per constraint.
    print "\n".join(["{0}, {1}".format(c, k)
                     for c, k in sorted(constraints.items())])
def set_constraints_attr(attr, val, model="", name="", constraints=""):
    """
    Set an attribute of a model constraint set.

    Specifiy either model and name parameters or supply a list of constraints

    Raises AttributeError for a missing/unknown attribute or a missing
    value, ValueError when neither a model nor constraints are supplied.
    """
    if not attr:
        raise AttributeError("No attribute specified")
    # Explicit check so legitimate falsy values (e.g. rhs = 0) are allowed;
    # the old `not val` test rejected them.
    if val is None or val == "":
        raise AttributeError("No value specified")
    if not check_constraint_attr(attr):
        # Error text previously referred to *variable* attributes and
        # get_variable_attrs(); corrected to the constraint equivalents.
        raise AttributeError("{0}\n{1}\n{2}".format(
            "Attribute: {0} not a constraint attribute.".format(attr),
            "Get list of all constraint attributes with the",
            "get_constraint_attrs() method."))
    if not model and not constraints:
        raise ValueError("No model or constraints specified")
    constraints = constraints_check(model, name, constraints)
    for c in constraints:
        setattr(c, attr, val)
def set_constraints_rhs_as_percent(percent, model="", name="", constraints=""):
    """
    Set the right hand side (rhs) of a constraint set as a percentage of its current rhs.

    percent is a multiplier (e.g. 0.5 halves each rhs).
    Specifiy either model and name parameters or supply a list of constraints
    """
    if percent != 0 and not percent:
        # Python 2 print statement; unlike sibling setters this reports
        # and returns rather than raising.
        print "Error: No percent specified."
        return
    try:
        percent = float(percent)
    except ValueError:
        raise ValueError("Percent must be a number. Percent: {}".format(percent))
    if not model and not constraints:
        raise ValueError("No model or constraints specified.")
    constraints = constraints_check(model, name, constraints)
    for c in constraints:
        cur_rhs = getattr(c, "rhs")
        setattr(c, "rhs", percent*cur_rhs)
def remove_constraints_from_model(model, name="", constraints=""):
    """
    Remove the given constraints from the model.

    Specifiy either model and name parameters or supply a list of constraints
    """
    if not model and not constraints:
        raise ValueError("No model or constraints given")
    if not model:
        # A model object is always required because removal happens
        # through the model, even for an explicit constraint list.
        raise ValueError("No model given")
    if not constraints:
        constraints = constraints_check(model, name, constraints)
    for constraint in constraints:
        model.remove(constraint)
def get_constraint_index_value(constraint, index):
    """
    Return the value of the given index
    for a given constraint.

    Constraint names are assumed to be given
    as A(a,c,d, ....,f)

    Numeric index values are returned as int, everything else as str.
    """
    value = constraint.constrName.split(",")[index].strip()
    if CON_BRACKET_R in value:
        value = value[:-1]
    elif CON_BRACKET_L in value:
        value = value.split(CON_BRACKET_L)[1]
    # The old guard `if value.isdigit:` referenced the method object
    # (always truthy), so int() conversion was effectively attempted
    # unconditionally. Keep that behavior explicitly: try the conversion
    # and fall back to the string form.
    try:
        value = int(value)
    except ValueError:
        pass
    return value
def get_constraints_by_index(index, model="", name="", constraints=""):
    """
    Return a dictionary mapping index values to lists of
    constraints having that index value.

    Specifiy either model and name parameters or supply a list of constraints
    """
    if index != 0 and not index:
        raise IndexError("No index given")
    if not model and not constraints:
        raise ValueError("No model or constraints given")
    if not (name and model) and not constraints:
        raise ValueError("No constraints specified")
    selected = constraints_check(model, name, constraints)
    grouped = {}
    for constraint in selected:
        key = get_constraint_index_value(constraint, index)
        grouped.setdefault(key, []).append(constraint)
    return grouped
def get_constraints_by_index_values(model, name, index_values, exclude=False):
    """
    Return a list of constraints filtered by index values.
    If exclude is False then return the constraints that match the filters;
    if exclude is True then return the constraints that do not match them.
    """
    return get_constraints(model, name, index_values, exclude)
def get_grb_sense_from_string(sense):
    """
    Return the GRB constraint sense object corresponding to the supplied
    string ("<", ">" or "=").
    Convention follows the Gurobi docs:
    https://www.gurobi.com/documentation/6.5/refman/sense.html#attr:Sense
    """
    if sense == "<":
        return gp.GRB.LESS_EQUAL
    if sense == ">":
        return gp.GRB.GREATER_EQUAL
    if sense == "=":
        return gp.GRB.EQUAL
    raise ValueError("Constraint sense is not '<', '>', '='")
def add_constraint_constant(model, variables, constant, sense="<",
                            con_name=""):
    """
    Add a constraint to the model saying that the sum of `variables` must
    be less than or equal, greater than or equal, or equal to `constant`,
    depending on `sense`.
    """
    if not variables:
        raise ValueError("variables list not provided")
    expression = get_linexp_from_variables(variables)
    grb_sense = get_grb_sense_from_string(sense)
    if con_name:
        model.addConstr(expression, grb_sense, constant, con_name)
    else:
        model.addConstr(expression, grb_sense, constant)
def check_if_name_a_variable(name, model):
    """
    Return True if the supplied name corresponds to a variable set name
    in the given model, False otherwise.
    """
    return bool(get_variables(model, name))
def check_if_name_a_constraint(name, model):
    """
    Return True if the supplied name corresponds to a constraint set name
    in the given model, False otherwise.
    """
    return bool(get_constraints(model, name))
def add_constraint_variables(model, variables1, variables2,
                             sense="=", con_name=""):
    """
    Add constraint to model that says the sum of a list of variables must
    be equal, less than or equal, or greater than or equal, the sum of
    another list of variables.

    Raises:
        ValueError: if either variables list is empty or missing.
    """
    if not variables1 or not variables2:
        # BUG FIX: the ValueError was previously constructed but never
        # raised, so empty inputs silently fell through to the lookups
        # below and failed with a confusing error (or not at all).
        raise ValueError("Variables list not provided")
    linexp1 = get_linexp_from_variables(variables1)
    linexp2 = get_linexp_from_variables(variables2)
    sense = get_grb_sense_from_string(sense)
    if not con_name:
        model.addConstr(linexp1, sense, linexp2)
    else:
        model.addConstr(linexp1, sense, linexp2, con_name)
def graph_by_index(model, variables, index, title="", y_axis="", x_axis=""):
    """
    Display a graph of the variable against the specified index
    using matplotlib.
    Matplotlib must already be installed to use this.
    See: http://matplotlib.org/faq/installing_faq.html
    """
    # NOTE(review): `model` is unused here; it appears to be kept for
    # interface consistency with the other helpers — confirm with callers.
    # Import lazily so the rest of the module works without matplotlib.
    try:
        import matplotlib.pyplot as plot
    except ImportError:
        raise ImportError("{0}\n{1}".format(
            "Module Matplotlib not found.",
            "Please download and install Matplotlib to use this function."))
    fig = plot.figure()
    ax = fig.add_subplot(111)
    # Aggregate the variable values over the requested index before plotting.
    variables_sum = sum_variables_by_index(index, variables=variables)
    keys, values = zip(*variables_sum.items())
    # One bar position per distinct index value.
    y = range(len(variables_sum))
    if title:
        ax.set_title(title)
    if y_axis:
        ax.set_ylabel(y_axis)
    if x_axis:
        ax.set_xlabel(x_axis)
    ax.bar(y, values)
    #ax.legend(keys)
    plot.show()
def graph_by_two_indices(model, variables, index1, index2, title="",
                         y_axis="", x_axis=""):
    """
    Display a stacked bar graph of the variable summed over index2,
    grouped by index1.
    Matplotlib must already be installed to use this.
    See: http://matplotlib.org/faq/installing_faq.html

    NOTE(review): `model` is unused here; it appears to be kept for
    interface consistency with graph_by_index — confirm with callers.
    """
    # Import lazily so the rest of the module works without matplotlib.
    try:
        import matplotlib.pyplot as plot
    except ImportError:
        raise ImportError("{0}\n{1}".format(
            "Module Matplotlib not found.",
            "Please download and install Matplotlib to use this function."))
    fig = plot.figure()
    ax = fig.add_subplot(111)
    # We need to do this in reverse order to prepare it for graphing
    variables_sum = sum_variables_by_two_indices(index2, index1,
                                                 variables=variables)
    keys, values = zip(*variables_sum.items())
    colours = ["b", "g", "r", "c", "y", "m", "k", "w"]
    y = range(len(values[0]))
    # BUG FIX: removed leftover debug statement `print y`.
    if title:
        ax.set_title(title)
    if y_axis:
        ax.set_ylabel(y_axis)
    if x_axis:
        ax.set_xlabel(x_axis)
    bars = []
    prev_bars = [0 for bar in y]
    colour_count = 0
    for key, value in variables_sum.iteritems():
        cur_bars = [k[1] for k in sorted(value.items(), key=lambda x: x[0])]
        bars.append(ax.bar(y, cur_bars, bottom=prev_bars,
                           color=colours[colour_count]))
        # BUG FIX: for stacked bars the `bottom` of each series must be the
        # cumulative height of ALL previous series; the old code only used
        # the immediately preceding series, which mis-stacks 3+ series.
        prev_bars = [p + c for p, c in zip(prev_bars, cur_bars)]
        colour_count += 1
        if colour_count == len(colours) - 1:
            # Wrap before reaching "w" (white), which would be invisible
            # on the default background.
            colour_count = 0
    ax.legend(keys)
    plot.show()
def print_variables_to_csv(file_name, model="", name="", variables=""):
    """
    Print the specified variables to a csv file given by the file_name
    parameter.
    If no variables are specified then all model variables are written.
    """
    if ".csv" not in file_name:
        raise ValueError("Non csv file specified")
    with open(file_name, "wb+") as write_file:
        writer = csv.writer(write_file)
        writer.writerow(["Variable name", "Value"])
        variables = variables_check(model, name, variables)
        # The csv module quotes fields as needed, which matters because
        # the variable names themselves contain commas.
        writer.writerows([[v.varName, v.X] for v in variables])
def print_variables_to_csv_by_index(file_name, index,
                                    model="", name="", variables=""):
    """
    Print the sums of variables by the specified index to a csv file.
    Default behaviour of the function is to overwrite the given file_name.
    """
    if ".csv" not in file_name:
        raise ValueError("Non csv file specified")
    with open(file_name, "wb+") as write_file:
        writer = csv.writer(write_file)
        writer.writerow(["Index", "Value"])
        summed = sum_variables_by_index(index, model=model,
                                        name=name, variables=variables)
        if not summed:
            raise ValueError("No variables found")
        writer.writerows([[key, value]
                          for key, value in sorted(summed.items())])
def print_variables_to_json_by_index(file_name, index, model="",
                                     name="", variables="", index_alias=""):
    """
    Print the specified variables to a json file given by file_name,
    organized by the specified index.
    Formatted for reading into nvD3 applications.
    Default behaviour is to overwrite the file if one exists at
    file_name's location.
    """
    if ".json" not in file_name:
        raise ValueError("Non json file specified")
    index_name = index
    if index_alias:
        index_name = index_alias
    var_dict = sum_variables_by_index(index, model=model,
                                      name=name, variables=variables)
    data = {index_name: [{index_name: var_dict}]}
    # BUG FIX: open the output in a context manager so the handle is
    # closed (and the JSON flushed to disk) deterministically; the old
    # code leaked the file object returned by open().
    with open(file_name, "wb") as json_file:
        json.dump(data, json_file)
| mit |
MIPS/external-chromium_org-third_party-skia | gm/rebaseline_server/column.py | 21 | 2846 | #!/usr/bin/python
"""
Copyright 2014 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
ColumnHeaderFactory class (see class docstring for details)
"""
# Keys used within dictionary representation of each column header.
# NOTE: Keep these in sync with static/constants.js
KEY__EXTRACOLUMNHEADERS__HEADER_TEXT = 'headerText'
KEY__EXTRACOLUMNHEADERS__HEADER_URL = 'headerUrl'
KEY__EXTRACOLUMNHEADERS__IS_FILTERABLE = 'isFilterable'
KEY__EXTRACOLUMNHEADERS__IS_SORTABLE = 'isSortable'
KEY__EXTRACOLUMNHEADERS__USE_FREEFORM_FILTER = 'useFreeformFilter'
KEY__EXTRACOLUMNHEADERS__VALUES_AND_COUNTS = 'valuesAndCounts'


class ColumnHeaderFactory(object):
  """Factory which assembles the header for a single column of data."""

  def __init__(self, header_text, header_url=None,
               is_filterable=True, is_sortable=True,
               use_freeform_filter=False):
    """
    Args:
      header_text: string; text the client should display within column header.
      header_url: string; target URL if user clicks on column header.
          If None, nothing to click on.
      is_filterable: boolean; whether client should allow filtering on this
          column.
      is_sortable: boolean; whether client should allow sorting on this column.
      use_freeform_filter: boolean; *recommendation* to the client indicating
          whether to allow freeform text matching, as opposed to listing all
          values alongside checkboxes. If is_filterable==false, this is
          meaningless.
    """
    self._header_text = header_text
    self._header_url = header_url
    self._is_filterable = is_filterable
    self._is_sortable = is_sortable
    self._use_freeform_filter = use_freeform_filter

  def create_as_dict(self, values_and_counts_dict=None):
    """Creates the header for this column, in dictionary form.

    Uses the KEY__EXTRACOLUMNHEADERS__* constants as keys, as needed when
    constructing the JSON representation.

    Args:
      values_and_counts_dict: dictionary mapping each possible column value
          to its count (how many entries in the column have this value), or
          None if this information is not available.
    """
    header = {
        KEY__EXTRACOLUMNHEADERS__HEADER_TEXT: self._header_text,
        KEY__EXTRACOLUMNHEADERS__IS_FILTERABLE: self._is_filterable,
        KEY__EXTRACOLUMNHEADERS__IS_SORTABLE: self._is_sortable,
        KEY__EXTRACOLUMNHEADERS__USE_FREEFORM_FILTER: self._use_freeform_filter,
    }
    # Optional entries are only emitted when they carry information.
    if self._header_url:
      header[KEY__EXTRACOLUMNHEADERS__HEADER_URL] = self._header_url
    if values_and_counts_dict:
      header[KEY__EXTRACOLUMNHEADERS__VALUES_AND_COUNTS] = sorted(
          values_and_counts_dict.items())
    return header
| bsd-3-clause |
timonwong/OmniMarkupPreviewer | OmniMarkupLib/Renderers/libs/pygments/formatters/svg.py | 76 | 5840 | # -*- coding: utf-8 -*-
"""
pygments.formatters.svg
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for SVG output.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt
__all__ = ['SvgFormatter']
def escape_html(text):
    """Escape &, <, > as well as single and double quotes for HTML."""
    # '&' must be replaced first so the entities introduced below are not
    # themselves re-escaped.
    for char, entity in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'),
                         ('"', '&quot;'), ("'", '&#39;')):
        text = text.replace(char, entity)
    return text
class2style = {}
class SvgFormatter(Formatter):
    """
    Format tokens as an SVG graphics file.  This formatter is still experimental.
    Each line of code is a ``<text>`` element with explicit ``x`` and ``y``
    coordinates containing ``<tspan>`` elements with the individual token styles.

    By default, this formatter outputs a full SVG document including doctype
    declaration and the ``<svg>`` root element.

    .. versionadded:: 0.9

    Additional options accepted:

    `nowrap`
        Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and
        don't add a XML declaration and a doctype. If true, the `fontfamily`
        and `fontsize` options are ignored. Defaults to ``False``.

    `fontfamily`
        The value to give the wrapping ``<g>`` element's ``font-family``
        attribute, defaults to ``"monospace"``.

    `fontsize`
        The value to give the wrapping ``<g>`` element's ``font-size``
        attribute, defaults to ``"14px"``.

    `xoffset`
        Starting offset in X direction, defaults to ``0``.

    `yoffset`
        Starting offset in Y direction, defaults to the font size if it is given
        in pixels, or ``20`` else. (This is necessary since text coordinates
        refer to the text baseline, not the top edge.)

    `ystep`
        Offset to add to the Y coordinate for each subsequent line. This should
        roughly be the text size plus 5. It defaults to that value if the text
        size is given in pixels, or ``25`` else.

    `spacehack`
        Convert spaces in the source to ``&#160;``, which are non-breaking
        spaces. SVG provides the ``xml:space`` attribute to control how
        whitespace inside tags is handled, in theory, the ``preserve`` value
        could be used to keep all whitespace as-is. However, many current SVG
        viewers don't obey that rule, so this option is provided as a workaround
        and defaults to ``True``.
    """
    name = 'SVG'
    aliases = ['svg']
    filenames = ['*.svg']

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.nowrap = get_bool_opt(options, 'nowrap', False)
        self.fontfamily = options.get('fontfamily', 'monospace')
        self.fontsize = options.get('fontsize', '14px')
        self.xoffset = get_int_opt(options, 'xoffset', 0)
        fs = self.fontsize.strip()
        if fs.endswith('px'):
            fs = fs[:-2].strip()
        try:
            int_fs = int(fs)
        except ValueError:
            # BUG FIX: this was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only a non-numeric font size
            # should trigger the fallback default.
            int_fs = 20
        self.yoffset = get_int_opt(options, 'yoffset', int_fs)
        self.ystep = get_int_opt(options, 'ystep', int_fs + 5)
        self.spacehack = get_bool_opt(options, 'spacehack', True)
        self._stylecache = {}

    def format_unencoded(self, tokensource, outfile):
        """
        Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
        tuples and write it into ``outfile``.

        For our implementation we put all lines in their own 'line group'.
        """
        x = self.xoffset
        y = self.yoffset
        if not self.nowrap:
            if self.encoding:
                outfile.write('<?xml version="1.0" encoding="%s"?>\n' %
                              self.encoding)
            else:
                outfile.write('<?xml version="1.0"?>\n')
            outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" '
                          '"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/'
                          'svg10.dtd">\n')
            outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n')
            outfile.write('<g font-family="%s" font-size="%s">\n' %
                          (self.fontfamily, self.fontsize))
        outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (x, y))
        for ttype, value in tokensource:
            style = self._get_style(ttype)
            tspan = style and '<tspan' + style + '>' or ''
            tspanend = tspan and '</tspan>' or ''
            value = escape_html(value)
            if self.spacehack:
                # BUG FIX: the replacement string here had degraded to a
                # literal space (a no-op); restore the non-breaking-space
                # entity described by the `spacehack` option docs above.
                value = value.expandtabs().replace(' ', '&#160;')
            parts = value.split('\n')
            for part in parts[:-1]:
                outfile.write(tspan + part + tspanend)
                y += self.ystep
                outfile.write('</text>\n<text x="%s" y="%s" '
                              'xml:space="preserve">' % (x, y))
            outfile.write(tspan + parts[-1] + tspanend)
        outfile.write('</text>')

        if not self.nowrap:
            outfile.write('</g></svg>\n')

    def _get_style(self, tokentype):
        # Cache per token type: style lookup walks the token hierarchy and
        # would otherwise be repeated for every single token.
        if tokentype in self._stylecache:
            return self._stylecache[tokentype]
        otokentype = tokentype
        while not self.style.styles_token(tokentype):
            tokentype = tokentype.parent
        value = self.style.style_for_token(tokentype)
        result = ''
        if value['color']:
            result = ' fill="#' + value['color'] + '"'
        if value['bold']:
            result += ' font-weight="bold"'
        if value['italic']:
            result += ' font-style="italic"'
        self._stylecache[otokentype] = result
        return result
| mit |
maciekcc/tensorflow | tensorflow/contrib/layers/python/layers/embedding_ops_test.py | 86 | 31950 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""embedding_ops tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import sys
import numpy as np
from tensorflow.contrib.layers.python.layers import embedding_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class SafeEmbeddingLookupSparseTest(test.TestCase):
  """Tests for embedding_ops.safe_embedding_lookup_sparse."""

  def _random_weights(self, vocab_size=4, embed_dim=4, num_shards=1):
    """Creates, initializes and evaluates sharded random embedding weights."""
    assert vocab_size > 0
    assert embed_dim > 0
    assert num_shards > 0
    assert num_shards <= vocab_size

    embedding_weights = partitioned_variables.create_partitioned_variables(
        shape=[vocab_size, embed_dim],
        slicing=[num_shards, 1],
        initializer=init_ops.truncated_normal_initializer(
            mean=0.0, stddev=1.0 / math.sqrt(vocab_size), dtype=dtypes.float32))
    for w in embedding_weights:
      w.initializer.run()
    # Materialize the shards as numpy arrays so expected values can be
    # computed by plain indexing in the tests below.
    embedding_weights = [w.eval() for w in embedding_weights]
    return embedding_weights

  def _ids_and_weights_2d(self):
    """Builds a 2-D sparse id/weight fixture covering the edge cases."""
    # Each row demonstrates a test case:
    #   Row 0: multiple valid ids, 1 invalid id, weighted mean
    #   Row 1: all ids are invalid (leaving no valid ids after pruning)
    #   Row 2: no ids to begin with
    #   Row 3: single id
    #   Row 4: all ids have <=0 weight
    indices = [[0, 0], [0, 1], [0, 2], [1, 0], [3, 0], [4, 0], [4, 1]]
    ids = [0, 1, -1, -1, 2, 0, 1]
    weights = [1.0, 2.0, 1.0, 1.0, 3.0, 0.0, -0.5]
    shape = [5, 4]

    sparse_ids = sparse_tensor_lib.SparseTensor(
        constant_op.constant(indices, dtypes.int64),
        constant_op.constant(ids, dtypes.int64),
        constant_op.constant(shape, dtypes.int64))

    sparse_weights = sparse_tensor_lib.SparseTensor(
        constant_op.constant(indices, dtypes.int64),
        constant_op.constant(weights, dtypes.float32),
        constant_op.constant(shape, dtypes.int64))

    return sparse_ids, sparse_weights

  def _ids_and_weights_3d(self):
    """Builds a 3-D sparse id/weight fixture covering the edge cases."""
    # Each (2-D) index demonstrates a test case:
    #   Index 0, 0: multiple valid ids, 1 invalid id, weighted mean
    #   Index 0, 1: all ids are invalid (leaving no valid ids after pruning)
    #   Index 0, 2: no ids to begin with
    #   Index 1, 0: single id
    #   Index 1, 1: all ids have <=0 weight
    #   Index 1, 2: no ids to begin with
    indices = [[0, 0, 0], [0, 0, 1], [0, 0, 2], [0, 1, 0], [1, 0, 0], [1, 1, 0],
               [1, 1, 1]]
    ids = [0, 1, -1, -1, 2, 0, 1]
    weights = [1.0, 2.0, 1.0, 1.0, 3.0, 0.0, -0.5]
    shape = [2, 3, 4]

    sparse_ids = sparse_tensor_lib.SparseTensor(
        constant_op.constant(indices, dtypes.int64),
        constant_op.constant(ids, dtypes.int64),
        constant_op.constant(shape, dtypes.int64))

    sparse_weights = sparse_tensor_lib.SparseTensor(
        constant_op.constant(indices, dtypes.int64),
        constant_op.constant(weights, dtypes.float32),
        constant_op.constant(shape, dtypes.int64))

    return sparse_ids, sparse_weights

  def test_safe_embedding_lookup_sparse_return_zero_vector(self):
    """Rows with no valid ids/weights should yield all-zero vectors."""
    with self.test_session():
      embedding_weights = self._random_weights()
      sparse_ids, sparse_weights = self._ids_and_weights_2d()

      embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
          embedding_weights, sparse_ids, sparse_weights).eval())

      self.assertAllClose(
          embedding_lookup_result,
          [(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
           3.0, [0] * 4, [0] * 4, embedding_weights[0][2], [0] * 4])

  def test_safe_embedding_lookup_sparse_return_special_vector(self):
    """With default_id set, empty rows get that embedding instead of zeros."""
    with self.test_session():
      embedding_weights = self._random_weights()
      sparse_ids, sparse_weights = self._ids_and_weights_2d()

      embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
          embedding_weights, sparse_ids, sparse_weights, default_id=3).eval())

      self.assertAllClose(
          embedding_lookup_result,
          [(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
           3.0, embedding_weights[0][3], embedding_weights[0][3],
           embedding_weights[0][2], embedding_weights[0][3]])

  def test_safe_embedding_lookup_sparse_no_weights(self):
    """Without weights, valid ids in a row are combined by unweighted mean."""
    with self.test_session():
      embedding_weights = self._random_weights()
      sparse_ids, _ = self._ids_and_weights_2d()

      embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
          embedding_weights, sparse_ids, None).eval())

      self.assertAllClose(
          embedding_lookup_result,
          [(embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4,
           [0] * 4, embedding_weights[0][2], (
               embedding_weights[0][0] + embedding_weights[0][1]) / 2.0])

  def test_safe_embedding_lookup_sparse_partitioned(self):
    """Sharded weights should behave the same as a single dense table."""
    with self.test_session():
      embedding_weights = self._random_weights(num_shards=3)
      sparse_ids, _ = self._ids_and_weights_2d()

      embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
          embedding_weights, sparse_ids, None).eval())

      # Flatten the shards back into one table for computing expectations.
      embedding_weights = list(itertools.chain(*embedding_weights))
      self.assertAllClose(embedding_lookup_result,
                          [(embedding_weights[0] + embedding_weights[1]) / 2.0,
                           [0] * 4, [0] * 4, embedding_weights[2],
                           (embedding_weights[0] + embedding_weights[1]) / 2.0])

  def test_safe_embedding_lookup_sparse_partitioned_inconsistent_weights(self):
    """Mismatched shard dtypes must be rejected with a ValueError."""
    with self.test_session():
      embedding_weights = self._random_weights(num_shards=3)
      sparse_ids, sparse_weights = self._ids_and_weights_2d()

      embedding_weights[1] = embedding_weights[1].astype(np.float64)
      self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
                        embedding_weights, sparse_ids)
      embedding_weights = [
          constant_op.constant(w, dtype=dtypes.float64)
          for w in embedding_weights
      ]
      self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
                        embedding_weights, sparse_ids, sparse_weights)

  def test_safe_embedding_lookup_sparse_3d_return_zero_vector(self):
    """3-D analogue of the zero-vector case above."""
    with self.test_session():
      embedding_weights = self._random_weights()
      sparse_ids, sparse_weights = self._ids_and_weights_3d()

      embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
          embedding_weights, sparse_ids, sparse_weights).eval())

      self.assertAllClose(embedding_lookup_result, [[
          (1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) / 3.0,
          [0] * 4, [0] * 4
      ], [embedding_weights[0][2], [0] * 4, [0] * 4]])

  def test_safe_embedding_lookup_sparse_3d_return_special_vector(self):
    """3-D analogue of the default_id case above."""
    with self.test_session():
      embedding_weights = self._random_weights()
      sparse_ids, sparse_weights = self._ids_and_weights_3d()

      embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
          embedding_weights, sparse_ids, sparse_weights, default_id=3).eval())

      self.assertAllClose(
          embedding_lookup_result,
          [[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
            3.0, embedding_weights[0][3], embedding_weights[0][3]], [
                embedding_weights[0][2], embedding_weights[0][3],
                embedding_weights[0][3]
            ]])

  def test_safe_embedding_lookup_sparse_3d_no_weights(self):
    """3-D analogue of the unweighted-mean case above."""
    with self.test_session():
      embedding_weights = self._random_weights()
      sparse_ids, _ = self._ids_and_weights_3d()

      embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
          embedding_weights, sparse_ids, None).eval())

      self.assertAllClose(embedding_lookup_result, [[(
          embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4, [
              0
          ] * 4], [
              embedding_weights[0][2],
              (embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4
          ]])

  def test_safe_embedding_lookup_sparse_3d_partitioned(self):
    """3-D analogue of the partitioned-weights case above."""
    with self.test_session():
      embedding_weights = self._random_weights(num_shards=3)
      sparse_ids, _ = self._ids_and_weights_3d()

      embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
          embedding_weights, sparse_ids, None).eval())

      # Flatten the shards back into one table for computing expectations.
      embedding_weights = list(itertools.chain(*embedding_weights))
      self.assertAllClose(embedding_lookup_result, [[
          (embedding_weights[0] + embedding_weights[1]) / 2.0, [0] * 4, [0] * 4
      ], [
          embedding_weights[2],
          (embedding_weights[0] + embedding_weights[1]) / 2.0, [0] * 4
      ]])

  def test_safe_embedding_lookup_sparse_3d_partitioned_inconsistent_weights(
      self):
    """3-D analogue of the inconsistent-dtype case above."""
    with self.test_session():
      embedding_weights = self._random_weights(num_shards=3)
      sparse_ids, sparse_weights = self._ids_and_weights_3d()

      embedding_weights[1] = embedding_weights[1].astype(np.float64)
      self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
                        embedding_weights, sparse_ids)
      embedding_weights = [
          constant_op.constant(w, dtype=dtypes.float64)
          for w in embedding_weights
      ]
      self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
                        embedding_weights, sparse_ids, sparse_weights)
class ScatteredEmbeddingLookupTest(test.TestCase):
  """Tests for embedding_ops.scattered_embedding_lookup* and
  embedding_ops.embedding_lookup_unique."""

  def setUp(self):
    # Fix the graph-level seed so the random weights are reproducible.
    random_seed.set_random_seed(1)

  def _random_weights(self, size=50, num_shards=1):
    """Creates and initializes random 1-D partitioned embedding weights."""
    assert size > 0
    assert num_shards > 0
    assert num_shards <= size

    embedding_weights = partitioned_variables.create_partitioned_variables(
        shape=[size],
        slicing=[num_shards],
        initializer=init_ops.truncated_normal_initializer(
            mean=0.0, stddev=1.0, dtype=dtypes.float32))
    for w in embedding_weights:
      w.initializer.run()
    return embedding_weights

  def test_scattered_embedding_consistency(self):
    """Equal input values must hash to identical embeddings."""
    with self.test_session():
      embedding_weights = self._random_weights()
      values = constant_op.constant(["foo", "foo"])

      embedding_lookup_result = embedding_ops.scattered_embedding_lookup(
          embedding_weights, values, dimension=10).eval()

      self.assertAllEqual(embedding_lookup_result.shape, [2, 10])
      self.assertAllEqual(embedding_lookup_result[0],
                          embedding_lookup_result[1])

  def test_scattered_embedding_multiple_partition(self):
    """Consistency must also hold when the weights are sharded."""
    with self.test_session():
      embedding_weights = self._random_weights(num_shards=7)
      values = constant_op.constant([4, 4, 5])

      embedding_lookup_result = embedding_ops.scattered_embedding_lookup(
          embedding_weights, values, dimension=5).eval()

      self.assertAllEqual(embedding_lookup_result.shape, [3, 5])
      self.assertAllEqual(embedding_lookup_result[0],
                          embedding_lookup_result[1])
      # Different embedding expected for different value.
      embedding_diff = np.min(
          (embedding_lookup_result[2] - embedding_lookup_result[0])**2)
      self.assertGreater(embedding_diff, 0)

  def test_scattered_embedding_coverage(self):
    """A large enough output dimension should touch every weight entry."""
    with self.test_session():
      size = 8
      embedding_weights = self._random_weights(size=size, num_shards=3)
      values = constant_op.constant(["foo"])

      # Large embedding dimension to cover the full range of weights.
      embedding_lookup_result = embedding_ops.scattered_embedding_lookup(
          embedding_weights, values, dimension=100).eval()

      self.assertEqual(len(np.unique(embedding_lookup_result[0])), size)

  def test_scattered_embedding_multi_dimension(self):
    """Lookup must preserve the input's multi-dimensional shape."""
    with self.test_session():
      embedding_weights = self._random_weights()
      values = constant_op.constant([["foo", "bar", "bar"],
                                     ["bar", "bar", "foo"]])

      embedding_lookup_result = embedding_ops.scattered_embedding_lookup(
          embedding_weights, values, dimension=10).eval()

      self.assertAllEqual(embedding_lookup_result.shape, [2, 3, 10])
      self.assertAllEqual(embedding_lookup_result[0][0],
                          embedding_lookup_result[1][2])

  def test_scattered_embedding_lookup_sparse(self):
    """Sparse lookup: mean combining and default fill for empty rows."""
    with self.test_session():
      embedding_weights = self._random_weights(num_shards=3)
      sparse_tensor = sparse_tensor_lib.SparseTensor(
          values=["foo", "bar", "foo", "bar"],
          indices=[[0, 0], [1, 0], [1, 1], [3, 0]],
          dense_shape=[5, 2])

      embedding_lookup_result = (
          embedding_ops.scattered_embedding_lookup_sparse(
              embedding_weights, sparse_tensor, dimension=5,
              combiner="mean").eval())

      self.assertAllEqual(embedding_lookup_result.shape, [5, 5])
      # Same non-zero embedding for the empty rows filled with a default value.
      self.assertAllEqual(embedding_lookup_result[2],
                          embedding_lookup_result[4])
      embedding_norm = np.sum(embedding_lookup_result[2]**2)
      self.assertGreater(embedding_norm, 0)

      self.assertAllEqual(embedding_lookup_result[1], 0.5 * (
          embedding_lookup_result[0] + embedding_lookup_result[3]))

  def test_embedding_lookup_unique(self):
    """embedding_lookup_unique must match plain numpy fancy indexing."""
    d_embed = 5
    n_embed = 10
    idx_shape = (2, 3, 4)
    embeds = np.random.randn(n_embed, d_embed)
    idx = np.random.randint(0, n_embed, idx_shape)

    with self.test_session():
      embedded_np = embeds[idx]
      embedded_tf = embedding_ops.embedding_lookup_unique(embeds, idx).eval()

    self.assertEqual(embedded_np.shape, embedded_tf.shape)
    np.testing.assert_almost_equal(embedded_np, embedded_tf)

  def test_embedding_lookup_unique_param3d(self):
    """Same as above but with a 3-D parameter tensor and 1-D/2-D ids."""
    embeds = np.random.randn(5, 3, 3)
    idx = np.random.randint(0, 5, 10)
    idx2d = np.random.randint(0, 5, (10, 2))

    with self.test_session():
      embedded_np = embeds[idx]
      embedded_np2d = embeds[idx2d]
      embedded_tf = embedding_ops.embedding_lookup_unique(embeds, idx).eval()
      embedded_tf_lst = embedding_ops.embedding_lookup_unique([embeds],
                                                              idx).eval()
      embedded_tf2d = embedding_ops.embedding_lookup_unique(embeds,
                                                            idx2d).eval()

    self.assertEqual(embedded_np.shape, embedded_tf.shape)
    np.testing.assert_almost_equal(embedded_np, embedded_tf)
    self.assertEqual(embedded_np.shape, embedded_tf_lst.shape)
    np.testing.assert_almost_equal(embedded_np, embedded_tf_lst)
    self.assertEqual(embedded_np2d.shape, embedded_tf2d.shape)
    np.testing.assert_almost_equal(embedded_np2d, embedded_tf2d)
class SampledScatteredEmbeddingLookupTest(test.TestCase):
  """Tests for embedding_ops._sampled_scattered_embedding_lookup."""

  def setUp(self):
    # Fix both the graph seed and the hash key so lookups are reproducible.
    random_seed.set_random_seed(1)
    self._hash_key = 1

  def _random_weights(self, size=50, num_shards=1):
    """Creates and initializes random 1-D partitioned embedding weights."""
    assert size > 0
    assert num_shards > 0
    assert num_shards <= size

    embedding_weights = partitioned_variables.create_partitioned_variables(
        shape=[size],
        slicing=[num_shards],
        initializer=init_ops.truncated_normal_initializer(
            mean=0.0, stddev=1.0, dtype=dtypes.float32))
    for w in embedding_weights:
      w.initializer.run()
    return embedding_weights

  def test_hashed_embedding_consistency(self):
    """Equal values with equal sampled candidates yield equal components."""
    with self.test_session():
      embedding_weights = self._random_weights()
      values = constant_op.constant(["foo", "foo"])
      # The first three sampled_candidates are equal, so the first three
      # embedding weights will be equal.
      sampled_candidates = constant_op.constant([[1, 3, 4, 6], [1, 3, 4, 7]])

      embedding_lookup_result = (  # pylint: disable=protected-access
          embedding_ops._sampled_scattered_embedding_lookup(
              embedding_weights,
              values,
              sampled_candidates=sampled_candidates,
              hash_key=self._hash_key).eval())

      self.assertAllEqual(embedding_lookup_result.shape, [2, 4])
      self.assertAllEqual(embedding_lookup_result[0][:3],
                          embedding_lookup_result[1][:3])
      self.assertNotEqual(embedding_lookup_result[0][3],
                          embedding_lookup_result[1][3])

  def test_hashed_embedding_multi_dimension(self):
    """Multi-dimensional values work; mismatched candidate shapes raise."""
    with self.test_session():
      embedding_weights = self._random_weights()
      values = constant_op.constant([["foo", "bar", "bar"],
                                     ["bar", "bar", "foo"]])
      sampled_candidates = constant_op.constant(
          [[[1, 3, 4, 6], [1, 7, 8, 9], [1, 7, 8, 9]],
           [[1, 7, 8, 9], [1, 7, 8, 9], [1, 3, 4, 6]]])

      embedding_lookup_result = (  # pylint: disable=protected-access
          embedding_ops._sampled_scattered_embedding_lookup(
              embedding_weights,
              values,
              sampled_candidates=sampled_candidates,
              hash_key=self._hash_key).eval())

      self.assertAllEqual(embedding_lookup_result.shape, [2, 3, 4])
      self.assertAllEqual(embedding_lookup_result[0][0],
                          embedding_lookup_result[1][2])

      # sampled_candidates whose shape does not match `values` must be
      # rejected with a descriptive error.
      invalid_indices = constant_op.constant([[[1, 3, 4, 6], [1, 7, 8, 9]],
                                              [[1, 7, 8, 9], [1, 7, 8, 9]]])
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, (
          r"\[The shape of sampled_candidates: \] \[2 2 4\] "
          r"\[ does not match the shape of values: \] \[2 3\]")):
        # pylint: disable=protected-access
        embedding_ops._sampled_scattered_embedding_lookup(
            embedding_weights, values,
            sampled_candidates=invalid_indices).eval()
class SampledScatteredEmbeddingLookupSparseTest(test.TestCase):
  """Tests for embedding_ops._sampled_scattered_embedding_lookup_sparse."""

  def setUp(self):
    # Fix both the graph seed and the hash key so lookups are reproducible.
    random_seed.set_random_seed(1)
    self._hash_key = 1

  def test_output_shape(self):
    """Verifies the shape of the output tensor."""
    with self.test_session():
      sp_values = sparse_tensor_lib.SparseTensor(
          values=["a", "a", "b", "c", "d", "e", "f"],
          indices=[[1, 0], [2, 0], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5]],
          dense_shape=[3, 6])
      params = constant_op.constant([.1, .2, .3])
      result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
          params, sp_values, dimension=4, hash_key=self._hash_key)

      self.assertEqual(result.eval().shape, (3, 4))

  def test_output_values(self):
    """Verifies the values in a trivial case."""
    with self.test_session():
      sp_values = sparse_tensor_lib.SparseTensor(
          values=["a"], indices=[[1, 0]], dense_shape=[3, 1])
      params = constant_op.constant([.1, .2, .3])
      result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
          params, sp_values, dimension=5, hash_key=self._hash_key)

      # Rows 0 and 2 contain no values, so their output must be all zeros.
      self.assertAllClose(result.eval(), [[0., 0., 0., 0.,
                                           0.], [.3, .2, .2, .3, .1],
                                          [0., 0., 0., 0., 0.]])

  def test_output_values_with_sampled_candidates(self):
    """Verifies the values for given sampled_candidates."""
    with self.test_session():
      sp_values = sparse_tensor_lib.SparseTensor(
          values=["a", "a", "b", "c", "d", "e", "f"],
          indices=[[1, 0], [2, 0], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5]],
          dense_shape=[3, 6])
      params = constant_op.constant([.1, .2, .3])
      sampled_candidates = [[1, 0], [2, 1], [3, 2]]
      sampled_result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
          params,
          sp_values,
          sampled_candidates=constant_op.constant(sampled_candidates),
          hash_key=self._hash_key)
      full_result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
          params, sp_values, dimension=4, hash_key=self._hash_key)

      sampled_result_val = sampled_result.eval()
      full_result_val = full_result.eval()
      self.assertEqual(sampled_result_val.shape, (3, 2))
      # Each sampled row must be the corresponding slice of the full lookup.
      for i in range(len(sampled_candidates)):
        self.assertAllClose(sampled_result_val[i],
                            full_result_val[i, sampled_candidates[i]])

  def test_output_values_with_sign_hash(self):
    """Verifies the values in a trivial case with hash_signs=True."""
    with self.test_session():
      sp_values = sparse_tensor_lib.SparseTensor(
          values=["a"], indices=[[1, 0]], dense_shape=[3, 1])
      params = constant_op.constant([.1, .1, .1])
      result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
          params,
          sp_values,
          dimension=4,
          with_sign_hash=True,
          hash_key=self._hash_key)

      self.assertAllClose(result.eval(), [[0., 0., 0., 0.], [-.1, -.1, -.1, .1],
                                          [0., 0., 0., 0.]])

  def test_distributive_property(self):
    """Verifies the distributive property of matrix multiplication."""
    with self.test_session():
      params = constant_op.constant([.1, .2, .3])
      sp_values_a = sparse_tensor_lib.SparseTensor(
          values=["a"], indices=[[0, 0]], dense_shape=[3, 1])
      sp_values_b = sparse_tensor_lib.SparseTensor(
          values=["b"], indices=[[2, 0]], dense_shape=[3, 1])
      sp_values_c = sparse_tensor_lib.SparseTensor(
          values=["c"], indices=[[2, 0]], dense_shape=[3, 1])
      sp_values = sparse_tensor_lib.SparseTensor(
          values=["a", "b", "c"],
          indices=[[0, 0], [2, 0], [2, 1]],
          dense_shape=[3, 2])

      result_a = embedding_ops._sampled_scattered_embedding_lookup_sparse(
          params, sp_values_a, dimension=4, hash_key=self._hash_key)
      result_b = embedding_ops._sampled_scattered_embedding_lookup_sparse(
          params, sp_values_b, dimension=4, hash_key=self._hash_key)
      result_c = embedding_ops._sampled_scattered_embedding_lookup_sparse(
          params, sp_values_c, dimension=4, hash_key=self._hash_key)
      result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
          params, sp_values, dimension=4, hash_key=self._hash_key)

      # lookup(a) + lookup(b) + lookup(c) must equal lookup(a, b, c).
      result_abc = math_ops.add_n([result_a, result_b, result_c])
      self.assertAllClose(result.eval(), result_abc.eval())
def _PName(param_id):
return "p" + str(param_id)
def _EmbeddingParams(num_shards,
                     vocab_size,
                     dtype=dtypes.float32,
                     shape=None,
                     use_shapeless_placeholder=False):
  """Builds sharded embedding parameters for the tests below.

  Returns:
    A tuple (p, params, feed_dict) where `p` is the list of parameter tensors
    (one per shard), `params` maps tensor names ("p<i>:0") to their numpy
    values, and `feed_dict` maps placeholder names to those same values.
  """
  p = []
  params = {}
  feed_dict = {}
  if not shape:
    shape = [10]
  for i in range(num_shards):
    shard_shape = [vocab_size // num_shards] + shape
    if i < vocab_size % num_shards:  # Excess goes evenly on the first shards
      shard_shape[0] += 1

    param_name = _PName(i)

    if use_shapeless_placeholder:
      param = array_ops.placeholder(dtype, shape=None, name=param_name)
    else:
      param = constant_op.constant(
          1.0, shape=shard_shape, dtype=dtype, name=param_name)
    p.append(param)
    np_type = "f" if dtype == dtypes.float32 else "d"
    # Values in (1, 2) so every embedding entry is strictly non-zero.
    val = (np.random.rand(*shard_shape).astype(np_type)) + 1
    params[param_name + ":0"] = val
    feed_dict[param.name] = val
  return p, params, feed_dict
def _EmbeddingResult(params,
                     id_vals,
                     num_shards,
                     vocab_size,
                     partition_strategy="mod",
                     weight_vals=None):
  """Computes the expected (numpy) result of a sharded embedding lookup.

  Returns:
    A tuple (values, weights, weights_squared) where, per batch entry,
    `values` is the weighted sum of looked-up embedding rows and `weights` /
    `weights_squared` are the sums of the weights and of their squares
    (callers use these to emulate the "mean" and "sqrtn" combiners).
  """
  if weight_vals is None:
    # No weights supplied: treat every id as having weight 1.
    weight_vals = np.copy(id_vals)
    weight_vals.fill(1)
  values = []
  weights = []
  weights_squared = []
  for ids, wts in zip(id_vals, weight_vals):
    value_aggregation = None
    weight_aggregation = None
    squared_weight_aggregation = None
    if isinstance(ids, compat.integral_types):
      # A scalar id: promote to singleton lists so the loop below works.
      ids = [ids]
      wts = [wts]
    for i, weight_value in zip(ids, wts):
      if partition_strategy == "mod":
        # "mod": id i lives in shard i % num_shards at row i // num_shards.
        val = np.copy(params[_PName(i % num_shards) + ":0"][
            i // num_shards, :]) * weight_value
      elif partition_strategy == "div":
        # "div": contiguous ranges of ids per shard; the first `extras`
        # shards each hold one extra id.
        ids_per_partition, extras = divmod(vocab_size, num_shards)
        threshold = extras * (ids_per_partition + 1)
        if i < threshold:
          partition = i // (ids_per_partition + 1)
          offset = i % (ids_per_partition + 1)
        else:
          partition = extras + (i - threshold) // ids_per_partition
          offset = (i - threshold) % ids_per_partition
        val = np.copy(
            params[_PName(partition) + ":0"][offset, :]) * weight_value
      else:
        assert False
      if value_aggregation is None:
        assert weight_aggregation is None
        assert squared_weight_aggregation is None
        value_aggregation = val
        weight_aggregation = weight_value
        squared_weight_aggregation = weight_value * weight_value
      else:
        assert weight_aggregation is not None
        assert squared_weight_aggregation is not None
        value_aggregation += val
        weight_aggregation += weight_value
        squared_weight_aggregation += weight_value * weight_value
    values.append(value_aggregation)
    weights.append(weight_aggregation)
    weights_squared.append(squared_weight_aggregation)
  values = np.array(values).astype(np.float32)
  weights = np.array(weights).astype(np.float32)
  weights_squared = np.array(weights_squared).astype(np.float32)
  return values, weights, weights_squared
class EmbeddingLookupSparseWithDistributedAggregationTest(test.TestCase):
  """Tests embedding_lookup_sparse_with_distributed_aggregation against the
  numpy reference implementation in `_EmbeddingResult`."""

  def _RandomIdsAndWeights(self, batch_size, vocab_size):
    # Builds a random ragged batch of (id, weight) pairs, returned both as a
    # pair of SparseTensors and as the flat numpy arrays used to compute the
    # expected values.
    max_val_per_entry = 6
    vals_per_batch_entry = np.random.randint(
        1, max_val_per_entry, size=batch_size)
    num_vals = np.sum(vals_per_batch_entry)

    ids = np.random.randint(vocab_size, size=num_vals)
    # Weights in (1, 2) so they are never zero.
    weights = 1 + np.random.rand(num_vals)

    indices = []
    for batch_entry, num_val in enumerate(vals_per_batch_entry):
      for val_index in range(num_val):
        indices.append([batch_entry, val_index])

    shape = [batch_size, max_val_per_entry]

    sp_ids = sparse_tensor_lib.SparseTensor(
        constant_op.constant(indices, dtypes.int64),
        constant_op.constant(ids, dtypes.int32),
        constant_op.constant(shape, dtypes.int64))
    sp_weights = sparse_tensor_lib.SparseTensor(
        constant_op.constant(indices, dtypes.int64),
        constant_op.constant(weights, dtypes.float32),
        constant_op.constant(shape, dtypes.int64))

    return sp_ids, sp_weights, ids, weights, vals_per_batch_entry

  def _GroupByBatchEntry(self, vals, vals_per_batch_entry):
    # Re-nests the flat `vals` sequence into one sub-list per batch entry.
    grouped_vals = []
    index = 0
    for num_val in vals_per_batch_entry:
      grouped_vals.append(list(vals[index:(index + num_val)]))
      index += num_val
    return grouped_vals

  def testEmbeddingLookupSparse(self):
    vocab_size = 13
    batch_size = 10
    param_shape = [2, 5]
    expected_lookup_result_shape = [None] + param_shape

    sp_ids, sp_weights, ids, weights, vals_per_batch_entry = (
        self._RandomIdsAndWeights(batch_size, vocab_size))

    grouped_ids = self._GroupByBatchEntry(ids, vals_per_batch_entry)
    grouped_weights = self._GroupByBatchEntry(weights, vals_per_batch_entry)
    grouped_ignored_weights = self._GroupByBatchEntry(
        np.ones(np.sum(vals_per_batch_entry)), vals_per_batch_entry)

    # Exercise every combination of sharding, combiner, dtype and
    # weighted/unweighted lookup.
    for num_shards, combiner, dtype, ignore_weights in itertools.product(
        [1, 5], ["sum", "mean", "sqrtn"], [dtypes.float32,
                                           dtypes.float64], [True, False]):

      with self.test_session():
        p, params, feed_dict = _EmbeddingParams(
            num_shards, vocab_size, shape=param_shape, dtype=dtype)
        embedding_sum = \
            embedding_ops.embedding_lookup_sparse_with_distributed_aggregation(
                p,
                sp_ids,
                None if ignore_weights else sp_weights,
                combiner=combiner)

        self.assertEqual(embedding_sum.get_shape().as_list(),
                         expected_lookup_result_shape)

        tf_embedding_sum = embedding_sum.eval(feed_dict=feed_dict)

        np_embedding_sum, np_weight_sum, np_weight_sq_sum = _EmbeddingResult(
            params,
            grouped_ids,
            num_shards,
            vocab_size,
            weight_vals=grouped_ignored_weights
            if ignore_weights else grouped_weights)
        # Normalize the raw sum according to the combiner under test.
        if combiner == "mean":
          np_embedding_sum /= np.reshape(np_weight_sum, (batch_size, 1, 1))
        if combiner == "sqrtn":
          np_embedding_sum /= np.reshape(
              np.sqrt(np_weight_sq_sum), (batch_size, 1, 1))
        self.assertAllClose(np_embedding_sum, tf_embedding_sum)

  def testGradientsEmbeddingLookupSparse(self):
    vocab_size = 12
    batch_size = 4
    param_shape = [2, 3]

    sp_ids, sp_weights, _, _, _ = (self._RandomIdsAndWeights(
        batch_size, vocab_size))
    for num_shards, combiner, dtype, ignore_weights in itertools.product(
        [1, 3], ["sum", "mean", "sqrtn"], [dtypes.float32,
                                           dtypes.float64], [True, False]):
      with self.test_session():
        x, params, _ = _EmbeddingParams(
            num_shards, vocab_size, shape=param_shape, dtype=dtype)

        y = embedding_ops.embedding_lookup_sparse_with_distributed_aggregation(
            x,
            sp_ids,
            None if ignore_weights else sp_weights,
            combiner=combiner)
        x_name = [_PName(i) for i in range(num_shards)]
        x_init_value = [params[x_n + ":0"] for x_n in x_name]
        x_shape = [i.shape for i in x_init_value]
        y_shape = [batch_size] + list(params[_PName(0) + ":0"].shape[1:])
        err = gradient_checker.compute_gradient_error(
            x, x_shape, y, y_shape, x_init_value=x_init_value)
        # float64 gradients are numerically much more accurate.
        self.assertLess(err, 1e-5 if dtype == dtypes.float64 else 2e-3)

  def testIncompatibleShapes(self):
    with self.test_session():
      x, _, _ = _EmbeddingParams(1, 10, dtype=dtypes.float32)
      sp_ids = sparse_tensor_lib.SparseTensor(
          constant_op.constant([[0, 0], [0, 1], [1, 0]], dtypes.int64),
          constant_op.constant([0, 1, 2], dtypes.int32),
          constant_op.constant([2, 2], dtypes.int64))
      sp_weights = sparse_tensor_lib.SparseTensor(
          constant_op.constant([[0, 0], [0, 1]], dtypes.int64),
          constant_op.constant([12.0, 5.0], dtypes.float32),
          constant_op.constant([1, 2], dtypes.int64))

      # sp_ids and sp_weights have mismatched entry counts / shapes, which
      # must be rejected.
      with self.assertRaises(ValueError):
        embedding_ops.embedding_lookup_sparse_with_distributed_aggregation(
            x, sp_ids, sp_weights, combiner="mean")
# Run the tests when the module is executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
ltilve/chromium | third_party/pymock/mock.py | 424 | 75527 | # mock.py
# Test tools for mocking and patching.
# Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# mock 1.0
# http://www.voidspace.org.uk/python/mock/
# Released subject to the BSD License
# Please see http://www.voidspace.org.uk/python/license.shtml
# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
# Comments, suggestions and bug reports welcome.
# Names exported via `from mock import *`.
__all__ = (
    'Mock',
    'MagicMock',
    'patch',
    'sentinel',
    'DEFAULT',
    'ANY',
    'call',
    'create_autospec',
    'FILTER_DIR',
    'NonCallableMock',
    'NonCallableMagicMock',
    'mock_open',
    'PropertyMock',
)


__version__ = '1.0.1'
import pprint
import sys

try:
    import inspect
except ImportError:
    # for alternative platforms that
    # may not have inspect
    inspect = None

try:
    from functools import wraps as original_wraps
except ImportError:
    # Python 2.4 compatibility
    def wraps(original):
        def inner(f):
            f.__name__ = original.__name__
            f.__doc__ = original.__doc__
            f.__module__ = original.__module__
            f.__wrapped__ = original
            return f
        return inner
else:
    if sys.version_info[:2] >= (3, 3):
        wraps = original_wraps
    else:
        # Before 3.3 functools.wraps did not set __wrapped__; add it here.
        def wraps(func):
            def inner(f):
                f = original_wraps(func)(f)
                f.__wrapped__ = func
                return f
            return inner

try:
    unicode
except NameError:
    # Python 3
    basestring = unicode = str

try:
    long
except NameError:
    # Python 3
    long = int

try:
    BaseException
except NameError:
    # Python 2.4 compatibility
    BaseException = Exception

try:
    next
except NameError:
    # Pre-2.6 compatibility: the next() builtin does not exist.
    def next(obj):
        return obj.next()


BaseExceptions = (BaseException,)
if 'java' in sys.platform:
    # jython
    import java
    BaseExceptions = (BaseException, java.lang.Throwable)

try:
    _isidentifier = str.isidentifier
except AttributeError:
    # Python 2.X
    import keyword
    import re
    regex = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
    def _isidentifier(string):
        if string in keyword.kwlist:
            return False
        return regex.match(string)


inPy3k = sys.version_info[0] == 3

# Needed to work around Python 3 bug where use of "super" interferes with
# defining __class__ as a descriptor
_super = super

# Version-dependent names: the bound-method self attribute and the name of
# the builtins module.
self = 'im_self'
builtin = '__builtin__'
if inPy3k:
    self = '__self__'
    builtin = 'builtins'

# When True, __dir__ on mocks filters output down to "useful" members.
FILTER_DIR = True
def _is_instance_mock(obj):
    """Return True if `obj` is an instance of any mock class.

    isinstance cannot be used directly because mocks override __class__;
    inspecting the real type avoids that. NonCallableMock is the base class
    for all mocks.
    """
    real_type = type(obj)
    return issubclass(real_type, NonCallableMock)
def _is_exception(obj):
    """Return True for exception instances and for exception classes."""
    if isinstance(obj, BaseExceptions):
        return True
    return isinstance(obj, ClassTypes) and issubclass(obj, BaseExceptions)
class _slotted(object):
    # Minimal class with __slots__, used only to obtain the type of a slot
    # descriptor for DescriptorTypes below.
    __slots__ = ['a']


# Types treated as data descriptors when inspecting spec objects.
DescriptorTypes = (
    type(_slotted.a),
    property,
)
def _getsignature(func, skipfirst, instance=False):
    """Return (signature_string, func) for `func`, or None when it cannot
    be introspected (e.g. a C-implemented function).

    `skipfirst` drops the first positional argument (self/cls);
    `instance` marks `func` as an instance rather than a class.
    """
    if inspect is None:
        raise ImportError('inspect module not available')

    if isinstance(func, ClassTypes) and not instance:
        # For a class, inspect the constructor's signature.
        try:
            func = func.__init__
        except AttributeError:
            return
        skipfirst = True
    elif not isinstance(func, FunctionTypes):
        # for classes where instance is True we end up here too
        try:
            func = func.__call__
        except AttributeError:
            return

    if inPy3k:
        try:
            argspec = inspect.getfullargspec(func)
        except TypeError:
            # C function / method, possibly inherited object().__init__
            return
        regargs, varargs, varkw, defaults, kwonly, kwonlydef, ann = argspec
    else:
        try:
            regargs, varargs, varkwargs, defaults = inspect.getargspec(func)
        except TypeError:
            # C function / method, possibly inherited object().__init__
            return

    # instance methods and classmethods need to lose the self argument
    if getattr(func, self, None) is not None:
        regargs = regargs[1:]
    if skipfirst:
        # this condition and the above one are never both True - why?
        regargs = regargs[1:]

    if inPy3k:
        signature = inspect.formatargspec(
            regargs, varargs, varkw, defaults,
            kwonly, kwonlydef, ann, formatvalue=lambda value: "")
    else:
        signature = inspect.formatargspec(
            regargs, varargs, varkwargs, defaults,
            formatvalue=lambda value: "")
    # Strip the surrounding parentheses from the formatted spec.
    return signature[1:-1], func
def _check_signature(func, mock, skipfirst, instance=False):
    """Attach a signature-checking stub to type(mock) so calls through the
    mock are validated against func's signature before being recorded."""
    if not _callable(func):
        return

    result = _getsignature(func, skipfirst, instance)
    if result is None:
        return
    signature, func = result

    # can't use self because "self" is common as an argument name
    # unfortunately even not in the first place
    src = "lambda _mock_self, %s: None" % signature
    checksig = eval(src, {})
    _copy_func_details(func, checksig)
    type(mock)._mock_check_sig = checksig
def _copy_func_details(func, funcopy):
    # Copy identity metadata (name, docstring, module) and default-argument
    # info from func onto the generated funcopy so it masquerades as the
    # original; the attribute names for defaults differ between Python 2/3.
    funcopy.__name__ = func.__name__
    funcopy.__doc__ = func.__doc__
    #funcopy.__dict__.update(func.__dict__)
    funcopy.__module__ = func.__module__
    if not inPy3k:
        funcopy.func_defaults = func.func_defaults
        return
    funcopy.__defaults__ = func.__defaults__
    funcopy.__kwdefaults__ = func.__kwdefaults__
def _callable(obj):
    """Return True when obj is a class or exposes a __call__ attribute."""
    return (isinstance(obj, ClassTypes) or
            getattr(obj, '__call__', None) is not None)
def _is_list(obj):
# checks for list or tuples
# XXXX badly named!
return type(obj) in (list, tuple)
def _instance_callable(obj):
    """Given an object, return True if the object is callable.
    For classes, return True if instances would be callable."""
    if not isinstance(obj, ClassTypes):
        # Plain instance: callable iff it exposes __call__.
        return getattr(obj, '__call__', None) is not None

    # A class: instances are callable when __call__ is defined somewhere in
    # the inheritance tree.  __bases__ is walked (rather than __mro__) so
    # that old-style classes are handled too.
    if obj.__dict__.get('__call__') is not None:
        return True
    return any(_instance_callable(base) for base in obj.__bases__)
def _set_signature(mock, original, instance=False):
    # creates a function with signature (*args, **kwargs) that delegates to a
    # mock. It still does signature checking by calling a lambda with the same
    # signature as the original.
    if not _callable(original):
        return

    skipfirst = isinstance(original, ClassTypes)
    result = _getsignature(original, skipfirst, instance)
    if result is None:
        # was a C function (e.g. object().__init__ ) that can't be mocked
        return

    signature, func = result
    src = "lambda %s: None" % signature
    checksig = eval(src, {})
    _copy_func_details(func, checksig)

    name = original.__name__
    if not _isidentifier(name):
        # Fall back to a safe name when the original's is not a valid
        # Python identifier (it is interpolated into source below).
        name = 'funcopy'
    context = {'_checksig_': checksig, 'mock': mock}
    src = """def %s(*args, **kwargs):
    _checksig_(*args, **kwargs)
    return mock(*args, **kwargs)""" % name
    exec (src, context)
    funcopy = context[name]
    _setup_func(funcopy, mock)
    return funcopy
def _setup_func(funcopy, mock):
    """Wire up `funcopy` (a generated delegating function) so it exposes the
    mock's assertion helpers and call-recording attributes."""
    funcopy.mock = mock

    # can't use isinstance with mocks
    if not _is_instance_mock(mock):
        return

    def assert_called_with(*args, **kwargs):
        return mock.assert_called_with(*args, **kwargs)
    def assert_called_once_with(*args, **kwargs):
        return mock.assert_called_once_with(*args, **kwargs)
    def assert_has_calls(*args, **kwargs):
        return mock.assert_has_calls(*args, **kwargs)
    def assert_any_call(*args, **kwargs):
        return mock.assert_any_call(*args, **kwargs)
    def reset_mock():
        # Reset both the function-level records and the underlying mock.
        funcopy.method_calls = _CallList()
        funcopy.mock_calls = _CallList()
        mock.reset_mock()
        ret = funcopy.return_value
        if _is_instance_mock(ret) and not ret is mock:
            ret.reset_mock()

    funcopy.called = False
    funcopy.call_count = 0
    funcopy.call_args = None
    funcopy.call_args_list = _CallList()
    funcopy.method_calls = _CallList()
    funcopy.mock_calls = _CallList()

    funcopy.return_value = mock.return_value
    funcopy.side_effect = mock.side_effect
    funcopy._mock_children = mock._mock_children

    funcopy.assert_called_with = assert_called_with
    funcopy.assert_called_once_with = assert_called_once_with
    funcopy.assert_has_calls = assert_has_calls
    funcopy.assert_any_call = assert_any_call
    funcopy.reset_mock = reset_mock

    # The mock now delegates its call-recording attributes to funcopy
    # (see _delegating_property).
    mock._mock_delegate = funcopy
def _is_magic(name):
return '__%s__' % name[2:-2] == name
class _SentinelObject(object):
"A unique, named, sentinel object."
def __init__(self, name):
self.name = name
def __repr__(self):
return 'sentinel.%s' % self.name
class _Sentinel(object):
    """Access attributes to return a named object, usable as a sentinel."""

    def __init__(self):
        self._sentinels = {}

    def __getattr__(self, name):
        if name == '__bases__':
            # Without this help(mock) raises an exception
            raise AttributeError
        # Create-on-first-use: repeated access to the same name always
        # yields the same sentinel object.
        try:
            return self._sentinels[name]
        except KeyError:
            return self._sentinels.setdefault(name, _SentinelObject(name))
# The single module-level factory for named sentinel objects.
sentinel = _Sentinel()

# Sentinels used throughout this module: DEFAULT marks "no value supplied";
# _missing / _deleted track child-attribute state on mocks.
DEFAULT = sentinel.DEFAULT
_missing = sentinel.MISSING
_deleted = sentinel.DELETED
class OldStyleClass:
    # Declared only to capture the type of an old-style (classic) class on
    # Python 2; on Python 3 this is simply `type`.
    pass
ClassType = type(OldStyleClass)
def _copy(value):
if type(value) in (dict, list, tuple, set):
return type(value)(value)
return value
# The types that count as "a class": on Python 2 both new-style (type) and
# old-style (ClassType) classes; on Python 3 only type.
ClassTypes = (type,)
if not inPy3k:
    ClassTypes = (type, ClassType)
# Attribute names that are always set directly on a mock, bypassing the
# spec checking performed in NonCallableMock.__setattr__.
_allowed_names = set(
    [
        'return_value', '_mock_return_value', 'side_effect',
        '_mock_side_effect', '_mock_parent', '_mock_new_parent',
        '_mock_name', '_mock_new_name'
    ]
)
def _delegating_property(name):
    """Create a property for '<name>' that stores on '_mock_<name>', or
    delegates to self._mock_delegate when one is set (see _setup_func)."""
    _allowed_names.add(name)
    _the_name = '_mock_' + name
    def _get(self, name=name, _the_name=_the_name):
        sig = self._mock_delegate
        if sig is None:
            return getattr(self, _the_name)
        return getattr(sig, name)
    def _set(self, value, name=name, _the_name=_the_name):
        sig = self._mock_delegate
        if sig is None:
            # Write via __dict__ to avoid re-entering __setattr__.
            self.__dict__[_the_name] = value
        else:
            setattr(sig, name, value)

    return property(_get, _set)
class _CallList(list):
def __contains__(self, value):
if not isinstance(value, list):
return list.__contains__(self, value)
len_value = len(value)
len_self = len(self)
if len_value > len_self:
return False
for i in range(0, len_self - len_value + 1):
sub_list = self[i:i+len_value]
if sub_list == value:
return True
return False
def __repr__(self):
return pprint.pformat(list(self))
def _check_and_set_parent(parent, value, name, new_name):
    """If `value` is a fresh, unparented mock, adopt it as a child of
    `parent`.

    Returns True when parenting was applied; False when value is not a
    mock, already has a name or parent, or is `parent` itself (or one of
    its ancestors).
    """
    if not _is_instance_mock(value):
        return False
    if ((value._mock_name or value._mock_new_name) or
        (value._mock_parent is not None) or
        (value._mock_new_parent is not None)):
        return False

    _parent = parent
    while _parent is not None:
        # setting a mock (value) as a child or return value of itself
        # should not modify the mock
        if _parent is value:
            return False
        _parent = _parent._mock_new_parent

    if new_name:
        value._mock_new_parent = parent
        value._mock_new_name = new_name
    if name:
        value._mock_parent = parent
        value._mock_name = name
    return True
class Base(object):
    # Class-level defaults shared by every mock; instances shadow these.
    _mock_return_value = DEFAULT
    _mock_side_effect = None
    def __init__(self, *args, **kwargs):
        # Swallows all arguments so the cooperative super().__init__ chain
        # through the mock classes terminates cleanly here.
        pass
class NonCallableMock(Base):
    """A non-callable version of `Mock`"""

    def __new__(cls, *args, **kw):
        # every instance has its own class
        # so we can create magic methods on the
        # class without stomping on other mocks
        new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__})
        instance = object.__new__(new)
        return instance


    def __init__(
        self, spec=None, wraps=None, name=None, spec_set=None,
        parent=None, _spec_state=None, _new_name='', _new_parent=None,
        **kwargs
    ):
        if _new_parent is None:
            _new_parent = parent

        # Assign through __dict__ to avoid triggering __setattr__ (which
        # performs spec checking and child parenting).
        __dict__ = self.__dict__
        __dict__['_mock_parent'] = parent
        __dict__['_mock_name'] = name
        __dict__['_mock_new_name'] = _new_name
        __dict__['_mock_new_parent'] = _new_parent

        if spec_set is not None:
            # spec_set doubles as the spec object when supplied.
            spec = spec_set
            spec_set = True

        self._mock_add_spec(spec, spec_set)

        __dict__['_mock_children'] = {}
        __dict__['_mock_wraps'] = wraps
        __dict__['_mock_delegate'] = None

        __dict__['_mock_called'] = False
        __dict__['_mock_call_args'] = None
        __dict__['_mock_call_count'] = 0
        __dict__['_mock_call_args_list'] = _CallList()
        __dict__['_mock_mock_calls'] = _CallList()

        __dict__['method_calls'] = _CallList()

        if kwargs:
            self.configure_mock(**kwargs)

        _super(NonCallableMock, self).__init__(
            spec, wraps, name, spec_set, parent,
            _spec_state
        )


    def attach_mock(self, mock, attribute):
        """
        Attach a mock as an attribute of this one, replacing its name and
        parent. Calls to the attached mock will be recorded in the
        `method_calls` and `mock_calls` attributes of this one."""
        # Clear any existing identity so setattr re-parents the mock.
        mock._mock_parent = None
        mock._mock_new_parent = None
        mock._mock_name = ''
        mock._mock_new_name = None

        setattr(self, attribute, mock)


    def mock_add_spec(self, spec, spec_set=False):
        """Add a spec to a mock. `spec` can either be an object or a
        list of strings. Only attributes on the `spec` can be fetched as
        attributes from the mock.

        If `spec_set` is True then only attributes on the spec can be set."""
        self._mock_add_spec(spec, spec_set)


    def _mock_add_spec(self, spec, spec_set):
        _spec_class = None

        if spec is not None and not _is_list(spec):
            if isinstance(spec, ClassTypes):
                _spec_class = spec
            else:
                _spec_class = _get_class(spec)

            # The spec is reduced to a list of allowed attribute names.
            spec = dir(spec)

        __dict__ = self.__dict__
        __dict__['_spec_class'] = _spec_class
        __dict__['_spec_set'] = spec_set
        __dict__['_mock_methods'] = spec


    def __get_return_value(self):
        ret = self._mock_return_value
        if self._mock_delegate is not None:
            ret = self._mock_delegate.return_value

        if ret is DEFAULT:
            # Lazily create the return-value child mock on first access.
            ret = self._get_child_mock(
                _new_parent=self, _new_name='()'
            )
            self.return_value = ret
        return ret


    def __set_return_value(self, value):
        if self._mock_delegate is not None:
            self._mock_delegate.return_value = value
        else:
            self._mock_return_value = value
            _check_and_set_parent(self, value, None, '()')

    __return_value_doc = "The value to be returned when the mock is called."
    return_value = property(__get_return_value, __set_return_value,
                            __return_value_doc)


    @property
    def __class__(self):
        # Reporting the spec class makes isinstance() checks pass for
        # specced mocks.
        if self._spec_class is None:
            return type(self)
        return self._spec_class

    # Call-recording attributes; they delegate to _mock_delegate when set.
    called = _delegating_property('called')
    call_count = _delegating_property('call_count')
    call_args = _delegating_property('call_args')
    call_args_list = _delegating_property('call_args_list')
    mock_calls = _delegating_property('mock_calls')


    def __get_side_effect(self):
        sig = self._mock_delegate
        if sig is None:
            return self._mock_side_effect
        return sig.side_effect

    def __set_side_effect(self, value):
        value = _try_iter(value)
        sig = self._mock_delegate
        if sig is None:
            self._mock_side_effect = value
        else:
            sig.side_effect = value

    side_effect = property(__get_side_effect, __set_side_effect)


    def reset_mock(self):
        "Restore the mock object to its initial state."
        self.called = False
        self.call_args = None
        self.call_count = 0
        self.mock_calls = _CallList()
        self.call_args_list = _CallList()
        self.method_calls = _CallList()

        # Recursively reset child mocks; _SpecState placeholders are not
        # real mocks and are skipped.
        for child in self._mock_children.values():
            if isinstance(child, _SpecState):
                continue
            child.reset_mock()

        ret = self._mock_return_value
        if _is_instance_mock(ret) and ret is not self:
            ret.reset_mock()


    def configure_mock(self, **kwargs):
        """Set attributes on the mock through keyword arguments.

        Attributes plus return values and side effects can be set on child
        mocks using standard dot notation and unpacking a dictionary in the
        method call:

        >>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError}
        >>> mock.configure_mock(**attrs)"""
        for arg, val in sorted(kwargs.items(),
                               # we sort on the number of dots so that
                               # attributes are set before we set attributes on
                               # attributes
                               key=lambda entry: entry[0].count('.')):
            args = arg.split('.')
            final = args.pop()
            obj = self
            for entry in args:
                obj = getattr(obj, entry)
            setattr(obj, final, val)


    def __getattr__(self, name):
        if name == '_mock_methods':
            raise AttributeError(name)
        elif self._mock_methods is not None:
            # With a spec, only spec'd (non-magic) attributes exist.
            if name not in self._mock_methods or name in _all_magics:
                raise AttributeError("Mock object has no attribute %r" % name)
        elif _is_magic(name):
            raise AttributeError(name)

        result = self._mock_children.get(name)
        if result is _deleted:
            # Attribute was explicitly deleted via __delattr__.
            raise AttributeError(name)
        elif result is None:
            wraps = None
            if self._mock_wraps is not None:
                # XXXX should we get the attribute without triggering code
                # execution?
                wraps = getattr(self._mock_wraps, name)

            # Auto-create the child mock on first access.
            result = self._get_child_mock(
                parent=self, name=name, wraps=wraps, _new_name=name,
                _new_parent=self
            )
            self._mock_children[name] = result

        elif isinstance(result, _SpecState):
            # Lazily realise an autospecced child from its stored state.
            result = create_autospec(
                result.spec, result.spec_set, result.instance,
                result.parent, result.name
            )
            self._mock_children[name] = result

        return result


    def __repr__(self):
        # Reconstruct the dotted access path by walking up the parent chain.
        _name_list = [self._mock_new_name]
        _parent = self._mock_new_parent
        last = self

        dot = '.'
        if _name_list == ['()']:
            dot = ''
        seen = set()
        while _parent is not None:
            last = _parent

            _name_list.append(_parent._mock_new_name + dot)
            dot = '.'
            if _parent._mock_new_name == '()':
                dot = ''

            _parent = _parent._mock_new_parent

            # use ids here so as not to call __hash__ on the mocks
            if id(_parent) in seen:
                break
            seen.add(id(_parent))

        _name_list = list(reversed(_name_list))
        _first = last._mock_name or 'mock'
        if len(_name_list) > 1:
            if _name_list[1] not in ('()', '().'):
                _first += '.'
        _name_list[0] = _first
        name = ''.join(_name_list)

        name_string = ''
        if name not in ('mock', 'mock.'):
            name_string = ' name=%r' % name

        spec_string = ''
        if self._spec_class is not None:
            spec_string = ' spec=%r'
            if self._spec_set:
                spec_string = ' spec_set=%r'
            spec_string = spec_string % self._spec_class.__name__
        return "<%s%s%s id='%s'>" % (
            type(self).__name__,
            name_string,
            spec_string,
            id(self)
        )


    def __dir__(self):
        """Filter the output of `dir(mock)` to only useful members."""
        extras = self._mock_methods or []
        from_type = dir(type(self))
        from_dict = list(self.__dict__)

        if FILTER_DIR:
            # Hide private/internal names unless they are magic methods.
            from_type = [e for e in from_type if not e.startswith('_')]
            from_dict = [e for e in from_dict if not e.startswith('_') or
                         _is_magic(e)]
        return sorted(set(extras + from_type + from_dict +
                          list(self._mock_children)))


    def __setattr__(self, name, value):
        if name in _allowed_names:
            # property setters go through here
            return object.__setattr__(self, name, value)
        elif (self._spec_set and self._mock_methods is not None and
              name not in self._mock_methods and
              name not in self.__dict__):
            raise AttributeError("Mock object has no attribute '%s'" % name)
        elif name in _unsupported_magics:
            msg = 'Attempting to set unsupported magic method %r.' % name
            raise AttributeError(msg)
        elif name in _all_magics:
            if self._mock_methods is not None and name not in self._mock_methods:
                raise AttributeError("Mock object has no attribute '%s'" % name)

            if not _is_instance_mock(value):
                # Plain values become methods on this mock's private class.
                setattr(type(self), name, _get_method(name, value))
                original = value
                value = lambda *args, **kw: original(self, *args, **kw)
            else:
                # only set _new_name and not name so that mock_calls is tracked
                # but not method calls
                _check_and_set_parent(self, value, None, name)
                setattr(type(self), name, value)
                self._mock_children[name] = value
        elif name == '__class__':
            self._spec_class = value
            return
        else:
            if _check_and_set_parent(self, value, name, name):
                self._mock_children[name] = value
        return object.__setattr__(self, name, value)


    def __delattr__(self, name):
        if name in _all_magics and name in type(self).__dict__:
            delattr(type(self), name)
            if name not in self.__dict__:
                # for magic methods that are still MagicProxy objects and
                # not set on the instance itself
                return

        if name in self.__dict__:
            object.__delattr__(self, name)

        obj = self._mock_children.get(name, _missing)
        if obj is _deleted:
            raise AttributeError(name)
        if obj is not _missing:
            del self._mock_children[name]
        # Remember the deletion so __getattr__ raises instead of recreating.
        self._mock_children[name] = _deleted


    def _format_mock_call_signature(self, args, kwargs):
        name = self._mock_name or 'mock'
        return _format_call_signature(name, args, kwargs)


    def _format_mock_failure_message(self, args, kwargs):
        message = 'Expected call: %s\nActual call: %s'
        expected_string = self._format_mock_call_signature(args, kwargs)
        call_args = self.call_args
        if len(call_args) == 3:
            # Drop the name element of a three-tuple call record.
            call_args = call_args[1:]
        actual_string = self._format_mock_call_signature(*call_args)
        return message % (expected_string, actual_string)


    def assert_called_with(_mock_self, *args, **kwargs):
        """assert that the mock was called with the specified arguments.

        Raises an AssertionError if the args and keyword args passed in are
        different to the last call to the mock."""
        self = _mock_self
        if self.call_args is None:
            expected = self._format_mock_call_signature(args, kwargs)
            raise AssertionError('Expected call: %s\nNot called' % (expected,))

        if self.call_args != (args, kwargs):
            msg = self._format_mock_failure_message(args, kwargs)
            raise AssertionError(msg)


    def assert_called_once_with(_mock_self, *args, **kwargs):
        """assert that the mock was called exactly once and with the specified
        arguments."""
        self = _mock_self
        if not self.call_count == 1:
            msg = ("Expected to be called once. Called %s times." %
                   self.call_count)
            raise AssertionError(msg)
        return self.assert_called_with(*args, **kwargs)


    def assert_has_calls(self, calls, any_order=False):
        """assert the mock has been called with the specified calls.
        The `mock_calls` list is checked for the calls.

        If `any_order` is False (the default) then the calls must be
        sequential. There can be extra calls before or after the
        specified calls.

        If `any_order` is True then the calls can be in any order, but
        they must all appear in `mock_calls`."""
        if not any_order:
            # _CallList.__contains__ matches contiguous sub-sequences.
            if calls not in self.mock_calls:
                raise AssertionError(
                    'Calls not found.\nExpected: %r\n'
                    'Actual: %r' % (calls, self.mock_calls)
                )
            return

        all_calls = list(self.mock_calls)

        not_found = []
        for kall in calls:
            try:
                all_calls.remove(kall)
            except ValueError:
                not_found.append(kall)
        if not_found:
            raise AssertionError(
                '%r not all found in call list' % (tuple(not_found),)
            )


    def assert_any_call(self, *args, **kwargs):
        """assert the mock has been called with the specified arguments.

        The assert passes if the mock has *ever* been called, unlike
        `assert_called_with` and `assert_called_once_with` that only pass if
        the call is the most recent one."""
        kall = call(*args, **kwargs)
        if kall not in self.call_args_list:
            expected_string = self._format_mock_call_signature(args, kwargs)
            raise AssertionError(
                '%s call not found' % expected_string
            )


    def _get_child_mock(self, **kw):
        """Create the child mocks for attributes and return value.
        By default child mocks will be the same type as the parent.
        Subclasses of Mock may want to override this to customize the way
        child mocks are made.

        For non-callable mocks the callable variant will be used (rather than
        any custom subclass)."""
        _type = type(self)
        if not issubclass(_type, CallableMixin):
            if issubclass(_type, NonCallableMagicMock):
                klass = MagicMock
            elif issubclass(_type, NonCallableMock) :
                klass = Mock
            else:
                # __new__ inserted a per-instance class; the real class is
                # the next entry in the MRO.
                klass = _type.__mro__[1]
        return klass(**kw)
def _try_iter(obj):
    """Convert `obj` to an iterator where that makes sense for side_effect.

    None, exceptions and callables pass through untouched; everything else
    is wrapped with iter().  Non-iterables are also returned unchanged for
    backwards compatibility — they will blow up later, on the first call.
    """
    if obj is None or _is_exception(obj) or _callable(obj):
        return obj
    try:
        return iter(obj)
    except TypeError:
        return obj
class CallableMixin(Base):
    # Mixin that adds __call__ plus call recording to NonCallableMock.

    def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
                 wraps=None, name=None, spec_set=None, parent=None,
                 _spec_state=None, _new_name='', _new_parent=None, **kwargs):
        # Store directly in __dict__ to bypass the return_value property.
        self.__dict__['_mock_return_value'] = return_value

        _super(CallableMixin, self).__init__(
            spec, wraps, name, spec_set, parent,
            _spec_state, _new_name, _new_parent, **kwargs
        )

        self.side_effect = side_effect


    def _mock_check_sig(self, *args, **kwargs):
        # stub method that can be replaced with one with a specific signature
        pass


    def __call__(_mock_self, *args, **kwargs):
        # can't use self in-case a function / method we are mocking uses self
        # in the signature
        _mock_self._mock_check_sig(*args, **kwargs)
        return _mock_self._mock_call(*args, **kwargs)


    def _mock_call(_mock_self, *args, **kwargs):
        """Record the call on this mock and every ancestor, then produce the
        result from side_effect / wraps / return_value (in that priority)."""
        self = _mock_self
        self.called = True
        self.call_count += 1
        self.call_args = _Call((args, kwargs), two=True)
        self.call_args_list.append(_Call((args, kwargs), two=True))

        _new_name = self._mock_new_name
        _new_parent = self._mock_new_parent
        self.mock_calls.append(_Call(('', args, kwargs)))

        # Propagate the call record up the parent chain, building the
        # dotted name as we go.
        seen = set()
        skip_next_dot = _new_name == '()'
        do_method_calls = self._mock_parent is not None
        name = self._mock_name
        while _new_parent is not None:
            this_mock_call = _Call((_new_name, args, kwargs))
            if _new_parent._mock_new_name:
                dot = '.'
                if skip_next_dot:
                    dot = ''

                skip_next_dot = False
                if _new_parent._mock_new_name == '()':
                    skip_next_dot = True

                _new_name = _new_parent._mock_new_name + dot + _new_name

            if do_method_calls:
                if _new_name == name:
                    this_method_call = this_mock_call
                else:
                    this_method_call = _Call((name, args, kwargs))
                _new_parent.method_calls.append(this_method_call)

                do_method_calls = _new_parent._mock_parent is not None
                if do_method_calls:
                    name = _new_parent._mock_name + '.' + name

            _new_parent.mock_calls.append(this_mock_call)
            _new_parent = _new_parent._mock_new_parent

            # use ids here so as not to call __hash__ on the mocks
            _new_parent_id = id(_new_parent)
            if _new_parent_id in seen:
                break
            seen.add(_new_parent_id)

        ret_val = DEFAULT
        effect = self.side_effect
        if effect is not None:
            if _is_exception(effect):
                raise effect

            if not _callable(effect):
                # A non-callable side_effect is an iterator of results
                # (see _try_iter); exceptions in it are raised.
                result = next(effect)
                if _is_exception(result):
                    raise result
                return result

            ret_val = effect(*args, **kwargs)

        if ret_val is DEFAULT:
            ret_val = self.return_value

        if (self._mock_wraps is not None and
             self._mock_return_value is DEFAULT):
            # Delegate to the wrapped object unless return_value was set
            # explicitly.
            return self._mock_wraps(*args, **kwargs)
        if ret_val is DEFAULT:
            ret_val = self.return_value
        return ret_val
class Mock(CallableMixin, NonCallableMock):
    """
    Create a new `Mock` object. `Mock` takes several optional arguments
    that specify the behaviour of the Mock object:
    * `spec`: This can be either a list of strings or an existing object (a
    class or instance) that acts as the specification for the mock object. If
    you pass in an object then a list of strings is formed by calling dir on
    the object (excluding unsupported magic attributes and methods). Accessing
    any attribute not in this list will raise an `AttributeError`.
    If `spec` is an object (rather than a list of strings) then
    `mock.__class__` returns the class of the spec object. This allows mocks
    to pass `isinstance` tests.
    * `spec_set`: A stricter variant of `spec`. If used, attempting to *set*
    or get an attribute on the mock that isn't on the object passed as
    `spec_set` will raise an `AttributeError`.
    * `side_effect`: A function to be called whenever the Mock is called. See
    the `side_effect` attribute. Useful for raising exceptions or
    dynamically changing return values. The function is called with the same
    arguments as the mock, and unless it returns `DEFAULT`, the return
    value of this function is used as the return value.
    Alternatively `side_effect` can be an exception class or instance. In
    this case the exception will be raised when the mock is called.
    If `side_effect` is an iterable then each call to the mock will return
    the next value from the iterable. If any of the members of the iterable
    are exceptions they will be raised instead of returned.
    * `return_value`: The value returned when the mock is called. By default
    this is a new Mock (created on first access). See the
    `return_value` attribute.
    * `wraps`: Item for the mock object to wrap. If `wraps` is not None then
    calling the Mock will pass the call through to the wrapped object
    (returning the real result). Attribute access on the mock will return a
    Mock object that wraps the corresponding attribute of the wrapped object
    (so attempting to access an attribute that doesn't exist will raise an
    `AttributeError`).
    If the mock has an explicit `return_value` set then calls are not passed
    to the wrapped object and the `return_value` is returned instead.
    * `name`: If the mock has a name then it will be used in the repr of the
    mock. This can be useful for debugging. The name is propagated to child
    mocks.
    Mocks can also be called with arbitrary keyword arguments. These will be
    used to set attributes on the mock after it is created.
    """
    # The body is documentation only: all behaviour comes from
    # CallableMixin (calling/recording) and NonCallableMock (attributes).
def _dot_lookup(thing, comp, import_path):
    """Return the `comp` attribute of `thing`.

    If the attribute is missing (typically a not-yet-imported submodule),
    import `import_path` - which binds the submodule onto its parent
    package - and look it up again."""
    try:
        result = getattr(thing, comp)
    except AttributeError:
        __import__(import_path)
        result = getattr(thing, comp)
    return result
def _importer(target):
    """Import a dotted name such as 'package.module.attr' and return the
    final object, resolving each component from left to right."""
    parts = target.split('.')
    path = parts[0]
    # __import__('a.b.c') returns the *top-level* package, so start there
    obj = __import__(path)
    for part in parts[1:]:
        path = '%s.%s' % (path, part)
        obj = _dot_lookup(obj, part, path)
    return obj
def _is_started(patcher):
    # XXXX horrible
    # _patch.__enter__ sets `is_local` on the patcher, so the attribute's
    # presence distinguishes a started patcher from an unentered one.
    return hasattr(patcher, 'is_local')
class _patch(object):
    # Implementation behind patch() / patch.object() / patch.multiple().
    # Usable as a decorator, class decorator or context manager; __enter__
    # performs the replacement and __exit__ undoes it.
    # set by patch.multiple(): the keyword name this patcher's mock is
    # passed under
    attribute_name = None
    # every patcher started via start() is tracked here for stopall()
    _active_patches = set()
    def __init__(
            self, getter, attribute, new, spec, create,
            spec_set, autospec, new_callable, kwargs
        ):
        if new_callable is not None:
            # new_callable is mutually exclusive with new / autospec
            if new is not DEFAULT:
                raise ValueError(
                    "Cannot use 'new' and 'new_callable' together"
                )
            if autospec is not None:
                raise ValueError(
                    "Cannot use 'autospec' and 'new_callable' together"
                )
        self.getter = getter            # zero-arg callable returning the target object
        self.attribute = attribute      # name of the attribute to replace
        self.new = new
        self.new_callable = new_callable
        self.spec = spec
        self.create = create
        self.has_local = False
        self.spec_set = spec_set
        self.autospec = autospec
        self.kwargs = kwargs
        self.additional_patchers = []
    def copy(self):
        # deep-ish copy used when one patcher decorates many test methods,
        # so each method gets independent state
        patcher = _patch(
            self.getter, self.attribute, self.new, self.spec,
            self.create, self.spec_set,
            self.autospec, self.new_callable, self.kwargs
        )
        patcher.attribute_name = self.attribute_name
        patcher.additional_patchers = [
            p.copy() for p in self.additional_patchers
        ]
        return patcher
    def __call__(self, func):
        # decorator entry point: classes and callables are handled differently
        if isinstance(func, ClassTypes):
            return self.decorate_class(func)
        return self.decorate_callable(func)
    def decorate_class(self, klass):
        # wrap every test method (per patch.TEST_PREFIX) with a copy of self
        for attr in dir(klass):
            if not attr.startswith(patch.TEST_PREFIX):
                continue
            attr_value = getattr(klass, attr)
            if not hasattr(attr_value, "__call__"):
                continue
            patcher = self.copy()
            setattr(klass, attr, patcher(attr_value))
        return klass
    def decorate_callable(self, func):
        if hasattr(func, 'patchings'):
            # func is already a `patched` wrapper: stack this patcher on it
            # instead of wrapping again
            func.patchings.append(self)
            return func
        @wraps(func)
        def patched(*args, **keywargs):
            # don't use a with here (backwards compatability with Python 2.4)
            extra_args = []
            entered_patchers = []
            # can't use try...except...finally because of Python 2.4
            # compatibility
            exc_info = tuple()
            try:
                try:
                    for patching in patched.patchings:
                        arg = patching.__enter__()
                        entered_patchers.append(patching)
                        if patching.attribute_name is not None:
                            # patch.multiple: pass mocks by keyword
                            keywargs.update(arg)
                        elif patching.new is DEFAULT:
                            # created mock is passed as an extra positional arg
                            extra_args.append(arg)
                    args += tuple(extra_args)
                    return func(*args, **keywargs)
                except:
                    if (patching not in entered_patchers and
                        _is_started(patching)):
                        # the patcher may have been started, but an exception
                        # raised whilst entering one of its additional_patchers
                        entered_patchers.append(patching)
                    # Pass the exception to __exit__
                    exc_info = sys.exc_info()
                    # re-raise the exception
                    raise
            finally:
                # unwind in reverse order of entry
                for patching in reversed(entered_patchers):
                    patching.__exit__(*exc_info)
        patched.patchings = [self]
        if hasattr(func, 'func_code'):
            # not in Python 3
            patched.compat_co_firstlineno = getattr(
                func, "compat_co_firstlineno",
                func.func_code.co_firstlineno
            )
        return patched
    def get_original(self):
        """Return (original_value, was_local).
        `was_local` is True when the attribute lives in the target's own
        __dict__ (as opposed to being inherited); `original` is DEFAULT when
        the attribute does not exist and create=True allows that."""
        target = self.getter()
        name = self.attribute
        original = DEFAULT
        local = False
        try:
            original = target.__dict__[name]
        except (AttributeError, KeyError):
            original = getattr(target, name, DEFAULT)
        else:
            local = True
        if not self.create and original is DEFAULT:
            raise AttributeError(
                "%s does not have the attribute %r" % (target, name)
            )
        return original, local
    def __enter__(self):
        """Perform the patch."""
        new, spec, spec_set = self.new, self.spec, self.spec_set
        autospec, kwargs = self.autospec, self.kwargs
        new_callable = self.new_callable
        self.target = self.getter()
        # normalise False to None
        if spec is False:
            spec = None
        if spec_set is False:
            spec_set = None
        if autospec is False:
            autospec = None
        if spec is not None and autospec is not None:
            raise TypeError("Can't specify spec and autospec")
        if ((spec is not None or autospec is not None) and
            spec_set not in (True, None)):
            raise TypeError("Can't provide explicit spec_set *and* spec or autospec")
        original, local = self.get_original()
        if new is DEFAULT and autospec is None:
            # no replacement supplied: build a (Magic)Mock ourselves
            inherit = False
            if spec is True:
                # set spec to the object we are replacing
                spec = original
                if spec_set is True:
                    spec_set = original
                    spec = None
            elif spec is not None:
                if spec_set is True:
                    spec_set = spec
                    spec = None
            elif spec_set is True:
                spec_set = original
            if spec is not None or spec_set is not None:
                if original is DEFAULT:
                    raise TypeError("Can't use 'spec' with create=True")
                if isinstance(original, ClassTypes):
                    # If we're patching out a class and there is a spec
                    inherit = True
            Klass = MagicMock
            _kwargs = {}
            if new_callable is not None:
                Klass = new_callable
            elif spec is not None or spec_set is not None:
                # a non-callable spec means the mock must not be callable
                this_spec = spec
                if spec_set is not None:
                    this_spec = spec_set
                if _is_list(this_spec):
                    not_callable = '__call__' not in this_spec
                else:
                    not_callable = not _callable(this_spec)
                if not_callable:
                    Klass = NonCallableMagicMock
            if spec is not None:
                _kwargs['spec'] = spec
            if spec_set is not None:
                _kwargs['spec_set'] = spec_set
            # add a name to mocks
            if (isinstance(Klass, type) and
                issubclass(Klass, NonCallableMock) and self.attribute):
                _kwargs['name'] = self.attribute
            _kwargs.update(kwargs)
            new = Klass(**_kwargs)
            if inherit and _is_instance_mock(new):
                # patching a class with a spec: give return_value (the
                # "instance") the same spec
                # we can only tell if the instance should be callable if the
                # spec is not a list
                this_spec = spec
                if spec_set is not None:
                    this_spec = spec_set
                if (not _is_list(this_spec) and not
                    _instance_callable(this_spec)):
                    Klass = NonCallableMagicMock
                _kwargs.pop('name')
                new.return_value = Klass(_new_parent=new, _new_name='()',
                                         **_kwargs)
        elif autospec is not None:
            # spec is ignored, new *must* be default, spec_set is treated
            # as a boolean. Should we check spec is not None and that spec_set
            # is a bool?
            if new is not DEFAULT:
                raise TypeError(
                    "autospec creates the mock for you. Can't specify "
                    "autospec and new."
                )
            if original is DEFAULT:
                raise TypeError("Can't use 'autospec' with create=True")
            spec_set = bool(spec_set)
            if autospec is True:
                autospec = original
            new = create_autospec(autospec, spec_set=spec_set,
                                  _name=self.attribute, **kwargs)
        elif kwargs:
            # can't set keyword args when we aren't creating the mock
            # XXXX If new is a Mock we could call new.configure_mock(**kwargs)
            raise TypeError("Can't pass kwargs to a mock we aren't creating")
        new_attr = new
        # remember how to undo the patch, then install the replacement
        self.temp_original = original
        self.is_local = local
        setattr(self.target, self.attribute, new_attr)
        if self.attribute_name is not None:
            # patch.multiple: collect {name: mock} from self and all
            # additional patchers and hand the dict to the caller
            extra_args = {}
            if self.new is DEFAULT:
                extra_args[self.attribute_name] = new
            for patching in self.additional_patchers:
                arg = patching.__enter__()
                if patching.new is DEFAULT:
                    extra_args.update(arg)
            return extra_args
        return new
    def __exit__(self, *exc_info):
        """Undo the patch."""
        if not _is_started(self):
            raise RuntimeError('stop called on unstarted patcher')
        if self.is_local and self.temp_original is not DEFAULT:
            setattr(self.target, self.attribute, self.temp_original)
        else:
            # the attribute was inherited (or created): remove our override
            delattr(self.target, self.attribute)
            if not self.create and not hasattr(self.target, self.attribute):
                # needed for proxy objects like django settings
                setattr(self.target, self.attribute, self.temp_original)
        del self.temp_original
        del self.is_local
        del self.target
        for patcher in reversed(self.additional_patchers):
            if _is_started(patcher):
                patcher.__exit__(*exc_info)
    def start(self):
        """Activate a patch, returning any created mock."""
        result = self.__enter__()
        self._active_patches.add(self)
        return result
    def stop(self):
        """Stop an active patch."""
        self._active_patches.discard(self)
        return self.__exit__()
def _get_target(target):
    """Split a dotted patch target like 'pkg.mod.attr' into
    (getter, attribute_name), where `getter` lazily imports and returns
    the object that owns the attribute."""
    try:
        owner, attribute = target.rsplit('.', 1)
    except (TypeError, ValueError):
        raise TypeError("Need a valid target to patch. You supplied: %r" %
                        (target,))
    def getter():
        # resolved lazily so the import happens when the patch starts,
        # not when it is constructed
        return _importer(owner)
    return getter, attribute
def _patch_object(
        target, attribute, new=DEFAULT, spec=None,
        create=False, spec_set=None, autospec=None,
        new_callable=None, **kwargs
    ):
    """
    patch.object(target, attribute, new=DEFAULT, spec=None, create=False,
                 spec_set=None, autospec=None, new_callable=None, **kwargs)
    patch the named member (`attribute`) on an object (`target`) with a mock
    object.
    `patch.object` can be used as a decorator, class decorator or a context
    manager. Arguments `new`, `spec`, `create`, `spec_set`,
    `autospec` and `new_callable` have the same meaning as for `patch`. Like
    `patch`, `patch.object` takes arbitrary keyword arguments for configuring
    the mock object it creates.
    When used as a class decorator `patch.object` honours `patch.TEST_PREFIX`
    for choosing which methods to wrap.
    """
    # target is already an object (not a dotted path), so no import is needed
    getter = lambda: target
    return _patch(
        getter, attribute, new, spec, create,
        spec_set, autospec, new_callable, kwargs
    )
def _patch_multiple(target, spec=None, create=False, spec_set=None,
                    autospec=None, new_callable=None, **kwargs):
    """Perform multiple patches in a single call. It takes the object to be
    patched (either as an object or a string to fetch the object by importing)
    and keyword arguments for the patches::
        with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'):
            ...
    Use `DEFAULT` as the value if you want `patch.multiple` to create
    mocks for you. In this case the created mocks are passed into a decorated
    function by keyword, and a dictionary is returned when `patch.multiple` is
    used as a context manager.
    `patch.multiple` can be used as a decorator, class decorator or a context
    manager. The arguments `spec`, `spec_set`, `create`,
    `autospec` and `new_callable` have the same meaning as for `patch`. These
    arguments will be applied to *all* patches done by `patch.multiple`.
    When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX`
    for choosing which methods to wrap.
    """
    # a string target is imported lazily, like patch(); an object is used
    # directly, like patch.object()
    if type(target) in (unicode, str):
        getter = lambda: _importer(target)
    else:
        getter = lambda: target
    if not kwargs:
        raise ValueError(
            'Must supply at least one keyword argument with patch.multiple'
        )
    # need to wrap in a list for python 3, where items is a view
    items = list(kwargs.items())
    attribute, new = items[0]
    # the first attribute becomes the primary patcher; the rest are chained
    # on as additional_patchers so they enter/exit together
    patcher = _patch(
        getter, attribute, new, spec, create, spec_set,
        autospec, new_callable, {}
    )
    patcher.attribute_name = attribute
    for attribute, new in items[1:]:
        this_patcher = _patch(
            getter, attribute, new, spec, create, spec_set,
            autospec, new_callable, {}
        )
        this_patcher.attribute_name = attribute
        patcher.additional_patchers.append(this_patcher)
    return patcher
def patch(
        target, new=DEFAULT, spec=None, create=False,
        spec_set=None, autospec=None, new_callable=None, **kwargs
    ):
    """
    `patch` acts as a function decorator, class decorator or a context
    manager. Inside the body of the function or with statement, the `target`
    is patched with a `new` object. When the function/with statement exits
    the patch is undone.
    If `new` is omitted, then the target is replaced with a
    `MagicMock`. If `patch` is used as a decorator and `new` is
    omitted, the created mock is passed in as an extra argument to the
    decorated function. If `patch` is used as a context manager the created
    mock is returned by the context manager.
    `target` should be a string in the form `'package.module.ClassName'`. The
    `target` is imported and the specified object replaced with the `new`
    object, so the `target` must be importable from the environment you are
    calling `patch` from. The target is imported when the decorated function
    is executed, not at decoration time.
    The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
    if patch is creating one for you.
    In addition you can pass `spec=True` or `spec_set=True`, which causes
    patch to pass in the object being mocked as the spec/spec_set object.
    `new_callable` allows you to specify a different class, or callable object,
    that will be called to create the `new` object. By default `MagicMock` is
    used.
    A more powerful form of `spec` is `autospec`. If you set `autospec=True`
    then the mock with be created with a spec from the object being replaced.
    All attributes of the mock will also have the spec of the corresponding
    attribute of the object being replaced. Methods and functions being
    mocked will have their arguments checked and will raise a `TypeError` if
    they are called with the wrong signature. For mocks replacing a class,
    their return value (the 'instance') will have the same spec as the class.
    Instead of `autospec=True` you can pass `autospec=some_object` to use an
    arbitrary object as the spec instead of the one being replaced.
    By default `patch` will fail to replace attributes that don't exist. If
    you pass in `create=True`, and the attribute doesn't exist, patch will
    create the attribute for you when the patched function is called, and
    delete it again afterwards. This is useful for writing tests against
    attributes that your production code creates at runtime. It is off by by
    default because it can be dangerous. With it switched on you can write
    passing tests against APIs that don't actually exist!
    Patch can be used as a `TestCase` class decorator. It works by
    decorating each test method in the class. This reduces the boilerplate
    code when your test methods share a common patchings set. `patch` finds
    tests by looking for method names that start with `patch.TEST_PREFIX`.
    By default this is `test`, which matches the way `unittest` finds tests.
    You can specify an alternative prefix by setting `patch.TEST_PREFIX`.
    Patch can be used as a context manager, with the with statement. Here the
    patching applies to the indented block after the with statement. If you
    use "as" then the patched object will be bound to the name after the
    "as"; very useful if `patch` is creating a mock object for you.
    `patch` takes arbitrary keyword arguments. These will be passed to
    the `Mock` (or `new_callable`) on construction.
    `patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
    available for alternate use-cases.
    """
    # all the real work happens in _patch; this just resolves the target
    getter, attribute = _get_target(target)
    return _patch(
        getter, attribute, new, spec, create,
        spec_set, autospec, new_callable, kwargs
    )
class _patch_dict(object):
    """
    Patch a dictionary, or dictionary like object, and restore the dictionary
    to its original state after the test.
    `in_dict` can be a dictionary or a mapping like container. If it is a
    mapping then it must at least support getting, setting and deleting items
    plus iterating over keys.
    `in_dict` can also be a string specifying the name of the dictionary, which
    will then be fetched by importing it.
    `values` can be a dictionary of values to set in the dictionary. `values`
    can also be an iterable of `(key, value)` pairs.
    If `clear` is True then the dictionary will be cleared before the new
    values are set.
    `patch.dict` can also be called with arbitrary keyword arguments to set
    values in the dictionary::
        with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()):
            ...
    `patch.dict` can be used as a context manager, decorator or class
    decorator. When used as a class decorator `patch.dict` honours
    `patch.TEST_PREFIX` for choosing which methods to wrap.
    """
    def __init__(self, in_dict, values=(), clear=False, **kwargs):
        if isinstance(in_dict, basestring):
            # a dotted name: import it to get the actual mapping
            in_dict = _importer(in_dict)
        self.in_dict = in_dict
        # support any argument supported by dict(...) constructor
        self.values = dict(values)
        self.values.update(kwargs)
        self.clear = clear
        # snapshot of the mapping, taken when the patch is applied
        self._original = None
    def __call__(self, f):
        if isinstance(f, ClassTypes):
            return self.decorate_class(f)
        @wraps(f)
        def _inner(*args, **kw):
            self._patch_dict()
            try:
                return f(*args, **kw)
            finally:
                self._unpatch_dict()
        return _inner
    def decorate_class(self, klass):
        # wrap every test method (per patch.TEST_PREFIX) with a fresh
        # _patch_dict so each test patches/restores independently
        for attr in dir(klass):
            attr_value = getattr(klass, attr)
            if (attr.startswith(patch.TEST_PREFIX) and
                hasattr(attr_value, "__call__")):
                decorator = _patch_dict(self.in_dict, self.values, self.clear)
                decorated = decorator(attr_value)
                setattr(klass, attr, decorated)
        return klass
    def __enter__(self):
        """Patch the dict."""
        self._patch_dict()
    def _patch_dict(self):
        # snapshot the mapping, optionally clear it, then apply the new values
        values = self.values
        in_dict = self.in_dict
        clear = self.clear
        try:
            original = in_dict.copy()
        except AttributeError:
            # dict like object with no copy method
            # must support iteration over keys
            original = {}
            for key in in_dict:
                original[key] = in_dict[key]
        self._original = original
        if clear:
            _clear_dict(in_dict)
        try:
            in_dict.update(values)
        except AttributeError:
            # dict like object with no update method
            for key in values:
                in_dict[key] = values[key]
    def _unpatch_dict(self):
        # wipe the mapping and put the snapshot back
        in_dict = self.in_dict
        original = self._original
        _clear_dict(in_dict)
        try:
            in_dict.update(original)
        except AttributeError:
            for key in original:
                in_dict[key] = original[key]
    def __exit__(self, *args):
        """Unpatch the dict."""
        self._unpatch_dict()
        return False
    # context-manager protocol doubles as the start/stop API
    start = __enter__
    stop = __exit__
def _clear_dict(in_dict):
    """Empty a dict or dict-like object.

    Falls back to deleting keys one at a time for mappings without a
    working clear() method."""
    try:
        in_dict.clear()
    except AttributeError:
        # snapshot the keys first: deleting while iterating is not safe
        for key in list(in_dict):
            del in_dict[key]
def _patch_stopall():
    """Stop every patch that is currently started via start()."""
    # copy first: stop() removes entries from _active_patches while we loop
    active = list(_patch._active_patches)
    for patcher in active:
        patcher.stop()
# expose the alternate patch forms as attributes of patch() itself
patch.object = _patch_object
patch.dict = _patch_dict
patch.multiple = _patch_multiple
patch.stopall = _patch_stopall
# method-name prefix used by the class decorators to find tests to wrap
patch.TEST_PREFIX = 'test'
# Names (without the __dunder__ wrapping) of magic methods that MagicMock
# supports by default.
magic_methods = (
    "lt le gt ge eq ne "
    "getitem setitem delitem "
    "len contains iter "
    "hash str sizeof "
    "enter exit "
    "divmod neg pos abs invert "
    "complex int float index "
    "trunc floor ceil "
)
# numeric operators, plus their in-place (i*) and reflected (r*) variants
numerics = "add sub mul div floordiv mod lshift rshift and xor or pow "
inplace = ' '.join('i%s' % n for n in numerics.split())
right = ' '.join('r%s' % n for n in numerics.split())
# version-specific extras (inPy3k is the py2/py3 switch defined earlier)
extra = ''
if inPy3k:
    extra = 'bool next '
else:
    extra = 'unicode long nonzero oct hex truediv rtruediv '
# not including __prepare__, __instancecheck__, __subclasscheck__
# (as they are metaclass methods)
# __del__ is not supported at all as it causes problems if it exists
# methods that exist but get no default implementation unless configured
_non_defaults = set('__%s__' % method for method in [
    'cmp', 'getslice', 'setslice', 'coerce', 'subclasses',
    'format', 'get', 'set', 'delete', 'reversed',
    'missing', 'reduce', 'reduce_ex', 'getinitargs',
    'getnewargs', 'getstate', 'setstate', 'getformat',
    'setformat', 'repr', 'dir'
])
def _get_method(name, func):
    """Wrap a callable object (like a mock) in a real function so it can be
    attached to a class as a normal method, with the requested __name__."""
    def method(self, *args, **kwargs):
        # pure delegation, keeping self as the first positional argument
        return func(self, *args, **kwargs)
    method.__name__ = name
    return method
# full set of dunder names that MagicMock implements by default
_magics = set(
    '__%s__' % method for method in
    ' '.join([magic_methods, numerics, inplace, right, extra]).split()
)
# everything configurable on a mock: defaults plus opt-in extras
_all_magics = _magics | _non_defaults
# Magic methods that can never be mocked by setting them on a mock: they are
# looked up on the type (or are metaclass/lifecycle hooks) and would break
# the mock machinery itself.
# BUG FIX: the original was missing the comma after '__prepare__', so
# implicit string concatenation produced '__prepare____instancecheck__' and
# neither '__prepare__' nor '__instancecheck__' was actually in the set
# (same defect was fixed upstream in CPython's unittest.mock).
_unsupported_magics = set([
    '__getattr__', '__setattr__',
    '__init__', '__new__', '__prepare__',
    '__instancecheck__', '__subclasscheck__',
    '__del__'
])
# default return values that must be computed from the mock instance itself
# (e.g. its real identity hash); `unicode` is the py2 compat alias
_calculate_return_value = {
    '__hash__': lambda self: object.__hash__(self),
    '__str__': lambda self: object.__str__(self),
    '__sizeof__': lambda self: object.__sizeof__(self),
    '__unicode__': lambda self: unicode(object.__str__(self)),
}
# fixed default return values for magic methods; `long` is the py2 builtin
_return_values = {
    '__lt__': NotImplemented,
    '__gt__': NotImplemented,
    '__le__': NotImplemented,
    '__ge__': NotImplemented,
    '__int__': 1,
    '__contains__': False,
    '__len__': 0,
    '__exit__': False,
    '__complex__': 1j,
    '__float__': 1.0,
    '__bool__': True,
    '__nonzero__': True,
    '__oct__': '1',
    '__hex__': '0x1',
    '__long__': long(1),
    '__index__': 1,
}
def _get_eq(self):
    # Build the default __eq__ side effect for a mock: honour a configured
    # return value if one was set, otherwise compare by identity.
    def __eq__(other):
        ret_val = self.__eq__._mock_return_value
        if ret_val is not DEFAULT:
            return ret_val
        return self is other
    return __eq__
def _get_ne(self):
    # Build the default __ne__ side effect for a mock. Returning DEFAULT is
    # deliberate: a side effect that returns DEFAULT makes _mock_call fall
    # back to the configured return_value, so a user-set
    # mock.__ne__.return_value still takes precedence.
    def __ne__(other):
        if self.__ne__._mock_return_value is not DEFAULT:
            return DEFAULT
        return self is not other
    return __ne__
def _get_iter(self):
    # Build the default __iter__ side effect: iterate the configured return
    # value, or yield nothing when no return value was set.
    def __iter__():
        ret_val = self.__iter__._mock_return_value
        if ret_val is DEFAULT:
            return iter([])
        # if ret_val was already an iterator, then calling iter on it should
        # return the iterator unchanged
        return iter(ret_val)
    return __iter__
# magic methods whose defaults need access to the parent mock, installed as
# side effects rather than plain return values
_side_effect_methods = {
    '__eq__': _get_eq,
    '__ne__': _get_ne,
    '__iter__': _get_iter,
}
def _set_return_value(mock, method, name):
    """Give a magic-method child mock its default behaviour: a fixed return
    value, a value computed from the parent mock, or a side effect that
    needs access to the parent (``__eq__``/``__ne__``/``__iter__``)."""
    fixed = _return_values.get(name, DEFAULT)
    if fixed is not DEFAULT:
        # simple constant default, e.g. __len__ -> 0
        method.return_value = fixed
        return
    calculator = _calculate_return_value.get(name)
    if calculator is not None:
        try:
            value = calculator(mock)
        except AttributeError:
            # XXXX why do we return AttributeError here?
            # set it as a side_effect instead?
            value = AttributeError(name)
        method.return_value = value
        return
    effect_factory = _side_effect_methods.get(name)
    if effect_factory is not None:
        method.side_effect = effect_factory(mock)
class MagicMixin(object):
    # Mixin that installs lazy MagicProxy placeholders for the supported
    # magic methods on the mock's (per-instance) class.
    def __init__(self, *args, **kw):
        _super(MagicMixin, self).__init__(*args, **kw)
        self._mock_set_magics()
    def _mock_set_magics(self):
        these_magics = _magics
        if self._mock_methods is not None:
            # a spec is configured: only keep the magics the spec allows
            these_magics = _magics.intersection(self._mock_methods)
            remove_magics = set()
            remove_magics = _magics - these_magics
            for entry in remove_magics:
                if entry in type(self).__dict__:
                    # remove unneeded magic methods
                    delattr(self, entry)
        # don't overwrite existing attributes if called a second time
        these_magics = these_magics - set(type(self).__dict__)
        _type = type(self)
        for entry in these_magics:
            # proxies create the real child mock lazily, on first use
            setattr(_type, entry, MagicProxy(entry, self))
class NonCallableMagicMock(MagicMixin, NonCallableMock):
    """A version of `MagicMock` that isn't callable."""
    def mock_add_spec(self, spec, spec_set=False):
        """Add a spec to a mock. `spec` can either be an object or a
        list of strings. Only attributes on the `spec` can be fetched as
        attributes from the mock.
        If `spec_set` is True then only attributes on the spec can be set."""
        self._mock_add_spec(spec, spec_set)
        # re-sync the installed magic methods with the new spec
        self._mock_set_magics()
class MagicMock(MagicMixin, Mock):
    """
    MagicMock is a subclass of Mock with default implementations
    of most of the magic methods. You can use MagicMock without having to
    configure the magic methods yourself.
    If you use the `spec` or `spec_set` arguments then *only* magic
    methods that exist in the spec will be created.
    Attributes and the return value of a `MagicMock` will also be `MagicMocks`.
    """
    def mock_add_spec(self, spec, spec_set=False):
        """Add a spec to a mock. `spec` can either be an object or a
        list of strings. Only attributes on the `spec` can be fetched as
        attributes from the mock.
        If `spec_set` is True then only attributes on the spec can be set."""
        self._mock_add_spec(spec, spec_set)
        # re-sync the installed magic methods with the new spec
        self._mock_set_magics()
class MagicProxy(object):
    """Class-level placeholder for a magic method on a mock.

    The real child mock is created lazily - on first call or attribute
    access - and is then cached on the parent, replacing this proxy."""
    def __init__(self, name, parent):
        self.name = name
        self.parent = parent
    def __call__(self, *args, **kwargs):
        # calling the proxy materialises the mock and forwards the call
        return self.create_mock()(*args, **kwargs)
    def create_mock(self):
        """Build the child mock, attach it to the parent and give it its
        default magic-method behaviour."""
        attr_name = self.name
        owner = self.parent
        child = owner._get_child_mock(name=attr_name, _new_name=attr_name,
                                      _new_parent=owner)
        setattr(owner, attr_name, child)
        _set_return_value(owner, child, attr_name)
        return child
    def __get__(self, obj, _type=None):
        # descriptor access also materialises (and caches) the mock
        return self.create_mock()
class _ANY(object):
    """A sentinel that compares equal to every other object.

    Used in call assertions to mean "this argument doesn't matter"."""
    def __eq__(self, other):
        # equal to absolutely anything, including other _ANY instances
        return True
    def __ne__(self, other):
        # defined explicitly because Python 2 does not derive != from ==
        return not self.__eq__(other)
    def __repr__(self):
        return '<ANY>'
# the single shared instance used throughout the mock API
ANY = _ANY()
def _format_call_signature(name, args, kwargs):
    """Render a call as source-like text, e.g. ``"foo(1, bar='baz')"``.

    Positional arguments come first (via repr), then keyword arguments in
    the mapping's iteration order."""
    # keep the two-step substitution of the original implementation
    message = '%s(%%s)' % name
    pieces = []
    if args:
        pieces.append(', '.join(repr(arg) for arg in args))
    if kwargs:
        pieces.append(', '.join(
            '%s=%r' % (key, value) for key, value in kwargs.items()
        ))
    return message % ', '.join(pieces)
class _Call(tuple):
    """
    A tuple for holding the results of a call to a mock, either in the form
    `(args, kwargs)` or `(name, args, kwargs)`.
    If args or kwargs are empty then a call tuple will compare equal to
    a tuple without those values. This makes comparisons less verbose::
        _Call(('name', (), {})) == ('name',)
        _Call(('name', (1,), {})) == ('name', (1,))
        _Call(((), {'a': 'b'})) == ({'a': 'b'},)
    The `_Call` object provides a useful shortcut for comparing with call::
        _Call(((1, 2), {'a': 3})) == call(1, 2, a=3)
        _Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3)
    If the _Call has no name then it will match any name.
    """
    def __new__(cls, value=(), name=None, parent=None, two=False,
                from_kall=True):
        # work out (name, args, kwargs) from whatever shape `value` has;
        # note the `name` parameter is ignored here (it is only stored by
        # __init__ for building dotted call chains)
        name = ''
        args = ()
        kwargs = {}
        _len = len(value)
        if _len == 3:
            name, args, kwargs = value
        elif _len == 2:
            first, second = value
            if isinstance(first, basestring):
                # (name, args) or (name, kwargs)
                name = first
                if isinstance(second, tuple):
                    args = second
                else:
                    kwargs = second
            else:
                args, kwargs = first, second
        elif _len == 1:
            value, = value
            if isinstance(value, basestring):
                name = value
            elif isinstance(value, tuple):
                args = value
            else:
                kwargs = value
        if two:
            # two-tuple form used for call_args / call_args_list
            return tuple.__new__(cls, (args, kwargs))
        return tuple.__new__(cls, (name, args, kwargs))
    def __init__(self, value=(), name=None, parent=None, two=False,
                 from_kall=True):
        # name/parent let call chains like call.foo(1).bar() be rebuilt;
        # from_kall marks calls created via the `call` helper
        self.name = name
        self.parent = parent
        self.from_kall = from_kall
    def __eq__(self, other):
        if other is ANY:
            return True
        try:
            len_other = len(other)
        except TypeError:
            return False
        # normalise self to (name, args, kwargs)
        self_name = ''
        if len(self) == 2:
            self_args, self_kwargs = self
        else:
            self_name, self_args, self_kwargs = self
        # normalise other, accepting all the abbreviated tuple shapes
        other_name = ''
        if len_other == 0:
            other_args, other_kwargs = (), {}
        elif len_other == 3:
            other_name, other_args, other_kwargs = other
        elif len_other == 1:
            value, = other
            if isinstance(value, tuple):
                other_args = value
                other_kwargs = {}
            elif isinstance(value, basestring):
                other_name = value
                other_args, other_kwargs = (), {}
            else:
                other_args = ()
                other_kwargs = value
        else:
            # len 2
            # could be (name, args) or (name, kwargs) or (args, kwargs)
            first, second = other
            if isinstance(first, basestring):
                other_name = first
                if isinstance(second, tuple):
                    other_args, other_kwargs = second, {}
                else:
                    other_args, other_kwargs = (), second
            else:
                other_args, other_kwargs = first, second
        # an unnamed call matches any name; a named one must match exactly
        if self_name and other_name != self_name:
            return False
        # this order is important for ANY to work!
        return (other_args, other_kwargs) == (self_args, self_kwargs)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __call__(self, *args, **kwargs):
        # extend the chain: call.foo -> call.foo(1, 2)
        if self.name is None:
            return _Call(('', args, kwargs), name='()')
        name = self.name + '()'
        return _Call((self.name, args, kwargs), name=name, parent=self)
    def __getattr__(self, attr):
        # extend the chain: call -> call.foo
        if self.name is None:
            return _Call(name=attr, from_kall=False)
        name = '%s.%s' % (self.name, attr)
        return _Call(name=name, parent=self, from_kall=False)
    def __repr__(self):
        if not self.from_kall:
            name = self.name or 'call'
            if name.startswith('()'):
                name = 'call%s' % name
            return name
        if len(self) == 2:
            name = 'call'
            args, kwargs = self
        else:
            name, args, kwargs = self
            if not name:
                name = 'call'
            elif not name.startswith('()'):
                name = 'call.%s' % name
            else:
                name = 'call%s' % name
        return _format_call_signature(name, args, kwargs)
    def call_list(self):
        """For a call object that represents multiple calls, `call_list`
        returns a list of all the intermediate calls as well as the
        final call."""
        vals = []
        thing = self
        while thing is not None:
            if thing.from_kall:
                vals.append(thing)
            thing = thing.parent
        return _CallList(reversed(vals))
# the singleton helper used to build expected calls: call.foo(1).bar()
call = _Call(from_kall=False)
def create_autospec(spec, spec_set=False, instance=False, _parent=None,
                    _name=None, **kwargs):
    """Create a mock object using another object as a spec. Attributes on the
    mock will use the corresponding attribute on the `spec` object as their
    spec.
    Functions or methods being mocked will have their arguments checked
    to check that they are called with the correct signature.
    If `spec_set` is True then attempting to set attributes that don't exist
    on the spec object will raise an `AttributeError`.
    If a class is used as a spec then the return value of the mock (the
    instance of the class) will have the same spec. You can use a class as the
    spec for an instance object by passing `instance=True`. The returned mock
    will only be callable if instances of the mock are callable.
    `create_autospec` also takes arbitrary keyword arguments that are passed to
    the constructor of the created mock."""
    if _is_list(spec):
        # can't pass a list instance to the mock constructor as it will be
        # interpreted as a list of strings
        spec = type(spec)
    is_type = isinstance(spec, ClassTypes)
    _kwargs = {'spec': spec}
    if spec_set:
        _kwargs = {'spec_set': spec}
    elif spec is None:
        # None we mock with a normal mock without a spec
        _kwargs = {}
    _kwargs.update(kwargs)
    # Choose the mock class: callable specs get MagicMock, non-callable ones
    # NonCallableMagicMock.
    Klass = MagicMock
    if type(spec) in DescriptorTypes:
        # descriptors don't have a spec
        # because we don't know what type they return
        _kwargs = {}
    elif not _callable(spec):
        Klass = NonCallableMagicMock
    elif is_type and instance and not _instance_callable(spec):
        Klass = NonCallableMagicMock
    _new_name = _name
    if _parent is None:
        # for a top level object no _new_name should be set
        _new_name = ''
    mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name,
                 name=_name, **_kwargs)
    if isinstance(spec, FunctionTypes):
        # should only happen at the top level because we don't
        # recurse for functions
        mock = _set_signature(mock, spec)
    else:
        _check_signature(spec, mock, is_type, instance)
    if _parent is not None and not instance:
        _parent._mock_children[_name] = mock
    # Mocking a class: calling the mock yields an autospecced instance mock.
    if is_type and not instance and 'return_value' not in kwargs:
        mock.return_value = create_autospec(spec, spec_set, instance=True,
                                            _name='()', _parent=mock)
    for entry in dir(spec):
        if _is_magic(entry):
            # MagicMock already does the useful magic methods for us
            continue
        if isinstance(spec, FunctionTypes) and entry in FunctionAttributes:
            # allow a mock to actually be a function
            continue
        # XXXX do we need a better way of getting attributes without
        # triggering code execution (?) Probably not - we need the actual
        # object to mock it so we would rather trigger a property than mock
        # the property descriptor. Likewise we want to mock out dynamically
        # provided attributes.
        # XXXX what about attributes that raise exceptions other than
        # AttributeError on being fetched?
        # we could be resilient against it, or catch and propagate the
        # exception when the attribute is fetched from the mock
        try:
            original = getattr(spec, entry)
        except AttributeError:
            continue
        # NOTE: this deliberately rebinds the function's **kwargs name for
        # the per-attribute child-mock kwargs.
        kwargs = {'spec': original}
        if spec_set:
            kwargs = {'spec_set': original}
        if not isinstance(original, FunctionTypes):
            # Non-function attributes are autospecced lazily, on first
            # access, via a _SpecState placeholder.
            # NOTE(review): `instance` is passed positionally here and lands
            # in _SpecState's `ids` parameter, not `instance` -- verify this
            # is intended.
            new = _SpecState(original, spec_set, mock, entry, instance)
            mock._mock_children[entry] = new
        else:
            parent = mock
            if isinstance(spec, FunctionTypes):
                parent = mock.mock
            new = MagicMock(parent=parent, name=entry, _new_name=entry,
                            _new_parent=parent, **kwargs)
            mock._mock_children[entry] = new
            skipfirst = _must_skip(spec, entry, is_type)
            _check_signature(original, new, skipfirst=skipfirst)
        # so functions created with _set_signature become instance attributes,
        # *plus* their underlying mock exists in _mock_children of the parent
        # mock. Adding to _mock_children may be unnecessary where we are also
        # setting as an instance attribute?
        if isinstance(new, FunctionTypes):
            setattr(mock, entry, new)
    return mock
def _must_skip(spec, entry, is_type):
    """Return whether the implicit first argument (self/cls) must be skipped
    when checking the signature of attribute `entry` against `spec`."""
    if not isinstance(spec, ClassTypes):
        if entry in getattr(spec, '__dict__', {}):
            # set directly on the instance, so the signature is used as-is
            return False
        spec = spec.__class__
    if not hasattr(spec, '__mro__'):
        # old style class: can't have descriptors anyway
        return is_type
    for base in spec.__mro__:
        attribute = base.__dict__.get(entry, DEFAULT)
        if attribute is DEFAULT:
            continue
        # staticmethods / classmethods never receive a plain `self`
        if isinstance(attribute, (staticmethod, classmethod)):
            return False
        return is_type
    # not found anywhere on the MRO: must be a dynamically provided attribute
    return is_type
def _get_class(obj):
try:
return obj.__class__
except AttributeError:
# in Python 2, _sre.SRE_Pattern objects have no __class__
return type(obj)
class _SpecState(object):
def __init__(self, spec, spec_set=False, parent=None,
name=None, ids=None, instance=False):
self.spec = spec
self.ids = ids
self.spec_set = spec_set
self.parent = parent
self.instance = instance
self.name = name
# The "function-like" types on this interpreter: a plain Python function,
# a bound/instance method, and (Python 2 only) an unbound method.
FunctionTypes = (
    # python function
    type(create_autospec),
    # instance method
    type(ANY.__eq__),
    # unbound method
    type(_ANY.__eq__),
)
# Python 2 function attributes that create_autospec leaves alone (skipped
# when autospeccing a function) so the result can still act as a function.
FunctionAttributes = set([
    'func_closure',
    'func_code',
    'func_defaults',
    'func_dict',
    'func_doc',
    'func_globals',
    'func_name',
])
# Spec used for file handles by mock_open(); populated lazily on first use.
file_spec = None
def mock_open(mock=None, read_data=''):
    """
    Create a mock suitable for replacing the use of `open`, whether `open`
    is called directly or used as a context manager.

    `mock` is the mock object to configure; when `None` (the default) a new
    `MagicMock` restricted to the standard file-handle API is created.

    `read_data` is the string the handle's `read` method returns (empty
    string by default).
    """
    global file_spec
    if file_spec is None:
        # Build the file-handle spec lazily, on first use.
        if inPy3k:
            import _io
            file_spec = list(set(dir(_io.TextIOWrapper)) | set(dir(_io.BytesIO)))
        else:
            file_spec = file
    if mock is None:
        mock = MagicMock(name='open', spec=open)
    file_handle = MagicMock(spec=file_spec)
    file_handle.__enter__.return_value = file_handle
    file_handle.read.return_value = read_data
    file_handle.write.return_value = None
    mock.return_value = file_handle
    return mock
class PropertyMock(Mock):
    """
    A mock intended to be used as a property, or other descriptor, on a class.
    `PropertyMock` provides `__get__` and `__set__` methods so you can specify
    a return value when it is fetched.
    Fetching a `PropertyMock` instance from an object calls the mock, with
    no args. Setting it calls the mock with the value being set.
    """
    def _get_child_mock(self, **kwargs):
        # Children of a PropertyMock are ordinary MagicMocks, not further
        # PropertyMocks.
        return MagicMock(**kwargs)
    def __get__(self, obj, obj_type):
        # Descriptor read: record a no-arg call; return_value is what the
        # attribute read produces.
        return self()
    def __set__(self, obj, val):
        # Descriptor write: record a call with the assigned value.
        self(val)
| bsd-3-clause |
tonybaloney/st2contrib | packs/cubesensors/sensors/measurements_sensor.py | 7 | 5820 | import time
from rauth import OAuth1Session
from st2common.util import isotime
from st2reactor.sensor.base import PollingSensor
# Public API of this module.
__all__ = [
    'CubeSensorsMeasurementsSensor'
]
# Root endpoint of the CubeSensors REST API.
BASE_URL = 'https://api.cubesensors.com/v1'
# Per-field post-processing applied to raw measurement values: raw 'temp'
# values are divided by 100 (presumably reported in hundredths -- TODO
# confirm against the API docs).
FIELD_CONVERT_FUNCS = {
    'temp': lambda value: (float(value) / 100)
}
class CubeSensorsMeasurementsSensor(PollingSensor):
    """
    Sensor which polls the CubeSensors API for the current measurements of
    the configured devices and dispatches a trigger for every measurement
    that has not been seen before.
    """
    # Datastore key prefix under which each device's last-seen measurement
    # timestamp is persisted across sensor restarts.
    DATASTORE_KEY_NAME = 'last_measurements_timestamp'
    def __init__(self, sensor_service, config=None, poll_interval=None):
        super(CubeSensorsMeasurementsSensor, self).__init__(sensor_service=sensor_service,
                                                            config=config,
                                                            poll_interval=poll_interval)
        self._device_uids = self._config['sensor'].get('device_uids', [])
        self._logger = self._sensor_service.get_logger(__name__)
        self._device_info_cache = {}  # maps device_uid -> device info dict
        self._last_measurement_timestamps = {}  # maps device_uid -> last mes. timestamp
    def setup(self):
        if not self._device_uids:
            raise ValueError('No "device_uids" configured!')
        self._session = self._get_session()
        # Populate the device info cache up front so trigger payloads can
        # include the human-readable device name without an extra API call
        # on every poll.
        for device_uid in self._device_uids:
            data = self._get_device_info(device_uid=device_uid)
            self._device_info_cache[device_uid] = data
    def poll(self):
        for device_uid in self._device_uids:
            result = self._get_measurements(device_uid=device_uid)
            if not result:
                continue
            self._handle_result(device_uid=device_uid, result=result)
    def cleanup(self):
        pass
    def add_trigger(self, trigger):
        pass
    def update_trigger(self, trigger):
        pass
    def remove_trigger(self, trigger):
        pass
    def _handle_result(self, device_uid, result):
        """Dispatch a trigger for ``result`` unless it has already been seen."""
        existing_last_measurement_timestamp = self._get_last_measurement_timestamp(
            device_uid=device_uid)
        new_last_measurement_timestamp = isotime.parse(result['time'])
        new_last_measurement_timestamp = int(time.mktime(
            new_last_measurement_timestamp.timetuple()))  # pylint: disable=no-member
        if (existing_last_measurement_timestamp and
                new_last_measurement_timestamp <= existing_last_measurement_timestamp):
            # We have already seen this measurement, skip it
            self._logger.debug(('No new measurements, skipping results we have already seen '
                                'for device %s' % (device_uid)))
            return
        # Dispatch trigger
        self._dispatch_trigger(device_uid=device_uid, result=result)
        # Store last measurement timestamp
        self._set_last_measurement_timestamp(
            device_uid=device_uid, last_measurement_timestamp=new_last_measurement_timestamp)
    def _get_last_measurement_timestamp(self, device_uid):
        """
        Retrieve last measurement timestamp for a particular device.
        Falls back to the datastore (which survives restarts) when the
        in-memory cache has no entry.
        :rtype: ``int``
        """
        last_measurement_timestamp = self._last_measurement_timestamps.get(device_uid, None)
        if not last_measurement_timestamp:
            name = self._get_datastore_key_name(device_uid=device_uid)
            value = self._sensor_service.get_value(name=name)
            self._last_measurement_timestamps[device_uid] = int(value) if value else 0
        return self._last_measurement_timestamps[device_uid]
    def _set_last_measurement_timestamp(self, device_uid, last_measurement_timestamp):
        """
        Store a last measurement timestamp for a particular device, both in
        the in-memory cache and in the datastore.
        """
        self._last_measurement_timestamps[device_uid] = last_measurement_timestamp
        name = self._get_datastore_key_name(device_uid=device_uid)
        # (A redundant get_value() call, whose result was immediately
        # overwritten, used to live here and has been removed.)
        value = str(last_measurement_timestamp)
        self._sensor_service.set_value(name=name, value=value)
        return last_measurement_timestamp
    def _get_datastore_key_name(self, device_uid):
        """Return the per-device datastore key for the last-seen timestamp."""
        name = self.DATASTORE_KEY_NAME + '.' + device_uid
        return name
    def _dispatch_trigger(self, device_uid, result):
        """Emit the cubesensors.measurements trigger for ``result``."""
        trigger = 'cubesensors.measurements'
        device_info = self._device_info_cache.get(device_uid, {})
        device_name = device_info.get('extra', {}).get('name', 'unknown')
        payload = {
            'device_uid': device_uid,
            'device_name': device_name,
            'measurements': result
        }
        self._sensor_service.dispatch(trigger=trigger, payload=payload)
    def _get_device_info(self, device_uid):
        """Fetch device metadata for ``device_uid`` from the API."""
        response = self._session.get('%s/devices/%s' % (BASE_URL, device_uid))
        data = response.json()
        return data['device']
    def _get_measurements(self, device_uid):
        """
        Retrieve measurements for a particular device as a dict mapping
        field name -> (possibly converted) value.
        """
        response = self._session.get('%s/devices/%s/current' % (BASE_URL, device_uid))
        data = response.json()
        values = data['results'][0]
        field_list = data['field_list']
        result = {}
        for index, field_name in enumerate(field_list):
            value = values[index]
            convert_func = FIELD_CONVERT_FUNCS.get(field_name, None)
            if convert_func:
                value = convert_func(value=value)
            result[field_name] = value
        return result
    def _get_session(self):
        """Build an OAuth1 session from the pack's configured credentials."""
        session = OAuth1Session(consumer_key=self._config['consumer_key'],
                                consumer_secret=self._config['consumer_secret'],
                                access_token=self._config['access_token'],
                                access_token_secret=self._config['access_token_secret'])
        return session
| apache-2.0 |
wengole/channels | channels/backends/__init__.py | 2 | 1129 | from django.utils.module_loading import import_string
class InvalidChannelBackendError(ValueError):
    """Raised when a backend settings entry is missing its BACKEND key or
    the named backend class cannot be imported."""
    pass
class BackendManager(object):
    """
    Takes a settings dictionary of backends and initialises them.

    Backends are created lazily on first access (via indexing by alias) and
    then cached.
    """
    def __init__(self, backend_configs):
        self.configs = backend_configs
        self.backends = {}

    def make_backend(self, name):
        """
        Instantiate the backend configured under alias ``name``.

        Raises InvalidChannelBackendError if the config has no BACKEND key
        or the backend class cannot be imported.
        """
        # Load the backend class
        try:
            backend_class = import_string(self.configs[name]['BACKEND'])
        except KeyError:
            raise InvalidChannelBackendError("No BACKEND specified for %s" % name)
        except ImportError as e:
            # Include the underlying import error so misconfigurations are
            # diagnosable (previously `e` was captured but never used).
            raise InvalidChannelBackendError(
                "Cannot import BACKEND %r specified for %s (%s)" % (
                    self.configs[name]['BACKEND'], name, e))
        # Initialise and pass config: every key except BACKEND, lower-cased,
        # becomes a constructor keyword argument.
        instance = backend_class(**{k.lower(): v for k, v in self.configs[name].items() if k != "BACKEND"})
        instance.alias = name
        return instance

    def __getitem__(self, key):
        # Lazily create and cache the backend for this alias.
        if key not in self.backends:
            self.backends[key] = self.make_backend(key)
        return self.backends[key]
| bsd-3-clause |
JeffRoy/mi-dataset | mi/dataset/driver/pco2w_abc/imodem/pco2w_abc_imodem_telemetered_driver.py | 1 | 2797 | #!/usr/bin/env python
"""
@package mi.dataset.driver.pco2w_abc.imodem
@file mi-dataset/mi/dataset/driver/pco2w_abc/imodem/pco2w_abc_imodem_recovered_driver.py
@author Mark Worden
@brief Driver for the pco2w_abc_imodem instrument
Release notes:
Initial Release
"""
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.dataset_driver import SimpleDatasetDriver
from mi.dataset.parser.pco2w_abc_imodem import Pco2wAbcImodemParser
from mi.dataset.parser.pco2w_abc_particles import \
Pco2wAbcParticleClassKey, \
Pco2wAbcImodemInstrumentBlankTelemeteredDataParticle, \
Pco2wAbcImodemInstrumentTelemeteredDataParticle, \
Pco2wAbcImodemPowerTelemeteredDataParticle, \
Pco2wAbcImodemControlTelemeteredDataParticle, \
Pco2wAbcImodemMetadataTelemeteredDataParticle
from mi.core.versioning import version
@version("15.6.0")
def parse(basePythonCodePath, sourceFilePath, particleDataHdlrObj):
"""
This is the method called by Uframe
:param basePythonCodePath This is the file system location of mi-dataset
:param sourceFilePath This is the full path and filename of the file to be parsed
:param particleDataHdlrObj Java Object to consume the output of the parser
:return particleDataHdlrObj
"""
with open(sourceFilePath, 'rU') as stream_handle:
driver = Pco2wAbcImodemTelemeteredDriver(basePythonCodePath, stream_handle, particleDataHdlrObj)
driver.processFileStream()
return particleDataHdlrObj
class Pco2wAbcImodemTelemeteredDriver(SimpleDatasetDriver):
    """
    All this needs to do is create a concrete _build_parser method
    """
    def _build_parser(self, stream_handle):
        """Build a Pco2wAbcImodemParser configured with the telemetered
        particle classes for each record type."""
        # Map each record type to its telemetered particle class; the parser
        # selects the appropriate class per record.
        parser_config = {
            DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.pco2w_abc_particles',
            DataSetDriverConfigKeys.PARTICLE_CLASS: None,
            DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
                Pco2wAbcParticleClassKey.METADATA_PARTICLE_CLASS:
                Pco2wAbcImodemMetadataTelemeteredDataParticle,
                Pco2wAbcParticleClassKey.POWER_PARTICLE_CLASS:
                Pco2wAbcImodemPowerTelemeteredDataParticle,
                Pco2wAbcParticleClassKey.INSTRUMENT_PARTICLE_CLASS:
                Pco2wAbcImodemInstrumentTelemeteredDataParticle,
                Pco2wAbcParticleClassKey.INSTRUMENT_BLANK_PARTICLE_CLASS:
                Pco2wAbcImodemInstrumentBlankTelemeteredDataParticle,
                Pco2wAbcParticleClassKey.CONTROL_PARTICLE_CLASS:
                Pco2wAbcImodemControlTelemeteredDataParticle,
            }
        }
        parser = Pco2wAbcImodemParser(parser_config,
                                      stream_handle,
                                      self._exception_callback)
        return parser
| bsd-2-clause |
imcom/pyleus | docs/source/conf.py | 9 | 6716 | # -*- coding: utf-8 -*-
#
# Pyleus documentation build configuration file, created by
# sphinx-quickstart on Tue Sep 30 08:01:12 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyleus'
copyright = u'2013–2014 Yelp'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from pyleus import __version__
# The full version, including alpha/beta/rc tags.
release = version = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyleusdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pyleus.tex', u'Pyleus Documentation',
u'Yelp', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| apache-2.0 |
alrifqi/django | tests/postgres_tests/test_json.py | 284 | 7890 | import datetime
import unittest
from django.core import exceptions, serializers
from django.db import connection
from django.test import TestCase
from . import PostgreSQLTestCase
from .models import JSONModel
try:
from django.contrib.postgres import forms
from django.contrib.postgres.fields import JSONField
except ImportError:
pass
def skipUnlessPG94(test):
    """Decorator: skip `test` unless the default connection is
    PostgreSQL >= 9.4."""
    try:
        pg_version = connection.pg_version
    except AttributeError:
        # Non-PostgreSQL backends expose no pg_version attribute.
        pg_version = 0
    if pg_version < 90400:
        return unittest.skip('PostgreSQL >= 9.4 required')(test)
    return test
@skipUnlessPG94
class TestSaveLoad(TestCase):
    """Round-trip tests: each JSON value type must survive save/reload."""
    def test_null(self):
        instance = JSONModel()
        instance.save()
        loaded = JSONModel.objects.get()
        self.assertEqual(loaded.field, None)
    def test_empty_object(self):
        instance = JSONModel(field={})
        instance.save()
        loaded = JSONModel.objects.get()
        self.assertEqual(loaded.field, {})
    def test_empty_list(self):
        instance = JSONModel(field=[])
        instance.save()
        loaded = JSONModel.objects.get()
        self.assertEqual(loaded.field, [])
    def test_boolean(self):
        instance = JSONModel(field=True)
        instance.save()
        loaded = JSONModel.objects.get()
        self.assertEqual(loaded.field, True)
    def test_string(self):
        instance = JSONModel(field='why?')
        instance.save()
        loaded = JSONModel.objects.get()
        self.assertEqual(loaded.field, 'why?')
    def test_number(self):
        instance = JSONModel(field=1)
        instance.save()
        loaded = JSONModel.objects.get()
        self.assertEqual(loaded.field, 1)
    def test_realistic_object(self):
        # A nested structure exercising every JSON value type at once.
        obj = {
            'a': 'b',
            'c': 1,
            'd': ['e', {'f': 'g'}],
            'h': True,
            'i': False,
            'j': None,
        }
        instance = JSONModel(field=obj)
        instance.save()
        loaded = JSONModel.objects.get()
        self.assertEqual(loaded.field, obj)
@skipUnlessPG94
class TestQuerying(TestCase):
    """Lookup and key/index-transform tests for JSONField querying."""
    @classmethod
    def setUpTestData(cls):
        # One object per JSON value shape; tests reference them by index.
        cls.objs = [
            JSONModel.objects.create(field=None),
            JSONModel.objects.create(field=True),
            JSONModel.objects.create(field=False),
            JSONModel.objects.create(field='yes'),
            JSONModel.objects.create(field=7),
            JSONModel.objects.create(field=[]),
            JSONModel.objects.create(field={}),
            JSONModel.objects.create(field={
                'a': 'b',
                'c': 1,
            }),
            JSONModel.objects.create(field={
                'a': 'b',
                'c': 1,
                'd': ['e', {'f': 'g'}],
                'h': True,
                'i': False,
                'j': None,
                'k': {'l': 'm'},
            }),
            JSONModel.objects.create(field=[1, [2]]),
            JSONModel.objects.create(field={
                'k': True,
                'l': False,
            }),
        ]
    def test_exact(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__exact={}),
            [self.objs[6]]
        )
    def test_exact_complex(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__exact={'a': 'b', 'c': 1}),
            [self.objs[7]]
        )
    def test_isnull(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__isnull=True),
            [self.objs[0]]
        )
    def test_contains(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__contains={'a': 'b'}),
            [self.objs[7], self.objs[8]]
        )
    def test_contained_by(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__contained_by={'a': 'b', 'c': 1, 'h': True}),
            [self.objs[6], self.objs[7]]
        )
    def test_has_key(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__has_key='a'),
            [self.objs[7], self.objs[8]]
        )
    def test_has_keys(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__has_keys=['a', 'c', 'h']),
            [self.objs[8]]
        )
    def test_has_any_keys(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__has_any_keys=['c', 'l']),
            [self.objs[7], self.objs[8], self.objs[10]]
        )
    def test_shallow_list_lookup(self):
        # Integer key transform indexes into a JSON array.
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__0=1),
            [self.objs[9]]
        )
    def test_shallow_obj_lookup(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__a='b'),
            [self.objs[7], self.objs[8]]
        )
    def test_deep_lookup_objs(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__k__l='m'),
            [self.objs[8]]
        )
    def test_shallow_lookup_obj_target(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__k={'l': 'm'}),
            [self.objs[8]]
        )
    def test_deep_lookup_array(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__1__0=2),
            [self.objs[9]]
        )
    def test_deep_lookup_mixed(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__d__1__f='g'),
            [self.objs[8]]
        )
    def test_deep_lookup_transform(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__c__gt=1),
            []
        )
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__c__lt=5),
            [self.objs[7], self.objs[8]]
        )
    def test_usage_in_subquery(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(id__in=JSONModel.objects.filter(field__c=1)),
            self.objs[7:9]
        )
@skipUnlessPG94
class TestSerialization(TestCase):
    """JSONField values must round-trip through the JSON serializer."""
    # Canonical serialized form used by both the dump and load tests.
    test_data = '[{"fields": {"field": {"a": "b"}}, "model": "postgres_tests.jsonmodel", "pk": null}]'
    def test_dumping(self):
        instance = JSONModel(field={'a': 'b'})
        data = serializers.serialize('json', [instance])
        self.assertJSONEqual(data, self.test_data)
    def test_loading(self):
        instance = list(serializers.deserialize('json', self.test_data))[0].object
        self.assertEqual(instance.field, {'a': 'b'})
class TestValidation(PostgreSQLTestCase):
    """Model-field validation rejects values that are not JSON-serializable."""
    def test_not_serializable(self):
        field = JSONField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            # timedelta has no JSON representation, so clean() must fail.
            field.clean(datetime.timedelta(days=1), None)
        self.assertEqual(cm.exception.code, 'invalid')
        self.assertEqual(cm.exception.message % cm.exception.params, "Value must be valid JSON.")
class TestFormField(PostgreSQLTestCase):
    """Tests for the postgres JSONField form field."""
    def test_valid(self):
        field = forms.JSONField()
        value = field.clean('{"a": "b"}')
        self.assertEqual(value, {'a': 'b'})
    def test_valid_empty(self):
        field = forms.JSONField(required=False)
        value = field.clean('')
        self.assertEqual(value, None)
    def test_invalid(self):
        field = forms.JSONField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('{some badly formed: json}')
        self.assertEqual(cm.exception.messages[0], "'{some badly formed: json}' value must be valid JSON.")
    def test_formfield(self):
        # The model field must produce the matching form field class.
        model_field = JSONField()
        form_field = model_field.formfield()
        self.assertIsInstance(form_field, forms.JSONField)
    def test_prepare_value(self):
        field = forms.JSONField()
        self.assertEqual(field.prepare_value({'a': 'b'}), '{"a": "b"}')
        self.assertEqual(field.prepare_value(None), 'null')
| bsd-3-clause |
igoralmeida/tahoe-lafs | src/allmydata/storage/immutable.py | 7 | 13810 | import os, stat, struct, time
from foolscap.api import Referenceable
from zope.interface import implements
from allmydata.interfaces import RIBucketWriter, RIBucketReader
from allmydata.util import base32, fileutil, log
from allmydata.util.assertutil import precondition
from allmydata.util.hashutil import timing_safe_compare
from allmydata.storage.lease import LeaseInfo
from allmydata.storage.common import UnknownImmutableContainerVersionError, \
DataTooLargeError
# each share file (in storage/shares/$SI/$SHNUM) contains lease information
# and share data. The share data is accessed by RIBucketWriter.write and
# RIBucketReader.read . The lease information is not accessible through these
# interfaces.
# The share file has the following layout:
# 0x00: share file version number, four bytes, current version is 1
# 0x04: share data length, four bytes big-endian = A # See Footnote 1 below.
# 0x08: number of leases, four bytes big-endian
# 0x0c: beginning of share data (see immutable.layout.WriteBucketProxy)
# A+0x0c = B: first lease. Lease format is:
# B+0x00: owner number, 4 bytes big-endian, 0 is reserved for no-owner
# B+0x04: renew secret, 32 bytes (SHA256)
# B+0x24: cancel secret, 32 bytes (SHA256)
# B+0x44: expiration time, 4 bytes big-endian seconds-since-epoch
# B+0x48: next lease, or end of record
# Footnote 1: as of Tahoe v1.3.0 this field is not used by storage servers,
# but it is still filled in by storage servers in case the storage server
# software gets downgraded from >= Tahoe v1.3.0 to < Tahoe v1.3.0, or the
# share file is moved from one storage server to another. The value stored in
# this field is truncated, so if the actual share data length is >= 2**32,
# then the value stored in this field will be the actual share data length
# modulo 2**32.
class ShareFile:
LEASE_SIZE = struct.calcsize(">L32s32sL")
sharetype = "immutable"
    def __init__(self, filename, max_size=None, create=False):
        """ If max_size is not None then I won't allow more than max_size to be written to me. If create=True and max_size must not be None. """
        precondition((max_size is not None) or (not create), max_size, create)
        self.home = filename
        self._max_size = max_size
        if create:
            # touch the file, so later callers will see that we're working on
            # it. Also construct the metadata.
            assert not os.path.exists(self.home)
            fileutil.make_dirs(os.path.dirname(self.home))
            f = open(self.home, 'wb')
            # The second field -- the four-byte share data length -- is no
            # longer used as of Tahoe v1.3.0, but we continue to write it in
            # there in case someone downgrades a storage server from >=
            # Tahoe-1.3.0 to < Tahoe-1.3.0, or moves a share file from one
            # server to another, etc. We do saturation -- a share data length
            # larger than 2**32-1 (what can fit into the field) is marked as
            # the largest length that can fit into the field. That way, even
            # if this does happen, the old < v1.3.0 server will still allow
            # clients to read the first part of the share.
            f.write(struct.pack(">LLL", 1, min(2**32-1, max_size), 0))
            f.close()
            # Leases are appended after the (max_size bytes of) share data,
            # which itself starts after the 12-byte header.
            self._lease_offset = max_size + 0x0c
            self._num_leases = 0
        else:
            # Existing share: read the header and derive the lease region
            # position from the file size and lease count.
            f = open(self.home, 'rb')
            filesize = os.path.getsize(self.home)
            (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
            f.close()
            if version != 1:
                msg = "sharefile %s had version %d but we wanted 1" % \
                      (filename, version)
                raise UnknownImmutableContainerVersionError(msg)
            self._num_leases = num_leases
            self._lease_offset = filesize - (num_leases * self.LEASE_SIZE)
        # Share data always begins right after the 12-byte header.
        self._data_offset = 0xc
    def unlink(self):
        """Remove the share file from disk."""
        os.unlink(self.home)
def read_share_data(self, offset, length):
precondition(offset >= 0)
# reads beyond the end of the data are truncated. Reads that start
# beyond the end of the data return an empty string.
seekpos = self._data_offset+offset
actuallength = max(0, min(length, self._lease_offset-seekpos))
if actuallength == 0:
return ""
f = open(self.home, 'rb')
f.seek(seekpos)
return f.read(actuallength)
    def write_share_data(self, offset, data):
        """Write `data` into the share-data region at `offset`, enforcing
        the container's maximum data size (when one was given)."""
        length = len(data)
        precondition(offset >= 0, offset)
        if self._max_size is not None and offset+length > self._max_size:
            raise DataTooLargeError(self._max_size, offset, length)
        f = open(self.home, 'rb+')
        real_offset = self._data_offset+offset
        f.seek(real_offset)
        assert f.tell() == real_offset
        f.write(data)
        f.close()
def _write_lease_record(self, f, lease_number, lease_info):
offset = self._lease_offset + lease_number * self.LEASE_SIZE
f.seek(offset)
assert f.tell() == offset
f.write(lease_info.to_immutable_data())
    def _read_num_leases(self, f):
        """Return the lease count stored at offset 0x08 of the container header."""
        f.seek(0x08)
        # big-endian unsigned 32-bit count, third field of the header
        (num_leases,) = struct.unpack(">L", f.read(4))
        return num_leases
    def _write_num_leases(self, f, num_leases):
        """Store *num_leases* at offset 0x08 of the container header."""
        f.seek(0x08)
        f.write(struct.pack(">L", num_leases))
    def _truncate_leases(self, f, num_leases):
        """Truncate the file so only the first *num_leases* lease records remain."""
        f.truncate(self._lease_offset + num_leases * self.LEASE_SIZE)
def get_leases(self):
"""Yields a LeaseInfo instance for all leases."""
f = open(self.home, 'rb')
(version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
f.seek(self._lease_offset)
for i in range(num_leases):
data = f.read(self.LEASE_SIZE)
if data:
yield LeaseInfo().from_immutable_data(data)
def add_lease(self, lease_info):
f = open(self.home, 'rb+')
num_leases = self._read_num_leases(f)
self._write_lease_record(f, num_leases, lease_info)
self._write_num_leases(f, num_leases+1)
f.close()
def renew_lease(self, renew_secret, new_expire_time):
for i,lease in enumerate(self.get_leases()):
if timing_safe_compare(lease.renew_secret, renew_secret):
# yup. See if we need to update the owner time.
if new_expire_time > lease.expiration_time:
# yes
lease.expiration_time = new_expire_time
f = open(self.home, 'rb+')
self._write_lease_record(f, i, lease)
f.close()
return
raise IndexError("unable to renew non-existent lease")
    def add_or_renew_lease(self, lease_info):
        """Renew the lease matching lease_info's renew secret, or add it as new."""
        try:
            self.renew_lease(lease_info.renew_secret,
                             lease_info.expiration_time)
        except IndexError:
            # no lease with that renew secret yet: create one
            self.add_lease(lease_info)
    def cancel_lease(self, cancel_secret):
        """Remove a lease with the given cancel_secret. If the last lease is
        cancelled, the file will be removed. Return the number of bytes that
        were freed (by truncating the list of leases, and possibly by
        deleting the file. Raise IndexError if there was no lease with the
        given cancel_secret.
        """

        leases = list(self.get_leases())
        num_leases_removed = 0
        # timing_safe_compare avoids leaking secret contents via timing
        for i,lease in enumerate(leases):
            if timing_safe_compare(lease.cancel_secret, cancel_secret):
                leases[i] = None
                num_leases_removed += 1
        if not num_leases_removed:
            raise IndexError("unable to find matching lease to cancel")
        # always true here (we raised above if nothing was removed)
        if num_leases_removed:
            # pack and write out the remaining leases. We write these out in
            # the same order as they were added, so that if we crash while
            # doing this, we won't lose any non-cancelled leases.
            leases = [l for l in leases if l] # remove the cancelled leases
            f = open(self.home, 'rb+')
            for i,lease in enumerate(leases):
                self._write_lease_record(f, i, lease)
            self._write_num_leases(f, len(leases))
            self._truncate_leases(f, len(leases))
            f.close()
        space_freed = self.LEASE_SIZE * num_leases_removed
        if not len(leases):
            # no leases remain: the whole share file goes away too
            space_freed += os.stat(self.home)[stat.ST_SIZE]
            self.unlink()
        return space_freed
class BucketWriter(Referenceable):
    """Remotely-callable handle that accepts writes for one immutable share.

    Data is written into a file under incoming/; remote_close() moves it to
    its final home. The canary's disconnect notification aborts the upload
    if the client goes away before closing.
    """
    implements(RIBucketWriter)

    def __init__(self, ss, incominghome, finalhome, max_size, lease_info, canary):
        # ss is the storage server that tracks our space accounting
        self.ss = ss
        self.incominghome = incominghome
        self.finalhome = finalhome
        self._max_size = max_size # don't allow the client to write more than this
        self._canary = canary
        self._disconnect_marker = canary.notifyOnDisconnect(self._disconnected)
        self.closed = False
        self.throw_out_all_data = False
        self._sharefile = ShareFile(incominghome, create=True, max_size=max_size)
        # also, add our lease to the file now, so that other ones can be
        # added by simultaneous uploaders
        self._sharefile.add_lease(lease_info)

    def allocated_size(self):
        """Return the number of bytes reserved for this share."""
        return self._max_size

    def remote_write(self, offset, data):
        """Write share data at *offset*, recording latency/count stats."""
        start = time.time()
        precondition(not self.closed)
        if self.throw_out_all_data:
            return
        self._sharefile.write_share_data(offset, data)
        self.ss.add_latency("write", time.time() - start)
        self.ss.count("write")

    def remote_close(self):
        """Finalize the upload: move the share into place and clean up."""
        precondition(not self.closed)
        start = time.time()

        fileutil.make_dirs(os.path.dirname(self.finalhome))
        fileutil.rename(self.incominghome, self.finalhome)
        try:
            # self.incominghome is like storage/shares/incoming/ab/abcde/4 .
            # We try to delete the parent (.../ab/abcde) to avoid leaving
            # these directories lying around forever, but the delete might
            # fail if we're working on another share for the same storage
            # index (like ab/abcde/5). The alternative approach would be to
            # use a hierarchy of objects (PrefixHolder, BucketHolder,
            # ShareWriter), each of which is responsible for a single
            # directory on disk, and have them use reference counting of
            # their children to know when they should do the rmdir. This
            # approach is simpler, but relies on os.rmdir refusing to delete
            # a non-empty directory. Do *not* use fileutil.rm_dir() here!
            os.rmdir(os.path.dirname(self.incominghome))
            # we also delete the grandparent (prefix) directory, .../ab ,
            # again to avoid leaving directories lying around. This might
            # fail if there is another bucket open that shares a prefix (like
            # ab/abfff).
            os.rmdir(os.path.dirname(os.path.dirname(self.incominghome)))
            # we leave the great-grandparent (incoming/) directory in place.
        except EnvironmentError:
            # ignore the "can't rmdir because the directory is not empty"
            # exceptions, those are normal consequences of the
            # above-mentioned conditions.
            pass
        self._sharefile = None
        self.closed = True
        self._canary.dontNotifyOnDisconnect(self._disconnect_marker)

        filelen = os.stat(self.finalhome)[stat.ST_SIZE]
        self.ss.bucket_writer_closed(self, filelen)
        self.ss.add_latency("close", time.time() - start)
        self.ss.count("close")

    def _disconnected(self):
        # canary callback: client went away; abort unless already closed
        if not self.closed:
            self._abort()

    def remote_abort(self):
        """Client-requested abort: discard the partially-written share."""
        log.msg("storage: aborting sharefile %s" % self.incominghome,
                facility="tahoe.storage", level=log.UNUSUAL)
        if not self.closed:
            self._canary.dontNotifyOnDisconnect(self._disconnect_marker)
        self._abort()
        self.ss.count("abort")

    def _abort(self):
        """Delete the partial share and tell the server its space is free."""
        if self.closed:
            return
        os.remove(self.incominghome)
        # if we were the last share to be moved, remove the incoming/
        # directory that was our parent
        parentdir = os.path.split(self.incominghome)[0]
        if not os.listdir(parentdir):
            os.rmdir(parentdir)
        self._sharefile = None

        # We are now considered closed for further writing. We must tell
        # the storage server about this so that it stops expecting us to
        # use the space it allocated for us earlier.
        self.closed = True
        self.ss.bucket_writer_closed(self, 0)
class BucketReader(Referenceable):
    """Remotely-callable read handle for a single immutable share file."""
    implements(RIBucketReader)

    def __init__(self, ss, sharefname, storage_index=None, shnum=None):
        self.ss = ss
        self._share_file = ShareFile(sharefname)
        self.storage_index = storage_index
        self.shnum = shnum

    def __repr__(self):
        si_abbrev = base32.b2a_l(self.storage_index[:8], 60)
        return "<%s %s %s>" % (self.__class__.__name__, si_abbrev, self.shnum)

    def remote_read(self, offset, length):
        """Read share data, recording latency statistics on the server."""
        started = time.time()
        data = self._share_file.read_share_data(offset, length)
        self.ss.add_latency("read", time.time() - started)
        self.ss.count("read")
        return data

    def remote_advise_corrupt_share(self, reason):
        """Forward a client's corruption report to the storage server."""
        return self.ss.remote_advise_corrupt_share("immutable",
                                                   self.storage_index,
                                                   self.shnum,
                                                   reason)
| gpl-2.0 |
BubuLK/sfepy | tests/test_units.py | 5 | 4763 | from __future__ import absolute_import
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
import six
def _cmp(s1, s2):
s1 = s1.split()
s2 = s2.split()
v1, t1 = float(s1[0]), s1[1:]
v2, t2 = float(s2[0]), s2[1:]
return (abs(v1 - v2) < (1e-15 * abs(v1))) and (t1 == t2)
class Test(TestCommon):
    """Unit-system tests for sfepy.mechanics.units (skipped without sympy)."""

    @staticmethod
    def from_conf(conf, options):
        """Standard TestCommon factory."""
        return Test(conf=conf, options=options)

    def test_units(self):
        """Check derived quantities and prefixes for several base-unit sets."""
        from sfepy.mechanics.units import Unit, Quantity, sm
        if sm is None:
            self.report('cannot import sympy, skipping')
            return True

        # SI base units
        units = ['m', 's', 'kg', 'C']
        self.report('units:', units)
        unit_set = [Unit(key) for key in units]

        q1 = Quantity('stress', unit_set)
        self.report(q1.name, ':', q1())
        assert_(_cmp(q1(), '1.0 Pa'))
        assert_(_cmp(q1('c'), '100.0 cPa'))

        q2 = Quantity('force', unit_set)
        self.report(q2.name, ':', q2())
        assert_(_cmp(q2(), '1.0 Newton'))
        assert_(_cmp(q2('d'), '0.1 dNewton'))

        q3 = Quantity('energy', unit_set)
        self.report(q3.name, ':', q3())
        assert_(_cmp(q3(), '1.0 J'))
        assert_(_cmp(q3('mu'), '1000000.0 muJ'))

        # millimetre / gram variant
        units = ['mm', 's', 'g', 'C']
        self.report('units:', units)
        unit_set = [Unit(key) for key in units]

        q1 = Quantity('stress', unit_set)
        self.report(q1.name, ':', q1())
        assert_(_cmp(q1(), '1.0 Pa'))

        q2 = Quantity('force', unit_set)
        self.report(q2.name, ':', q2())
        assert_(_cmp(q2(), '1.0 muNewton'))

        q3 = Quantity('energy', unit_set)
        self.report(q3.name, ':', q3())
        assert_(_cmp(q3(), '1.0 nJ'))

        # centimetre / millisecond / kilocoulomb variant
        units = ['cm', 'ms', 'kg', 'kC']
        self.report('units:', units)
        unit_set = [Unit(key) for key in units]

        q1 = Quantity('stress', unit_set)
        self.report(q1.name, ':', q1())
        assert_(_cmp(q1(), '0.1 GPa'))

        q2 = Quantity('force', unit_set)
        self.report(q2.name, ':', q2())
        assert_(_cmp(q2(), '10.0 kNewton'))

        q3 = Quantity('energy', unit_set)
        self.report(q3.name, ':', q3())
        assert_(_cmp(q3(), '0.1 kJ'))

        q4 = Quantity('thermal_expandability', unit_set)
        self.report(q4.name, ':', q4())
        assert_(_cmp(q4(), '0.1 MPa / C'))
        assert_(_cmp(q4('G'), '0.0001 GPa / C'))
        assert_(_cmp(q4('M'), '0.1 MPa / C'))
        assert_(_cmp(q4('k'), '100.0 kPa / C'))
        assert_(_cmp(q4('d'), '10000.0 dPa / C'))
        assert_(_cmp(q4(''), '100000.0 Pa / C'))

        # metre / gram variant: exercise the full prefix range
        units = ['m', 's', 'g', 'C']
        self.report('units:', units)
        unit_set = [Unit(key) for key in units]

        q4 = Quantity('thermal_expandability', unit_set)
        self.report(q4.name, ':', q4())
        assert_(_cmp(q4(), '1.0 mPa / C'))
        assert_(_cmp(q4('k'), str(0.000001) + ' kPa / C'))
        assert_(_cmp(q4('d'), '0.0001 dPa / C'))
        assert_(_cmp(q4(''), '0.001 Pa / C'))
        assert_(_cmp(q4('c'), '0.1 cPa / C'))
        assert_(_cmp(q4('m'), '1.0 mPa / C'))
        assert_(_cmp(q4('mu'), '1000.0 muPa / C'))
        assert_(_cmp(q4('n'), '1000000.0 nPa / C'))

        return True

    def test_consistent_sets(self):
        """Check get_consistent_unit_set() against known derived-unit tables."""
        from sfepy.mechanics.units import get_consistent_unit_set, sm
        if sm is None:
            self.report('cannot import sympy, skipping')
            return True

        # base-unit tuple -> expected derived units
        u_sets = {
            ('m', 's', 'kg', 'C') : {'force' : '1.0 Newton',
                                     'stress' : '1.0 Pa',
                                     'energy' : '1.0 J',
                                     'thermal_expandability' : '1.0 Pa / C'},
            ('mm', 's', 'kg', 'C') : {'force' : '1.0 mNewton',
                                      'stress' : '1.0 kPa',
                                      'energy' : '1.0 muJ',
                                      'thermal_expandability' : '1.0 kPa / C'},
            ('mm', 's', 'g', 'C') : {'force' : '1.0 muNewton',
                                     'stress' : '1.0 Pa',
                                     'energy' : '1.0 nJ',
                                     'thermal_expandability' : '1.0 Pa / C'},
        }

        ok = True
        for unit_set, true_derived_units in six.iteritems(u_sets):
            self.report('units:', unit_set)

            derived_units = get_consistent_unit_set(*unit_set)
            for key, true_val in six.iteritems(true_derived_units):
                val = derived_units[key]

                _ok = _cmp(true_val, val)
                self.report('%s: %s == %s -> %s' % (key, true_val, val, _ok))

                ok = ok and _ok

        return ok
BrigDan/pykcd | xkcdb.py | 1 | 3602 | #!/usr/bin/env python3
import gi
import xkcd
from random import SystemRandom
gi.require_version("Gtk","3.0")
from gi.repository import Gtk
rand = SystemRandom()
class myWindow(Gtk.Window):
    """Minimal XKCD comic browser with prev/next/random navigation.

    Bug fix versus the original: the "<" and "<<" buttons were never
    disabled near comic #1, so cur_comic could be driven to zero or a
    negative number (invalid comic ids). Navigation is now clamped and
    button sensitivity is managed in one place.
    """

    def __init__(self):
        Gtk.Window.__init__(self, title="XKCD Browser")
        # self.cur_comic stores the number of the comic currently shown;
        # start at the latest one.
        self.cur_comic = xkcd.getLatestComicNum()

        # image display (module-level code already downloaded /tmp/xkcd.png)
        self.image = Gtk.Image()
        self.image_area = Gtk.Box()
        self.image.set_from_file('/tmp/xkcd.png')
        self.image_area.set_center_widget(self.image)
        self.image_area.show_all()

        # navigation buttons
        self.rand_btn = Gtk.Button.new_with_label("random")
        self.rand_btn.connect("clicked", self.on_random_clicked)
        self.nxt_btn = Gtk.Button.new_with_label(">")
        self.nxt_btn.connect("clicked", self.on_nxt_clicked)
        self.fst_nxt_btn = Gtk.Button.new_with_label(">>")
        self.fst_nxt_btn.connect("clicked", self.on_fst_nxt_clicked)
        self.prv_btn = Gtk.Button.new_with_label("<")
        self.prv_btn.connect("clicked", self.on_prv_clicked)
        self.fst_prv_btn = Gtk.Button.new_with_label("<<")
        self.fst_prv_btn.connect("clicked", self.on_fst_prv_clicked)

        # organise buttons ~~~~~~~~~~~~~~~~~
        self.main_box = Gtk.VBox()
        self.main_box.add(self.image_area)
        self.button_box = Gtk.HButtonBox()
        self.button_box.set_homogeneous(False)
        for btn in (self.fst_prv_btn, self.prv_btn, self.rand_btn,
                    self.nxt_btn, self.fst_nxt_btn):
            self.button_box.pack_start(btn, False, True, 0)
        self.main_box.add(self.button_box)
        self.add(self.main_box)

        # initial button sensitivity (next buttons disabled at the latest)
        self._update_buttons()

        # initialise ~~~~~~~~~~~~~~~~~~~~~~~
        self.connect("delete-event", Gtk.main_quit)
        self.show_all()

    def on_nxt_clicked(self, button):
        self.cur_comic += 1
        self.update_image()

    def on_fst_nxt_clicked(self, button):
        self.cur_comic += 5
        self.update_image()

    def on_prv_clicked(self, button):
        self.cur_comic -= 1
        self.update_image()

    def on_fst_prv_clicked(self, button):
        self.cur_comic -= 5
        self.update_image()

    def on_random_clicked(self, button):
        self.cur_comic = rand.randint(1, xkcd.getLatestComicNum())
        self.update_image()

    def _update_buttons(self):
        """Enable/disable the navigation buttons so cur_comic stays in range."""
        latest = xkcd.getLatestComicNum()
        self.nxt_btn.set_sensitive(self.cur_comic < latest)
        self.fst_nxt_btn.set_sensitive(self.cur_comic <= latest - 5)
        # previously missing: going below comic #1 was possible
        self.prv_btn.set_sensitive(self.cur_comic > 1)
        self.fst_prv_btn.set_sensitive(self.cur_comic > 5)

    def update_image(self):
        """Download the current comic and refresh the display and buttons."""
        # clamp to the valid range in case a handler stepped past an edge
        latest = xkcd.getLatestComicNum()
        self.cur_comic = max(1, min(self.cur_comic, latest))
        xkcd.getComic(self.cur_comic).download(output='/tmp/',
                                               outputFile='xkcd.png')
        self.image.set_from_file('/tmp/xkcd.png')
        self._update_buttons()
# Pre-fetch the latest comic so the window has an image to show at startup.
comic = xkcd.getLatestComic()
comic.download(output="/tmp/",outputFile="xkcd.png")
# Build the window and enter the GTK main loop (blocks until quit).
Display = myWindow()
Gtk.main()
| gpl-3.0 |
gtoonstra/airflow | airflow/hooks/base_hook.py | 14 | 3184 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import random
from airflow.models import Connection
from airflow.exceptions import AirflowException
from airflow.utils.db import provide_session
from airflow.utils.log.logging_mixin import LoggingMixin
CONN_ENV_PREFIX = 'AIRFLOW_CONN_'
class BaseHook(LoggingMixin):
    """
    Abstract base class for hooks, hooks are meant as an interface to
    interact with external systems. MySqlHook, HiveHook, PigHook return
    object that can handle the connection and interaction to specific
    instances of these systems, and expose consistent methods to interact
    with them.
    """
    def __init__(self, source):
        pass

    @classmethod
    @provide_session
    def _get_connections_from_db(cls, conn_id, session=None):
        """Fetch every Connection row with the given conn_id, or raise."""
        found = (session.query(Connection)
                        .filter(Connection.conn_id == conn_id)
                        .all())
        session.expunge_all()
        if not found:
            raise AirflowException(
                "The conn_id `{0}` isn't defined".format(conn_id))
        return found

    @classmethod
    def _get_connection_from_env(cls, conn_id):
        """Build a Connection from the AIRFLOW_CONN_<ID> env var, if set."""
        environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id.upper())
        if environment_uri:
            return Connection(conn_id=conn_id, uri=environment_uri)
        return None

    @classmethod
    def get_connections(cls, conn_id):
        """Return matching connections; the env var takes precedence over the DB."""
        env_conn = cls._get_connection_from_env(conn_id)
        if env_conn:
            return [env_conn]
        return cls._get_connections_from_db(conn_id)

    @classmethod
    def get_connection(cls, conn_id):
        """Pick one of the matching connections at random."""
        conn = random.choice(cls.get_connections(conn_id))
        if conn.host:
            LoggingMixin().log.info("Using connection to: %s", conn.host)
        return conn

    @classmethod
    def get_hook(cls, conn_id):
        """Return the hook associated with the chosen connection's type."""
        return cls.get_connection(conn_id).get_hook()

    def get_conn(self):
        """Return the underlying connection object; subclasses must override."""
        raise NotImplementedError()

    def get_records(self, sql):
        raise NotImplementedError()

    def get_pandas_df(self, sql):
        raise NotImplementedError()

    def run(self, sql):
        raise NotImplementedError()
| apache-2.0 |
TREND50/GRANDproto_DAQ | cmd_tektro.py | 1 | 6783 | #30/06/2017
#VELLEYEN Stephane
#############################
import os,sys
import vxi11
instr = vxi11.Instrument("192.168.1.10")
print(instr.ask("*IDN?"))
choix=sys.argv[1]
if choix =="1": #Choix des parametres
ch=sys.argv[2]
func=sys.argv[3]
freq=sys.argv[4]
vcc=sys.argv[5]
vcc=int(vcc)
vmax=vpp/2
vmin=vpp/-2
if vcc>300:
print "Erreur vcc>300mv"
vmax=150
vmin=-150
print "Defaut: ",vmax
print "Defaut: ",vmin
else:
print ""
voffset=0
phase=0
out="ON"
symm=100
width=500
lead=10
trail=10
delay=20
dcyc=50
out="ON"
if func=="RAMP":
#set ramp symmetry
symmcmd = "SOUR{0}:FUNC:RAMP:SYMM {1}".format(ch,symm)
print "Setting ramp symmetry:",symmcmd
instr.write(symmcmd)
elif func=="PULS":
#set pulse width
widthcmd = "SOUR{0}:PULS:WIDT {1}ns".format(ch,width)
print "Setting pulse width:",widthcmd
instr.write(widthcmd)
#set edges
leadcmd = "SOUR{0}:PULS:TRAN:LEAD {1}ns".format(ch,lead)
print "Setting pulse leading:",leadcmd
instr.write(leadcmd)
trailcmd = "SOUR{0}:PULS:TRAN:TRA {1}ns".format(ch,trail)
print "Setting pulse trailing:",trailcmd
instr.write(trailcmd)
#set pulse delay
delaycmd = "SOUR{0}:PULS:DEL {1}ms".format(ch,delay)
print "Setting pulse delay:",delaycmd
instr.write(delaycmd)
#set pulse DCYC
dcyccmd = "SOUR{0}:PULS:DCYC {1}".format(ch,dcyc)
print "Setting pulse delay:",dcyccmd
instr.write(dcyccmd)
#set function
funccmd = "SOUR{0}:FUNC {1}".format(ch,func)
print "Setting function:",funccmd
instr.write(funccmd)
#Set frequency
freqcmd = "SOUR{0}:FREQ:FIX {1}".format(ch,freq)
print "Setting frequency:",freqcmd
instr.write(freqcmd)
#set high level
vmaxcmd = "SOUR{0}:VOLTAGE:HIGH {1}mV".format(ch,vmax)
print "Setting HIGHT Voltage:",vmaxcmd
instr.write(vmaxcmd)
#set low level
vmincmd = "SOUR{0}:VOLTAGE:LOW {1}mV".format(ch,vmin)
print "Setting Low Voltage:",vmincmd
instr.write(vmincmd)
#set offset
voffcmd = "SOUR{0}:VOLTAGE:OFFS {1}V".format(ch,voffset)
print "Setting offset Voltage:",voffcmd
instr.write(voffcmd)
#set phase
phasecmd = "SOUR{0}:PHAS {1}DEG".format(ch,phase)
print "Setting phase:",phasecmd
instr.write(phasecmd)
#set OUTPUT ON
outcmd = "OUTP{0} {1}".format(ch,out)
print "Setting outout:",outcmd
instr.write(outcmd)
elif choix=="2":# Sinus 66MHz
ch=2
func="SIN"
freq=66 #MHz
voffset=0 #V
vmax=50 #mV
vmin=-50 #mV
phase=0 #DEG
out="ON"
vcc=vmax+abs(vmin)
if vcc>300:
print "Erreur vcc>300mv"
vmax=150 #mV
vmin=-150 #mV
print "Defaut: ",vmax
print "Defaut: ",vmin
else:
print ""
#set function
funccmd = "SOUR{0}:FUNC {1}".format(ch,func)
print "Setting function:",funccmd
instr.write(funccmd)
#Set frequency
freqcmd = "SOUR{0}:FREQ:FIX {1}MHz".format(ch,freq)
print "Setting frequency(MHz):",freqcmd
instr.write(freqcmd)
#set high level
vmaxcmd = "SOUR{0}:VOLTAGE:IMM:HIGH {1}mV".format(ch,vmax)
print "Setting HIGHT Voltage(mV):",vmaxcmd
instr.write(vmaxcmd)
#set low level
vmincmd = "SOUR{0}:VOLTAGE:IMM:LOW {1}mV".format(ch,vmin)
print "Setting Low Voltage(mV):",vmincmd
instr.write(vmincmd)
#set offset
voffcmd = "SOUR{0}:VOLTAGE:OFFS {1}V".format(ch,voffset)
print "Setting offset Voltage(V):",voffcmd
instr.write(voffcmd)
#set phase
phasecmd = "SOUR{0}:PHAS {1}DEG".format(ch,phase)
print "Setting phase:",phasecmd
instr.write(phasecmd)
#set OUTPUT ON
outcmd = "OUTP{0} {1}".format(ch,out)
print "Setting outout:",outcmd
instr.write(outcmd)
elif choix =="3":# Square
ch=2
func="SQU"
freq=100 #Hz
voffset=0 #V
vmax=50 #mV
vmin=-50 #mV
phase=0 #DEG
out="ON"
vcc=vmax+abs(vmin)
if vcc>300:
print "Erreur vcc>300mv"
vmax=150 #mV
vmin=-150 #mV
print "Defaut: ",vmax
print "Defaut: ",vmin
else:
print ""
#set function
funccmd = "SOUR{0}:FUNC {1}".format(ch,func)
print "Setting function:",funccmd
instr.write(funccmd)
#Set frequency
freqcmd = "SOUR{0}:FREQ:FIX {1}Hz".format(ch,freq)
print "Setting frequency:",freqcmd
instr.write(freqcmd)
#set high level
vmaxcmd = "SOUR{0}:VOLTAGE:HIGH {1}mV".format(ch,vmax)
print "Setting HIGHT Voltage:",vmaxcmd
instr.write(vmaxcmd)
#set low level
vmincmd = "SOUR{0}:VOLTAGE:LOW {1}mV".format(ch,vmin)
print "Setting Low Voltage:",vmincmd
instr.write(vmincmd)
#set offset
voffcmd = "SOUR{0}:VOLTAGE:OFFS {1}V".format(ch,voffset)
print "Setting offset Voltage:",voffcmd
instr.write(voffcmd)
#set phase
phasecmd = "SOUR{0}:PHAS {1}DEG".format(ch,phase)
print "Setting phase:",phasecmd
instr.write(phasecmd)
#set OUTPUT ON
outcmd = "OUTP{0} {1}".format(ch,out)
print "Setting outout:",outcmd
instr.write(outcmd)
else:# Defaut
ch=2
func="SIN"
freq=66 #MHz
vmax =50 #mV
vmin =-50 #mV
voffset=0 #V
phase=0 #DEG
symm=100
width=500
lead=10
trail=10
delay=20
dcyc=50
out="ON"
if func=="RAMP":
#set ramp symmetry
symmcmd = "SOUR{0}:FUNC:RAMP:SYMM {1}".format(ch,symm)
print "Setting ramp symmetry:",symmcmd
instr.write(symmcmd)
elif func=="PULS":
#set pulse width
widthcmd = "SOUR{0}:PULS:WIDT {1}ns".format(ch,width)
print "Setting pulse width:",widthcmd
instr.write(widthcmd)
#set edges
leadcmd = "SOUR{0}:PULS:TRAN:LEAD {1}ns".format(ch,lead)
print "Setting pulse leading:",leadcmd
instr.write(leadcmd)
trailcmd = "SOUR{0}:PULS:TRAN:TRA {1}ns".format(ch,trail)
print "Setting pulse trailing:",trailcmd
instr.write(trailcmd)
#set pulse delay
delaycmd = "SOUR{0}:PULS:DEL {1}ms".format(ch,delay)
print "Setting pulse delay:",delaycmd
instr.write(delaycmd)
#set pulse DCYC
dcyccmd = "SOUR{0}:PULS:DCYC {1}".format(ch,dcyc)
print "Setting pulse delay:",dcyccmd
instr.write(dcyccmd)
vcc=vmax+abs(vmin)
if vcc>300:
print "Erreur vcc>300mv"
vmax=150 #mV
vmin=-150 #mV
print "Defaut: ",vmax
print "Defaut: ",vmin
else:
print ""
#set function
funccmd = "SOUR{0}:FUNC {1}".format(ch,func)
print "Setting function:",funccmd
instr.write(funccmd)
#Set frequency
freqcmd = "SOUR{0}:FREQ:FIX {1}MHz".format(ch,freq)
print "Setting frequency:",freqcmd
instr.write(freqcmd)
#set high level
vmaxcmd = "SOUR{0}:VOLTAGE:HIGH {1}mV".format(ch,vmax)
print "Setting HIGHT Voltage:",vmaxcmd
instr.write(vmaxcmd)
#set low level
vmincmd = "SOUR{0}:VOLTAGE:LOW {1}mV".format(ch,vmin)
print "Setting Low Voltage:",vmincmd
instr.write(vmincmd)
#set offset
voffcmd = "SOUR{0}:VOLTAGE:OFFS {1}V".format(ch,voffset)
print "Setting offset Voltage:",voffcmd
instr.write(voffcmd)
#set phase
phasecmd = "SOUR{0}:PHAS {1}DEG".format(ch,phase)
print "Setting phase:",phasecmd
instr.write(phasecmd)
#set OUTPUT ON
outcmd = "OUTP{0} {1}".format(ch,out)
print "Setting outout:",outcmd
instr.write(outcmd)
| gpl-3.0 |
benspaulding/django | tests/regressiontests/pagination_regress/tests.py | 9 | 8453 | from __future__ import unicode_literals
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.utils.unittest import TestCase
class PaginatorTests(TestCase):
    """
    Tests for the Paginator and Page classes.
    """
    def check_paginator(self, params, output):
        """
        Helper method that instantiates a Paginator object from the passed
        params and then checks that its attributes match the passed output.
        """
        count, num_pages, page_range = output
        paginator = Paginator(*params)
        self.check_attribute('count', paginator, count, params)
        self.check_attribute('num_pages', paginator, num_pages, params)
        self.check_attribute('page_range', paginator, page_range, params)

    def check_attribute(self, name, paginator, expected, params):
        """
        Helper method that checks a single attribute and gives a nice error
        message upon test failure.
        """
        got = getattr(paginator, name)
        self.assertEqual(expected, got,
            "For '%s', expected %s but got %s. Paginator parameters were: %s"
            % (name, expected, got, params))

    def test_invalid_page_number(self):
        """
        Tests that invalid page numbers result in the correct exception being
        raised.
        """
        paginator = Paginator([1, 2, 3], 2)
        self.assertRaises(PageNotAnInteger, paginator.validate_number, None)
        self.assertRaises(PageNotAnInteger, paginator.validate_number, 'x')

    def test_paginator(self):
        """
        Tests the paginator attributes using varying inputs.
        """
        nine = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        ten = nine + [10]
        eleven = ten + [11]
        tests = (
            # Each item is two tuples:
            #     First tuple is Paginator parameters - object_list, per_page,
            #         orphans, and allow_empty_first_page.
            #     Second tuple is resulting Paginator attributes - count,
            #         num_pages, and page_range.
            # Ten items, varying orphans, no empty first page.
            ((ten, 4, 0, False), (10, 3, [1, 2, 3])),
            ((ten, 4, 1, False), (10, 3, [1, 2, 3])),
            ((ten, 4, 2, False), (10, 2, [1, 2])),
            ((ten, 4, 5, False), (10, 2, [1, 2])),
            ((ten, 4, 6, False), (10, 1, [1])),
            # Ten items, varying orphans, allow empty first page.
            ((ten, 4, 0, True), (10, 3, [1, 2, 3])),
            ((ten, 4, 1, True), (10, 3, [1, 2, 3])),
            ((ten, 4, 2, True), (10, 2, [1, 2])),
            ((ten, 4, 5, True), (10, 2, [1, 2])),
            ((ten, 4, 6, True), (10, 1, [1])),
            # One item, varying orphans, no empty first page.
            (([1], 4, 0, False), (1, 1, [1])),
            (([1], 4, 1, False), (1, 1, [1])),
            (([1], 4, 2, False), (1, 1, [1])),
            # One item, varying orphans, allow empty first page.
            (([1], 4, 0, True), (1, 1, [1])),
            (([1], 4, 1, True), (1, 1, [1])),
            (([1], 4, 2, True), (1, 1, [1])),
            # Zero items, varying orphans, no empty first page.
            (([], 4, 0, False), (0, 0, [])),
            (([], 4, 1, False), (0, 0, [])),
            (([], 4, 2, False), (0, 0, [])),
            # Zero items, varying orphans, allow empty first page.
            (([], 4, 0, True), (0, 1, [1])),
            (([], 4, 1, True), (0, 1, [1])),
            (([], 4, 2, True), (0, 1, [1])),
            # Number if items one less than per_page.
            (([], 1, 0, True), (0, 1, [1])),
            (([], 1, 0, False), (0, 0, [])),
            (([1], 2, 0, True), (1, 1, [1])),
            ((nine, 10, 0, True), (9, 1, [1])),
            # Number if items equal to per_page.
            (([1], 1, 0, True), (1, 1, [1])),
            (([1, 2], 2, 0, True), (2, 1, [1])),
            ((ten, 10, 0, True), (10, 1, [1])),
            # Number if items one more than per_page.
            (([1, 2], 1, 0, True), (2, 2, [1, 2])),
            (([1, 2, 3], 2, 0, True), (3, 2, [1, 2])),
            ((eleven, 10, 0, True), (11, 2, [1, 2])),
            # Number if items one more than per_page with one orphan.
            (([1, 2], 1, 1, True), (2, 1, [1])),
            (([1, 2, 3], 2, 1, True), (3, 1, [1])),
            ((eleven, 10, 1, True), (11, 1, [1])),
            # Non-integer inputs
            # NOTE(review): the two pairs below are exact duplicates --
            # possibly they were meant to cover str vs unicode inputs;
            # confirm against upstream history.
            ((ten, '4', 1, False), (10, 3, [1, 2, 3])),
            ((ten, '4', 1, False), (10, 3, [1, 2, 3])),
            ((ten, 4, '1', False), (10, 3, [1, 2, 3])),
            ((ten, 4, '1', False), (10, 3, [1, 2, 3])),
        )
        for params, output in tests:
            self.check_paginator(params, output)

    def check_indexes(self, params, page_num, indexes):
        """
        Helper method that instantiates a Paginator object from the passed
        params and then checks that the start and end indexes of the passed
        page_num match those given as a 2-tuple in indexes.
        """
        paginator = Paginator(*params)
        # 'first'/'last' are symbolic page numbers resolved here
        if page_num == 'first':
            page_num = 1
        elif page_num == 'last':
            page_num = paginator.num_pages
        page = paginator.page(page_num)
        start, end = indexes
        msg = ("For %s of page %s, expected %s but got %s."
               " Paginator parameters were: %s")
        self.assertEqual(start, page.start_index(),
            msg % ('start index', page_num, start, page.start_index(), params))
        self.assertEqual(end, page.end_index(),
            msg % ('end index', page_num, end, page.end_index(), params))

    def test_page_indexes(self):
        """
        Tests that paginator pages have the correct start and end indexes.
        """
        ten = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        tests = (
            # Each item is three tuples:
            #     First tuple is Paginator parameters - object_list, per_page,
            #         orphans, and allow_empty_first_page.
            #     Second tuple is the start and end indexes of the first page.
            #     Third tuple is the start and end indexes of the last page.
            # Ten items, varying per_page, no orphans.
            ((ten, 1, 0, True), (1, 1), (10, 10)),
            ((ten, 2, 0, True), (1, 2), (9, 10)),
            ((ten, 3, 0, True), (1, 3), (10, 10)),
            ((ten, 5, 0, True), (1, 5), (6, 10)),
            # Ten items, varying per_page, with orphans.
            ((ten, 1, 1, True), (1, 1), (9, 10)),
            ((ten, 1, 2, True), (1, 1), (8, 10)),
            ((ten, 3, 1, True), (1, 3), (7, 10)),
            ((ten, 3, 2, True), (1, 3), (7, 10)),
            ((ten, 3, 4, True), (1, 3), (4, 10)),
            ((ten, 5, 1, True), (1, 5), (6, 10)),
            ((ten, 5, 2, True), (1, 5), (6, 10)),
            ((ten, 5, 5, True), (1, 10), (1, 10)),
            # One item, varying orphans, no empty first page.
            (([1], 4, 0, False), (1, 1), (1, 1)),
            (([1], 4, 1, False), (1, 1), (1, 1)),
            (([1], 4, 2, False), (1, 1), (1, 1)),
            # One item, varying orphans, allow empty first page.
            (([1], 4, 0, True), (1, 1), (1, 1)),
            (([1], 4, 1, True), (1, 1), (1, 1)),
            (([1], 4, 2, True), (1, 1), (1, 1)),
            # Zero items, varying orphans, allow empty first page.
            (([], 4, 0, True), (0, 0), (0, 0)),
            (([], 4, 1, True), (0, 0), (0, 0)),
            (([], 4, 2, True), (0, 0), (0, 0)),
        )
        for params, first, last in tests:
            self.check_indexes(params, 'first', first)
            self.check_indexes(params, 'last', last)

        # When no items and no empty first page, we should get EmptyPage error.
        self.assertRaises(EmptyPage, self.check_indexes, ([], 4, 0, False), 1, None)
        self.assertRaises(EmptyPage, self.check_indexes, ([], 4, 1, False), 1, None)
        self.assertRaises(EmptyPage, self.check_indexes, ([], 4, 2, False), 1, None)

    def test_page_sequence(self):
        """
        Tests that a paginator page acts like a standard sequence.
        """
        eleven = 'abcdefghijk'
        page2 = Paginator(eleven, per_page=5, orphans=1).page(2)
        self.assertEqual(len(page2), 6)
        self.assertTrue('k' in page2)
        self.assertFalse('a' in page2)
        self.assertEqual(''.join(page2), 'fghijk')
        self.assertEqual(''.join(reversed(page2)), 'kjihgf')
onceuponatimeforever/oh-mainline | vendor/packages/html5lib/html5lib/tests/test_parser.py | 66 | 5037 | import os
import sys
import traceback
import StringIO
import warnings
import re
# Turn warnings into errors so DataLossWarning from the parser is catchable.
warnings.simplefilter("error")

from support import html5lib_test_files as data_files
from support import TestData, convert, convertExpected

import html5lib
from html5lib import html5parser, treebuilders, constants

# Tree builders that are always present; optional backends are probed below.
treeTypes = {"simpletree":treebuilders.getTreeBuilder("simpletree"),
             "DOM":treebuilders.getTreeBuilder("dom")}

#Try whatever etree implementations are available from a list that are
#"supposed" to work
try:
    import xml.etree.ElementTree as ElementTree
    treeTypes['ElementTree'] = treebuilders.getTreeBuilder("etree", ElementTree, fullTree=True)
except ImportError:
    try:
        import elementtree.ElementTree as ElementTree
        treeTypes['ElementTree'] = treebuilders.getTreeBuilder("etree", ElementTree, fullTree=True)
    except ImportError:
        pass

try:
    import xml.etree.cElementTree as cElementTree
    treeTypes['cElementTree'] = treebuilders.getTreeBuilder("etree", cElementTree, fullTree=True)
except ImportError:
    try:
        import cElementTree
        treeTypes['cElementTree'] = treebuilders.getTreeBuilder("etree", cElementTree, fullTree=True)
    except ImportError:
        pass

try:
    # lxml.html is preferred; fall back to lxml.etree before giving up
    try:
        import lxml.html as lxml
    except ImportError:
        import lxml.etree as lxml
    treeTypes['lxml'] = treebuilders.getTreeBuilder("lxml", lxml, fullTree=True)
except ImportError:
    pass

try:
    import BeautifulSoup
    treeTypes["beautifulsoup"] = treebuilders.getTreeBuilder("beautifulsoup", fullTree=True)
except ImportError:
    pass

#Try whatever dom implementations are available from a list that are
#"supposed" to work
try:
    import pxdom
    treeTypes["pxdom"] = treebuilders.getTreeBuilder("dom", pxdom)
except ImportError:
    pass

#Run the parse error checks (disabled by default)
checkParseErrors = False
#XXX - There should just be one function here but for some reason the testcase
#format differs from the treedump format by a single space character
def convertTreeDump(data):
    """Convert a serialized tree dump to the testcase format, dropping
    its first line."""
    return "\n".join(convert(3)(data).split("\n")[1:])

# Bound .sub of a regex matching each dump line's opening tag; used in
# runParserTest to rewrite expected trees for the HTML-namespace case.
namespaceExpected = re.compile(r"^(\s*)<(\S+)>", re.M).sub
def runParserTest(innerHTML, input, expected, errors, treeClass,
                  namespaceHTMLElements):
    """Parse *input* (as a fragment when innerHTML is set) with the given
    tree builder and assert the serialized tree matches *expected*;
    parse-error counts are checked only when checkParseErrors is enabled."""
    #XXX - move this out into the setup function

    #concatenate all consecutive character tokens into a single token
    try:
        p = html5parser.HTMLParser(tree = treeClass,
                                   namespaceHTMLElements=namespaceHTMLElements)
    except constants.DataLossWarning:
        # this builder cannot represent the requested configuration; skip
        return

    try:
        if innerHTML:
            document = p.parseFragment(input, innerHTML)
        else:
            try:
                document = p.parse(input)
            except constants.DataLossWarning:
                return
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
    except:
        errorMsg = u"\n".join([u"\n\nInput:", input, u"\nExpected:", expected,
                               u"\nTraceback:", traceback.format_exc()])
        assert False, errorMsg.encode("utf8")

    output = convertTreeDump(p.tree.testSerializer(document))

    expected = convertExpected(expected)
    if namespaceHTMLElements:
        expected = namespaceExpected(r"\1<html \2>", expected)

    errorMsg = u"\n".join([u"\n\nInput:", input, u"\nExpected:", expected,
                           u"\nReceived:", output])
    assert expected == output, errorMsg.encode("utf8")

    # NOTE(review): when datavars is not a dict the message is built from
    # the tuple (datavars,) -- confirm this matches the data files' intent.
    errStr = [u"Line: %i Col: %i %s"%(line, col,
              constants.E[errorcode] % datavars if isinstance(datavars, dict) else (datavars,)) for
              ((line,col), errorcode, datavars) in p.errors]

    errorMsg2 = u"\n".join([u"\n\nInput:", input,
                            u"\nExpected errors (" + str(len(errors)) + u"):\n" + u"\n".join(errors),
                            u"\nActual errors (" + str(len(p.errors)) + u"):\n" + u"\n".join(errStr)])
    if checkParseErrors:
        assert len(p.errors) == len(errors), errorMsg2.encode("utf-8")
def test_parser():
    """Nose-style test generator: yield one runParserTest invocation per
    (test case, tree builder, namespace mode) combination found in the
    tree-construction data files."""
    sys.stderr.write('Testing tree builders '+ " ".join(treeTypes.keys()) + "\n")
    files = data_files('tree-construction')
    for filename in files:
        testName = os.path.basename(filename).replace(".dat","")
        tests = TestData(filename, "data")
        for index, test in enumerate(tests):
            # NOTE: Python-2-only bare tuple inside the comprehension `for`
            input, errors, innerHTML, expected = [test[key] for key in
                                                  'data', 'errors',
                                                  'document-fragment',
                                                  'document']
            if errors:
                errors = errors.split("\n")
            for treeName, treeCls in treeTypes.iteritems():
                for namespaceHTMLElements in (True, False):
                    print input
                    yield (runParserTest, innerHTML, input, expected, errors, treeCls,
                           namespaceHTMLElements)
                    # presumably limits each builder to one namespace mode;
                    # TODO confirm intended loop level for this break
                    break
| agpl-3.0 |
tgbugs/pyontutils | neurondm/test/test_integration.py | 1 | 2300 | import unittest
from pathlib import Path
import pytest
from pyontutils.utils import get_working_dir
from pyontutils.config import auth
from pyontutils.integration_test_helper import _TestScriptsBase, Folders, Repo
import neurondm
class TestScripts(Folders, _TestScriptsBase):
    """ woo! """


# Module-level configuration consumed by TestScripts.populate_tests below.
only = tuple()
lasts = tuple()
# neurondm modules whose scripts should be executed as tests
neurons = ('neurondm/example',
           'neurondm/phenotype_namespaces',
           'neurondm/models/allen_cell_types',
           'neurondm/models/phenotype_direct',
           'neurondm/models/basic_neurons',
           'neurondm/models/huang2017',
           'neurondm/models/ma2015',
           'neurondm/models/cuts',
           'neurondm/build',
           'neurondm/sheets',)
skip = tuple()

olr = auth.get_path('ontology-local-repo')
if olr.exists():
    ont_repo = Repo(olr)
    # FIXME these aren't called?
    post_load = lambda : (ont_repo.remove_diff_untracked(), ont_repo.checkout_diff_tracked())
    post_main = lambda : (ont_repo.remove_diff_untracked(), ont_repo.checkout_diff_tracked())
    ### handle ontology branch behavior
    checkout_ok = neurondm.core.ont_checkout_ok
    print('checkout ok:', checkout_ok)
    ont_branch = ont_repo.active_branch.name
    if not checkout_ok and ont_branch != 'neurons':
        # wrong branch: these modules would fail, so skip them
        neurons += ('neurondm/core', 'neurondm/lang',)  # FIXME these two are ok for no repo but not wrong branch?!
        skip += tuple(n.split('/')[-1] for n in neurons)
    else:
        lasts += tuple(f'neurondm/{s}.py' for s in neurons)
else:
    # no local ontology repo at all: skip every neuron script
    skip += tuple(n.split('/')[-1] for n in neurons)

### build mains
mains = {}  # NOTE mains run even if this is empty ? is this desired?
module_parent = Path(__file__).resolve().parent.parent.as_posix()
working_dir = get_working_dir(__file__)
if working_dir is None:
    # python setup.py test will run from the module_parent folder
    # I'm pretty the split was only implemented because I was trying
    # to run all tests from the working_dir in one shot, but that has
    # a number of problems with references to local vs installed packages
    working_dir = module_parent
print(module_parent)
print(working_dir)
TestScripts.populate_tests(neurondm, working_dir, mains, skip=skip, lasts=lasts,
                           module_parent=module_parent, only=only, do_mains=True)
| mit |
vipul-sharma20/oh-mainline | vendor/packages/sqlparse/tests/test_filters.py | 45 | 2523 | '''
Created on 24/03/2012
@author: piranna
'''
import unittest
from sqlparse.filters import StripWhitespace, Tokens2Unicode
from sqlparse.lexer import tokenize
class Test__StripWhitespace(unittest.TestCase):
    """Check that StripWhitespace collapses all insignificant whitespace in
    tokenized SQL while Tokens2Unicode re-joins the result."""

    # simple multi-statement input
    sql = """INSERT INTO dir_entries(type)VALUES(:type);
INSERT INTO directories(inode)
VALUES(:inode)
LIMIT 1"""

    # SELECT with WHERE clause spread across lines
    sql2 = """SELECT child_entry,asdf AS inode, creation
FROM links
WHERE parent_dir == :parent_dir AND name == :name
LIMIT 1"""

    # larger query with JOINs, aliases and function calls
    sql3 = """SELECT
0 AS st_dev,
0 AS st_uid,
0 AS st_gid,
dir_entries.type AS st_mode,
dir_entries.inode AS st_ino,
COUNT(links.child_entry) AS st_nlink,
:creation AS st_ctime,
dir_entries.access AS st_atime,
dir_entries.modification AS st_mtime,
COALESCE(files.size,0) AS st_size,
COALESCE(files.size,0) AS size
FROM dir_entries
LEFT JOIN files
ON dir_entries.inode == files.inode
LEFT JOIN links
ON dir_entries.inode == links.child_entry
WHERE dir_entries.inode == :inode
GROUP BY dir_entries.inode
LIMIT 1"""

    def test_StripWhitespace1(self):
        # newlines between statements are removed entirely
        self.assertEqual(
            Tokens2Unicode(StripWhitespace(tokenize(self.sql))),
            'INSERT INTO dir_entries(type)VALUES(:type);INSERT INTO '
            'directories(inode)VALUES(:inode)LIMIT 1')

    def test_StripWhitespace2(self):
        # spaces around operators collapse as well
        self.assertEqual(
            Tokens2Unicode(StripWhitespace(tokenize(self.sql2))),
            'SELECT child_entry,asdf AS inode,creation FROM links WHERE '
            'parent_dir==:parent_dir AND name==:name LIMIT 1')

    def test_StripWhitespace3(self):
        self.assertEqual(
            Tokens2Unicode(StripWhitespace(tokenize(self.sql3))),
            'SELECT 0 AS st_dev,0 AS st_uid,0 AS st_gid,dir_entries.type AS '
            'st_mode,dir_entries.inode AS st_ino,COUNT(links.child_entry)AS '
            'st_nlink,:creation AS st_ctime,dir_entries.access AS st_atime,'
            'dir_entries.modification AS st_mtime,COALESCE(files.size,0)AS '
            'st_size,COALESCE(files.size,0)AS size FROM dir_entries LEFT JOIN'
            ' files ON dir_entries.inode==files.inode LEFT JOIN links ON '
            'dir_entries.inode==links.child_entry WHERE dir_entries.inode=='
            ':inode GROUP BY dir_entries.inode LIMIT 1')
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
| agpl-3.0 |
jkstrick/samba | buildtools/wafsamba/samba_autoconf.py | 15 | 28264 | # a waf tool to add autoconf-like macros to the configure section
import Build, os, sys, Options, preproc, Logs
import string
from Configure import conf
from samba_utils import *
import samba_cross
missing_headers = set()
####################################################
# some autoconf like helpers, to make the transition
# to waf a bit easier for those used to autoconf
# m4 files
@runonce
@conf
def DEFINE(conf, d, v, add_to_cflags=False, quote=False):
    """Record configure define *d* with value *v*.

    When add_to_cflags is True the define is also mirrored onto the C
    compiler command line via CCDEFINES.
    """
    conf.define(d, v, quote=quote)
    if add_to_cflags:
        flag = '%s=%s' % (d, v)
        conf.env.append_value('CCDEFINES', flag)
def hlist_to_string(conf, headers=None):
    """Render the configured header list as a block of #include lines.

    Extra *headers* (string or list) are appended for this call only;
    conf.env.hlist itself is never mutated.
    """
    include_names = conf.env.hlist
    if headers:
        # concatenation makes a fresh list, so the extras do not leak back
        include_names = include_names + TO_LIST(headers)
    return ''.join('#include <%s>\n' % name for name in include_names)
@conf
def COMPOUND_START(conf, msg):
    '''start a compound test'''
    # A compound test groups many conf.check() calls under one progress
    # message.  While active, the per-check message functions are replaced
    # with no-ops; a nesting counter supports recursive compound tests.
    def null_check_message_1(self,*k,**kw):
        return
    def null_check_message_2(self,*k,**kw):
        return
    v = getattr(conf.env, 'in_compound', [])
    if v != [] and v != 0:
        # already inside a compound test: just bump the nesting level
        conf.env.in_compound = v + 1
        return
    conf.check_message_1(msg)
    # save the real message hooks so COMPOUND_END can restore them
    conf.saved_check_message_1 = conf.check_message_1
    conf.check_message_1 = null_check_message_1
    conf.saved_check_message_2 = conf.check_message_2
    conf.check_message_2 = null_check_message_2
    conf.env.in_compound = 1
@conf
def COMPOUND_END(conf, result):
    '''end a compound test, printing its overall result

    result may be True ("ok"), falsy ("not found") or any other value,
    which is printed verbatim (e.g. a discovered value).
    '''
    conf.env.in_compound -= 1
    if conf.env.in_compound != 0:
        # still nested inside an outer compound test
        return
    # restore the message hooks saved by COMPOUND_START
    conf.check_message_1 = conf.saved_check_message_1
    conf.check_message_2 = conf.saved_check_message_2
    p = conf.check_message_2
    if result is True:
        p('ok')
    elif not result:
        p('not found', 'YELLOW')
    else:
        p(result)
@feature('nolink')
def nolink(self):
    '''using the nolink type in conf.check() allows us to avoid
    the link stage of a test, thus speeding it up for tests
    that where linking is not needed'''
    # intentionally empty: registering the feature is all that is required
    pass
def CHECK_HEADER(conf, h, add_headers=False, lib=None):
    '''check for a header

    On success defines HAVE_<HEADER> and optionally appends the header to
    conf.env.hlist (used by later checks).  Failures without a lib are
    cached in the module-level missing_headers set to skip re-probing.
    '''
    if h in missing_headers and lib is None:
        return False
    # derive the define name: path separators and punctuation become '_'
    d = h.upper().replace('/', '_')
    d = d.replace('.', '_')
    d = d.replace('-', '_')
    d = 'HAVE_%s' % d
    if CONFIG_SET(conf, d):
        # already known to exist from an earlier check
        if add_headers:
            if not h in conf.env.hlist:
                conf.env.hlist.append(h)
        return True
    (ccflags, ldflags, cpppath) = library_flags(conf, lib)
    hdrs = hlist_to_string(conf, headers=h)
    if lib is None:
        lib = ""
    ret = conf.check(fragment='%s\nint main(void) { return 0; }' % hdrs,
                     type='nolink',
                     execute=0,
                     ccflags=ccflags,
                     includes=cpppath,
                     uselib=lib.upper(),
                     msg="Checking for header %s" % h)
    if not ret:
        missing_headers.add(h)
        return False
    conf.DEFINE(d, 1)
    if add_headers and not h in conf.env.hlist:
        conf.env.hlist.append(h)
    return ret
@conf
def CHECK_HEADERS(conf, headers, add_headers=False, together=False, lib=None):
    '''check for a list of headers

    when together==True, then the headers accumulate within this test.
    This is useful for interdependent headers
    '''
    ret = True
    if not add_headers and together:
        # accumulate for the duration of this call only; restore afterwards
        saved_hlist = conf.env.hlist[:]
        set_add_headers = True
    else:
        set_add_headers = add_headers
    for hdr in TO_LIST(headers):
        if not CHECK_HEADER(conf, hdr, set_add_headers, lib=lib):
            ret = False
    if not add_headers and together:
        conf.env.hlist = saved_hlist
    return ret
def header_list(conf, headers=None, lib=None):
    """Return #include lines for those of *headers* that actually exist.

    Headers that fail CHECK_HEADER are silently dropped; the configured
    conf.env.hlist is always included via hlist_to_string.
    """
    if headers is None:
        existing = []
    else:
        existing = [h for h in TO_LIST(headers)
                    if CHECK_HEADER(conf, h, add_headers=False, lib=lib)]
    return hlist_to_string(conf, headers=existing)
@conf
def CHECK_TYPE(conf, t, alternate=None, headers=None, define=None, lib=None, msg=None):
    '''check for a single type

    If the type is missing and *alternate* is given, defines the type name
    itself as the alternate (e.g. a typedef substitute in config.h).
    '''
    if define is None:
        define = 'HAVE_' + t.upper().replace(' ', '_')
    if msg is None:
        msg='Checking for %s' % t
    # a simple declaration is enough; no linking needed
    ret = CHECK_CODE(conf, '%s _x' % t,
                     define,
                     execute=False,
                     headers=headers,
                     local_include=False,
                     msg=msg,
                     lib=lib,
                     link=False)
    if not ret and alternate:
        conf.DEFINE(t, alternate)
    return ret
@conf
def CHECK_TYPES(conf, list, headers=None, define=None, alternate=None, lib=None):
    """Check every type named in *list*; True only if all are present.

    All types are probed (no short-circuit) so each gets its define.
    """
    results = [CHECK_TYPE(conf, type_name, headers=headers,
                          define=define, alternate=alternate, lib=lib)
               for type_name in TO_LIST(list)]
    return all(results)
@conf
def CHECK_TYPE_IN(conf, t, headers=None, alternate=None, define=None):
    '''check for a single type with a header'''
    # thin convenience wrapper around CHECK_TYPE
    return CHECK_TYPE(conf, t, headers=headers, alternate=alternate, define=define)
@conf
def CHECK_VARIABLE(conf, v, define=None, always=False,
                   headers=None, msg=None, lib=None):
    '''check for a variable declaration (or define)

    Works for both real variables and preprocessor defines: when *v* is a
    macro the #ifndef branch compiles to nothing and the check still passes.
    '''
    if define is None:
        define = 'HAVE_%s' % v.upper()
    if msg is None:
        msg="Checking for variable %s" % v
    return CHECK_CODE(conf,
                      # we need to make sure the compiler doesn't
                      # optimize it out...
                      '''
                      #ifndef %s
                      void *_x; _x=(void *)&%s; return (int)_x;
                      #endif
                      return 0
                      ''' % (v, v),
                      execute=False,
                      link=False,
                      msg=msg,
                      local_include=False,
                      lib=lib,
                      headers=headers,
                      define=define,
                      always=always)
@conf
def CHECK_DECLS(conf, vars, reverse=False, headers=None, always=False):
    '''check a list of variable declarations, using the HAVE_DECL_xxx form
    of define

    When reverse==True then use HAVE_xxx_DECL instead of HAVE_DECL_xxx
    '''
    ret = True
    for v in TO_LIST(vars):
        if not reverse:
            define='HAVE_DECL_%s' % v.upper()
        else:
            define='HAVE_%s_DECL' % v.upper()
        if not CHECK_VARIABLE(conf, v,
                              define=define,
                              headers=headers,
                              msg='Checking for declaration of %s' % v,
                              always=always):
            # fall back: the name might be an enum constant, which is not
            # addressable and therefore fails the variable check above
            if not CHECK_CODE(conf,
                              '''
                              return (int)%s;
                              ''' % (v),
                              execute=False,
                              link=False,
                              msg='Checking for declaration of %s (as enum)' % v,
                              local_include=False,
                              headers=headers,
                              define=define,
                              always=always):
                ret = False
    return ret
def CHECK_FUNC(conf, f, link=True, lib=None, headers=None):
    '''check for a function

    Tries three strategies in order: link against an extern declaration
    (catching glibc stubs), detect a macro implementation, and finally a
    plain declaration check when linking is disabled.
    '''
    define='HAVE_%s' % f.upper()

    ret = False

    conf.COMPOUND_START('Checking for %s' % f)

    if link is None or link:
        ret = CHECK_CODE(conf,
                         # this is based on the autoconf strategy
                         '''
                         #define %s __fake__%s
                         #ifdef HAVE_LIMITS_H
                         # include <limits.h>
                         #else
                         # include <assert.h>
                         #endif
                         #undef %s
                         #if defined __stub_%s || defined __stub___%s
                         #error "bad glibc stub"
                         #endif
                         extern char %s();
                         int main() { return %s(); }
                         ''' % (f, f, f, f, f, f, f),
                         execute=False,
                         link=True,
                         addmain=False,
                         add_headers=False,
                         define=define,
                         local_include=False,
                         lib=lib,
                         headers=headers,
                         msg='Checking for %s' % f)

        if not ret:
            ret = CHECK_CODE(conf,
                             # it might be a macro
                             # we need to make sure the compiler doesn't
                             # optimize it out...
                             'void *__x = (void *)%s; return (int)__x' % f,
                             execute=False,
                             link=True,
                             addmain=True,
                             add_headers=True,
                             define=define,
                             local_include=False,
                             lib=lib,
                             headers=headers,
                             msg='Checking for macro %s' % f)

    if not ret and (link is None or not link):
        # last resort: just check the declaration exists
        ret = CHECK_VARIABLE(conf, f,
                             define=define,
                             headers=headers,
                             msg='Checking for declaration of %s' % f)

    conf.COMPOUND_END(ret)
    return ret
@conf
def CHECK_FUNCS(conf, list, link=True, lib=None, headers=None):
    """Check every function named in *list*; True only if all are found.

    Each function is probed unconditionally so every one gets its define.
    """
    all_found = True
    for func_name in TO_LIST(list):
        found = CHECK_FUNC(conf, func_name, link=link, lib=lib, headers=headers)
        all_found = all_found and found
    return all_found
@conf
def CHECK_SIZEOF(conf, vars, headers=None, define=None, critical=True):
    """Determine sizeof() for each type in *vars* via compile-time probing.

    Tries each power-of-two candidate size with a negative-array-size
    trick; the first candidate the type fits in is recorded as
    SIZEOF_<TYPE> (or *define* when given).  When *critical* is True a
    type whose size cannot be determined aborts configuration.
    """
    for type_name in TO_LIST(vars):
        macro = define
        if macro is None:
            macro = 'SIZEOF_%s' % type_name.upper().replace(' ', '_')
        ret = False
        for candidate in (1, 2, 4, 8, 16, 32):
            # array size goes negative (compile error) when sizeof > candidate
            probe = ('static int test_array[1 - 2 * !(((long int)(sizeof(%s))) <= %d)];'
                     % (type_name, candidate))
            if CHECK_CODE(conf,
                          probe,
                          define=macro,
                          quote=False,
                          headers=headers,
                          local_include=False,
                          msg="Checking if size of %s == %d" % (type_name, candidate)):
                conf.DEFINE(macro, candidate)
                ret = True
                break
        if not ret and critical:
            Logs.error("Couldn't determine size of '%s'" % type_name)
            sys.exit(1)
    return ret
@conf
def CHECK_VALUEOF(conf, v, headers=None, define=None):
    '''check the value of a variable/define

    Compiles and runs a tiny program that prints the value; returns the
    integer value on success, None when the check fails.
    '''
    ret = True
    v_define = define
    if v_define is None:
        v_define = 'VALUEOF_%s' % v.upper().replace(' ', '_')
    if CHECK_CODE(conf,
                  'printf("%%u", (unsigned)(%s))' % v,
                  define=v_define,
                  execute=True,
                  define_ret=True,
                  quote=False,
                  headers=headers,
                  local_include=False,
                  msg="Checking value of %s" % v):
        # define_ret stored the program's stdout in the environment
        return int(conf.env[v_define])

    return None
@conf
def CHECK_CODE(conf, code, define,
               always=False, execute=False, addmain=True,
               add_headers=True, mandatory=False,
               headers=None, msg=None, cflags='', includes='# .',
               local_include=True, lib=None, link=True,
               define_ret=False, quote=False,
               on_target=True):
    '''check if some code compiles and/or runs

    The workhorse behind most other checks.  On success *define* is set
    (to 1, or to the program output when define_ret is True); when
    *always* is set, failure records the define as 0 instead of leaving
    it unset.  Returns True/False.
    '''
    if CONFIG_SET(conf, define):
        # cached from a previous run
        return True

    if headers is not None:
        CHECK_HEADERS(conf, headers=headers, lib=lib)

    if add_headers:
        hdrs = header_list(conf, headers=headers, lib=lib)
    else:
        hdrs = ''
    if execute:
        execute = 1
    else:
        execute = 0

    # include all defines accumulated so far, so checks see earlier results
    defs = conf.get_config_header()

    if addmain:
        fragment='%s\n%s\n int main(void) { %s; return 0; }\n' % (defs, hdrs, code)
    else:
        fragment='%s\n%s\n%s\n' % (defs, hdrs, code)

    if msg is None:
        msg="Checking for %s" % define

    cflags = TO_LIST(cflags)

    if local_include:
        cflags.append('-I%s' % conf.curdir)

    if not link:
        type='nolink'
    else:
        type='cprogram'

    uselib = TO_LIST(lib)

    (ccflags, ldflags, cpppath) = library_flags(conf, uselib)

    includes = TO_LIST(includes)
    includes.extend(cpppath)

    uselib = [l.upper() for l in uselib]

    cflags.extend(ccflags)

    if on_target:
        # cross-compilation support: run test binaries via the cross runner
        exec_args = conf.SAMBA_CROSS_ARGS(msg=msg)
    else:
        exec_args = []

    conf.COMPOUND_START(msg)

    ret = conf.check(fragment=fragment,
                     execute=execute,
                     define_name = define,
                     mandatory = mandatory,
                     ccflags=cflags,
                     ldflags=ldflags,
                     includes=includes,
                     uselib=uselib,
                     type=type,
                     msg=msg,
                     quote=quote,
                     exec_args=exec_args,
                     define_ret=define_ret)
    if not ret and CONFIG_SET(conf, define):
        # sometimes conf.check() returns false, but it
        # sets the define. Maybe a waf bug?
        ret = True
    if ret:
        if not define_ret:
            conf.DEFINE(define, 1)
            conf.COMPOUND_END(True)
        else:
            # report the captured value instead of plain "ok"
            conf.COMPOUND_END(conf.env[define])
        return True
    if always:
        conf.DEFINE(define, 0)
    conf.COMPOUND_END(False)
    return False
@conf
def CHECK_STRUCTURE_MEMBER(conf, structname, member,
                           always=False, define=None, headers=None):
    '''check for a structure member

    Declares an instance and takes the member's address, so the check
    fails to compile exactly when the member does not exist.
    '''
    if define is None:
        define = 'HAVE_%s' % member.upper()
    return CHECK_CODE(conf,
                      '%s s; void *_x; _x=(void *)&s.%s' % (structname, member),
                      define,
                      execute=False,
                      link=False,
                      always=always,
                      headers=headers,
                      local_include=False,
                      msg="Checking for member %s in %s" % (member, structname))
@conf
def CHECK_CFLAGS(conf, cflags, fragment='int main(void) { return 0; }\n'):
    """Return True when the compiler accepts *cflags* for *fragment*.

    Compiles only (no link), so this is a pure flag-acceptance probe.
    """
    message = "Checking compiler accepts %s" % cflags
    return conf.check(fragment=fragment,
                      execute=0,
                      type='nolink',
                      ccflags=cflags,
                      msg=message)
@conf
def CHECK_LDFLAGS(conf, ldflags):
    """Return True when the linker accepts *ldflags* for a trivial program."""
    message = "Checking linker accepts %s" % ldflags
    return conf.check(fragment='int main(void) { return 0; }\n',
                      execute=0,
                      ldflags=ldflags,
                      msg=message)
@conf
def CONFIG_GET(conf, option):
    '''return the value of a configuration option, or None when unset'''
    # NOTE: the previous docstring ("return True if a configuration option
    # was found") was copied from CONFIG_SET and was wrong: this helper
    # returns the stored value itself.
    if option not in conf.env:
        return None
    return conf.env[option]
@conf
def CONFIG_SET(conf, option):
    """True when *option* is present and holds a meaningful value.

    None, [] and () count as unset.  Note that 0 and '' DO count as set --
    CHECK_CODE relies on this to cache negative results via DEFINE(d, 0).
    """
    if option not in conf.env:
        return False
    value = conf.env[option]
    return not (value is None or value == [] or value == ())
@conf
def CONFIG_RESET(conf, option):
    """Remove *option* from the configuration environment if present."""
    if option in conf.env:
        del conf.env[option]
# Expose the configuration helpers on the build context as well, so build
# rules can call bld.CONFIG_SET() etc. at build time, not only configure time.
Build.BuildContext.CONFIG_RESET = CONFIG_RESET
Build.BuildContext.CONFIG_SET = CONFIG_SET
Build.BuildContext.CONFIG_GET = CONFIG_GET
def library_flags(self, libs):
    '''work out flags from pkg_config

    Returns a (ccflags, ldflags, cpppath) tuple collected from the
    CCFLAGS_/LDFLAGS_/CPPPATH_<LIB> environment entries of each lib.
    '''
    ccflags = []
    ldflags = []
    cpppath = []
    for lib in TO_LIST(libs):
        # note that we do not add the -I and -L in here, as that is added by the waf
        # core. Adding it here would just change the order that it is put on the link line
        # which can cause system paths to be added before internal libraries
        extra_ccflags = TO_LIST(getattr(self.env, 'CCFLAGS_%s' % lib.upper(), []))
        extra_ldflags = TO_LIST(getattr(self.env, 'LDFLAGS_%s' % lib.upper(), []))
        extra_cpppath = TO_LIST(getattr(self.env, 'CPPPATH_%s' % lib.upper(), []))
        ccflags.extend(extra_ccflags)
        ldflags.extend(extra_ldflags)
        cpppath.extend(extra_cpppath)
    if 'EXTRA_LDFLAGS' in self.env:
        ldflags.extend(self.env['EXTRA_LDFLAGS'])

    # deduplicate while preserving first-seen order
    ccflags = unique_list(ccflags)
    ldflags = unique_list(ldflags)
    cpppath = unique_list(cpppath)
    return (ccflags, ldflags, cpppath)
@conf
def CHECK_LIB(conf, libs, mandatory=False, empty_decl=True, set_target=True, shlib=False):
    '''check if a set of libraries exist as system libraries

    returns the sublist of libs that do exist as a syslib or []

    Each found library is registered as a SYSLIB target and gets a
    HAVE_LIB<NAME> define; missing non-mandatory libraries are optionally
    registered as EMPTY so dependency lists referencing them still resolve.
    '''
    fragment= '''
int foo()
{
    int v = 2;
    return v*2;
}
'''
    ret = []
    liblist = TO_LIST(libs)
    for lib in liblist[:]:
        if GET_TARGET_TYPE(conf, lib) == 'SYSLIB':
            # already confirmed in an earlier call
            ret.append(lib)
            continue

        (ccflags, ldflags, cpppath) = library_flags(conf, lib)
        if shlib:
            res = conf.check(features='cc cshlib', fragment=fragment, lib=lib, uselib_store=lib, ccflags=ccflags, ldflags=ldflags, uselib=lib.upper())
        else:
            res = conf.check(lib=lib, uselib_store=lib, ccflags=ccflags, ldflags=ldflags, uselib=lib.upper())

        if not res:
            if mandatory:
                # BUGFIX: the message previously formatted the *builtin*
                # `list` (printing the type object); this function has no
                # function list, only the library name.
                Logs.error("Mandatory library '%s' not found" % lib)
                sys.exit(1)
            if empty_decl:
                # if it isn't a mandatory library, then remove it from dependency lists
                if set_target:
                    SET_TARGET_TYPE(conf, lib, 'EMPTY')
        else:
            conf.define('HAVE_LIB%s' % lib.upper().replace('-','_').replace('.','_'), 1)
            conf.env['LIB_' + lib.upper()] = lib
            if set_target:
                conf.SET_TARGET_TYPE(lib, 'SYSLIB')
            ret.append(lib)

    return ret
@conf
def CHECK_FUNCS_IN(conf, list, library, mandatory=False, checklibc=False,
                   headers=None, link=True, empty_decl=True, set_target=True):
    """
    check that the functions in 'list' are available in 'library'
    if they are, then make that library available as a dependency

    if the library is not available and mandatory==True, then
    raise an error.

    If the library is not available and mandatory==False, then
    add the library to the list of dependencies to remove from
    build rules

    optionally check for the functions first in libc
    """
    remaining = TO_LIST(list)
    liblist   = TO_LIST(library)

    # check if some already found
    for f in remaining[:]:
        if CONFIG_SET(conf, 'HAVE_%s' % f.upper()):
            remaining.remove(f)

    # see if the functions are in libc
    if checklibc:
        for f in remaining[:]:
            if CHECK_FUNC(conf, f, link=True, headers=headers):
                remaining.remove(f)

    if remaining == []:
        # every function already available: the libraries are not needed
        for lib in liblist:
            if GET_TARGET_TYPE(conf, lib) != 'SYSLIB' and empty_decl:
                SET_TARGET_TYPE(conf, lib, 'EMPTY')
        return True

    checklist = conf.CHECK_LIB(liblist, empty_decl=empty_decl, set_target=set_target)
    for lib in liblist[:]:
        if not lib in checklist and mandatory:
            Logs.error("Mandatory library '%s' not found for functions '%s'" % (lib, list))
            sys.exit(1)

    ret = True
    for f in remaining:
        if not CHECK_FUNC(conf, f, lib=' '.join(checklist), headers=headers, link=link):
            ret = False
    return ret
@conf
def IN_LAUNCH_DIR(conf):
    """True when this rule is being run from the directory waf was launched in."""
    current = os.path.realpath(conf.curdir)
    launched = os.path.realpath(Options.launch_dir)
    return current == launched
Options.Handler.IN_LAUNCH_DIR = IN_LAUNCH_DIR
@conf
def SAMBA_CONFIG_H(conf, path=None):
    '''write out config.h in the right directory

    Also the central place where developer/debug/sanitizer compiler flags
    are assembled; each flag is probed with testflags=True so unsupported
    compilers simply skip it.
    '''
    # we don't want to produce a config.h in places like lib/replace
    # when we are building projects that depend on lib/replace
    if not IN_LAUNCH_DIR(conf):
        return

    if conf.CHECK_CFLAGS(['-fstack-protector']) and conf.CHECK_LDFLAGS(['-fstack-protector']):
        conf.ADD_CFLAGS('-fstack-protector')
        conf.ADD_LDFLAGS('-fstack-protector')

    if Options.options.debug:
        conf.ADD_CFLAGS('-g', testflags=True)

    if Options.options.developer:
        conf.env.DEVELOPER_MODE = True

        conf.ADD_CFLAGS('-g', testflags=True)
        conf.ADD_CFLAGS('-Wall', testflags=True)
        conf.ADD_CFLAGS('-Wshadow', testflags=True)
        conf.ADD_CFLAGS('-Wmissing-prototypes', testflags=True)
        conf.ADD_CFLAGS('-Wcast-align -Wcast-qual', testflags=True)
        conf.ADD_CFLAGS('-fno-common', testflags=True)

        conf.ADD_CFLAGS('-Werror=address', testflags=True)
        # we add these here to ensure that -Wstrict-prototypes is not set during configure
        conf.ADD_CFLAGS('-Werror=strict-prototypes -Wstrict-prototypes',
                        testflags=True)
        conf.ADD_CFLAGS('-Werror=write-strings -Wwrite-strings',
                        testflags=True)
        conf.ADD_CFLAGS('-Werror-implicit-function-declaration',
                        testflags=True)
        conf.ADD_CFLAGS('-Werror=pointer-arith -Wpointer-arith',
                        testflags=True)
        conf.ADD_CFLAGS('-Werror=declaration-after-statement -Wdeclaration-after-statement',
                        testflags=True)
        conf.ADD_CFLAGS('-Werror=return-type -Wreturn-type',
                        testflags=True)
        conf.ADD_CFLAGS('-Werror=uninitialized -Wuninitialized',
                        testflags=True)

        conf.ADD_CFLAGS('-Wformat=2 -Wno-format-y2k', testflags=True)
        # This check is because for ldb_search(), a NULL format string
        # is not an error, but some compilers complain about that.
        if CHECK_CFLAGS(conf, ["-Werror=format", "-Wformat=2"], '''
int testformat(char *format, ...) __attribute__ ((format (__printf__, 1, 2)));

int main(void) {
        testformat(0);
        return 0;
}

'''):
            if not 'EXTRA_CFLAGS' in conf.env:
                conf.env['EXTRA_CFLAGS'] = []
            conf.env['EXTRA_CFLAGS'].extend(TO_LIST("-Werror=format"))

    if Options.options.picky_developer:
        # stored separately so per-target allow_warnings can opt out
        conf.ADD_NAMED_CFLAGS('PICKY_CFLAGS', '-Werror -Wno-error=deprecated-declarations', testflags=True)

    if Options.options.fatal_errors:
        conf.ADD_CFLAGS('-Wfatal-errors', testflags=True)

    if Options.options.pedantic:
        conf.ADD_CFLAGS('-W', testflags=True)

    if Options.options.address_sanitizer:
        conf.ADD_CFLAGS('-fno-omit-frame-pointer -O1 -fsanitize=address', testflags=True)
        conf.ADD_LDFLAGS('-fsanitize=address', testflags=True)
        conf.env['ADDRESS_SANITIZER'] = True


    # Let people pass an additional ADDITIONAL_{CFLAGS,LDFLAGS}
    # environment variables which are only used the for final build.
    #
    # The CFLAGS and LDFLAGS environment variables are also
    # used for the configure checks which might impact their results.
    conf.add_os_flags('ADDITIONAL_CFLAGS')
    if conf.env.ADDITIONAL_CFLAGS and conf.CHECK_CFLAGS(conf.env['ADDITIONAL_CFLAGS']):
        conf.env['EXTRA_CFLAGS'].extend(conf.env['ADDITIONAL_CFLAGS'])
    conf.add_os_flags('ADDITIONAL_LDFLAGS')
    if conf.env.ADDITIONAL_LDFLAGS and conf.CHECK_LDFLAGS(conf.env['ADDITIONAL_LDFLAGS']):
        conf.env['EXTRA_LDFLAGS'].extend(conf.env['ADDITIONAL_LDFLAGS'])

    if path is None:
        conf.write_config_header('config.h', top=True)
    else:
        conf.write_config_header(path)
    conf.SAMBA_CROSS_CHECK_COMPLETE()
@conf
def CONFIG_PATH(conf, name, default):
    '''setup a configurable path

    An absolute *default* is used as-is; a relative one is appended to the
    installation PREFIX.  An already-set value is left untouched.
    '''
    if not name in conf.env:
        # BUGFIX: startswith() is safe for an empty default string, where
        # default[0] would raise IndexError.
        if default.startswith('/'):
            conf.env[name] = default
        else:
            conf.env[name] = conf.env['PREFIX'] + default
@conf
def ADD_NAMED_CFLAGS(conf, name, flags, testflags=False):
    '''add some CFLAGS to the command line

    optionally set testflags to ensure all the flags work
    (unsupported flags are silently dropped in that case)
    '''
    if testflags:
        ok_flags=[]
        for f in flags.split():
            if CHECK_CFLAGS(conf, f):
                ok_flags.append(f)
        flags = ok_flags
    if not name in conf.env:
        conf.env[name] = []
    conf.env[name].extend(TO_LIST(flags))
@conf
def ADD_CFLAGS(conf, flags, testflags=False):
    '''add some CFLAGS to the command line

    optionally set testflags to ensure all the flags work
    '''
    # convenience wrapper targeting the global EXTRA_CFLAGS list
    ADD_NAMED_CFLAGS(conf, 'EXTRA_CFLAGS', flags, testflags=testflags)
@conf
def ADD_LDFLAGS(conf, flags, testflags=False):
    '''add some LDFLAGS to the command line

    optionally set testflags to ensure all the flags work

    this will return the flags that are added, if any
    '''
    if testflags:
        # keep only the flags the linker actually accepts
        ok_flags=[]
        for f in flags.split():
            if CHECK_LDFLAGS(conf, f):
                ok_flags.append(f)
        flags = ok_flags
    if not 'EXTRA_LDFLAGS' in conf.env:
        conf.env['EXTRA_LDFLAGS'] = []
    conf.env['EXTRA_LDFLAGS'].extend(TO_LIST(flags))
    return flags
@conf
def ADD_EXTRA_INCLUDES(conf, includes):
    '''add some extra include directories to all builds'''
    if not 'EXTRA_INCLUDES' in conf.env:
        conf.env['EXTRA_INCLUDES'] = []
    conf.env['EXTRA_INCLUDES'].extend(TO_LIST(includes))
def CURRENT_CFLAGS(bld, target, cflags, allow_warnings=False, hide_symbols=False):
    '''work out the current flags. local flags are added first

    Order: target-local cflags, then global EXTRA_CFLAGS, then (unless the
    target allows warnings) PICKY_CFLAGS, then the visibility flag when
    symbol hiding is requested and supported.
    '''
    # NOTE: previous version shadowed the builtin `list` and carried stray
    # semicolons; behaviour is unchanged.
    ret = TO_LIST(cflags)
    if 'EXTRA_CFLAGS' in bld.env:
        ret.extend(bld.env['EXTRA_CFLAGS'])
    if not allow_warnings and 'PICKY_CFLAGS' in bld.env:
        ret.extend(bld.env['PICKY_CFLAGS'])
    if hide_symbols and bld.env.HAVE_VISIBILITY_ATTR:
        ret.append(bld.env.VISIBILITY_CFLAGS)
    return ret
@conf
def CHECK_CC_ENV(conf):
    """trim whitespaces from 'CC'.
    The build farm sometimes puts a space at the start"""
    if os.environ.get('CC'):
        conf.env.CC = TO_LIST(os.environ.get('CC'))
        if len(conf.env.CC) == 1:
            # make for nicer logs if just a single command
            conf.env.CC = conf.env.CC[0]
@conf
def SETUP_CONFIGURE_CACHE(conf, enable):
    '''enable/disable cache of configure results'''
    if enable:
        # when -C is chosen, we will use a private cache and will
        # not look into system includes. This roughtly matches what
        # autoconf does with -C
        cache_path = os.path.join(conf.blddir, '.confcache')
        mkdir_p(cache_path)
        Options.cache_global = os.environ['WAFCACHE'] = cache_path
    else:
        # when -C is not chosen we will not cache configure checks
        # We set the recursion limit low to prevent waf from spending
        # a lot of time on the signatures of the files.
        Options.cache_global = os.environ['WAFCACHE'] = ''
        preproc.recursion_limit = 1
    # in either case we don't need to scan system includes
    preproc.go_absolute = False
@conf
def SAMBA_CHECK_UNDEFINED_SYMBOL_FLAGS(conf):
    '''probe linker flags controlling undefined-symbol handling'''
    # we don't want any libraries or modules to rely on runtime
    # resolution of symbols
    if not sys.platform.startswith("openbsd"):
        conf.env.undefined_ldflags = conf.ADD_LDFLAGS('-Wl,-no-undefined', testflags=True)

    if not sys.platform.startswith("openbsd") and conf.env.undefined_ignore_ldflags == []:
        # Darwin-style linkers spell "allow undefined" differently
        if conf.CHECK_LDFLAGS(['-undefined', 'dynamic_lookup']):
            conf.env.undefined_ignore_ldflags = ['-undefined', 'dynamic_lookup']
@conf
def CHECK_CFG(self, *k, **kw):
    '''thin pass-through to waf's pkg-config style check_cfg()'''
    return self.check_cfg(*k, **kw)
| gpl-3.0 |
thinkopensolutions/geraldo | geraldo/charts.py | 10 | 17669 | import re, random, decimal
from reportlab.graphics.shapes import Drawing, String
from reportlab.graphics.charts.barcharts import HorizontalBarChart as OriginalHorizBarChart
from reportlab.graphics.charts.barcharts import VerticalBarChart as OriginalVertBarChart
from reportlab.graphics.charts.barcharts import HorizontalBarChart3D as OriginalHorizBarChart3D
from reportlab.graphics.charts.barcharts import VerticalBarChart3D as OriginalVertBarChart3D
from reportlab.graphics.charts.doughnut import Doughnut as OriginalDoughnutChart
from reportlab.graphics.charts.linecharts import HorizontalLineChart as OriginalLineChart
from reportlab.graphics.charts.piecharts import Pie as OriginalPieChart
from reportlab.graphics.charts.spider import SpiderChart as OriginalSpiderChart
from reportlab.graphics.charts.legends import Legend
from reportlab.lib.colors import HexColor, getAllNamedColors
from utils import cm, memoize, get_attr_value
from cross_reference import CrossReferenceMatrix, CROSS_COLS, CROSS_ROWS
from graphics import Graphic
DEFAULT_TITLE_HEIGHT = 1*cm
class BaseChart(Graphic):
    """Abstract chart class

    Subclasses set chart_class to a reportlab chart type; instances pull
    their values from a CrossReferenceMatrix built over report data.
    """
    chart_class = None          # reportlab chart class used by subclasses
    title = None                # string or dict of title attributes
    colors = None               # None = auto palette, False = no legend colors
    _width = 8*cm
    _height = 7*cm
    rows_attribute = None       # attribute used for cross-reference rows
    cols_attribute = None       # attribute used for cross-reference columns
    cell_attribute = None       # attribute summarized inside each cell
    action = 'first'            # aggregation action on the matrix
    data = None
    chart_style = None # Additional chart attributes
    axis_labels = None          # list / callable / attribute name
    axis_labels_angle = None
    legend_labels = False       # False = no legend; list/dict/callable/attr
    values_labels = ' %s '
    replace_none_by_zero = True
    round_values = False
    summarize_by = None # Can be None, CROSS_ROWS or CROSS_COLS
    def __init__(self, **kwargs):
        """Accept any chart attribute as a keyword; 'style' is an alias
        for chart_style."""
        # Set instance attributes
        for k,v in kwargs.items():
            if k == 'style':
                setattr(self, 'chart_style', v)
            else:
                setattr(self, k, v)

        # Prepare the title: a bare string becomes {'text': ...} with defaults
        if self.title:
            self.title = isinstance(self.title, dict) and self.title or {'text': self.title}
            self.title.setdefault('fontSize', 14)
            self.title.setdefault('textAnchor', 'middle')
            self.title.setdefault('height', DEFAULT_TITLE_HEIGHT)

        # Prepare the colors
        if self.colors == False:
            # colors explicitly disabled -> no legend either
            self.legend_labels = None
        elif not self.colors:
            self.colors = self.get_available_colors()
        else:
            self.prepare_colors()

        # Prepare chart additional kwargs
        self.chart_style = self.chart_style or {}
    def clone(self):
        """Copy this chart, carrying over all chart-specific attributes
        on top of the base Graphic clone."""
        new = super(BaseChart, self).clone()
        new.chart_class = self.chart_class
        new.title = self.title
        new.colors = self.colors
        new.rows_attribute = self.rows_attribute
        new.cols_attribute = self.cols_attribute
        new.cell_attribute = self.cell_attribute
        new.action = self.action
        new.data = self.data
        new.chart_style = self.chart_style
        new.axis_labels = self.axis_labels
        new.axis_labels_angle = self.axis_labels_angle
        new.legend_labels = self.legend_labels
        new.values_labels = self.values_labels
        new.replace_none_by_zero = self.replace_none_by_zero
        new.round_values = self.round_values
        new.summarize_by = self.summarize_by

        return new
    # DRAWING METHODS

    @memoize
    def get_available_colors(self):
        """Returns a list of available colors"""
        # Get reportlab available colors
        colors = getAllNamedColors()

        # Remove bad colors (poor contrast against typical backgrounds)
        colors.pop('white', None)
        colors.pop('black', None)

        # Returns only the colors values (without their names)
        colors = colors.values()

        # Shuffle colors list so series colors vary between runs
        random.shuffle(colors)

        return colors
def prepare_colors(self):
colors = []
for color in self.colors:
try:
colors.append(HexColor(color))
except ValueError:
pass
self.colors = colors + self.get_available_colors()
    def get_drawing(self, chart):
        """Create and returns the drawing, to be generated"""
        drawing = Drawing(self.width, self.height)

        # Make the title
        title = self.make_title(drawing)

        # Setting chart dimensions
        chart.height = self.height
        chart.width = self.width

        # Make the legend
        legend = self.make_legend(drawing, chart)

        if title:
            # shrink the chart and push it down to leave room for the title
            chart.height -= self.title.get('height', DEFAULT_TITLE_HEIGHT)
            self.top += self.title.get('height', DEFAULT_TITLE_HEIGHT)

        # Setting additional chart attributes
        self.set_chart_style(chart)

        # Adds the chart to drawing to return
        drawing.add(chart)

        # Resizes to make sure everything is fitting
        drawing = drawing.resized()

        return drawing
def make_legend(self, drawing, chart):
    """Attach a Legend to the drawing; returns None when legends are disabled."""
    if not self.legend_labels:
        return
    labels = self.get_legend_labels()
    legend = Legend()
    # Pair each label with a color, one column per entry.
    legend.colorNamePairs = zip(self.colors[:len(labels)], labels)
    legend.columnMaximum = len(legend.colorNamePairs)
    legend.deltay = 5
    legend.alignment = 'right'
    legend.x = drawing.width + 40
    legend.y = drawing.height - (self.title and self.title.get('height', DEFAULT_TITLE_HEIGHT) or 0)
    # A dict-valued legend_labels may carry extra Legend attributes.
    if isinstance(self.legend_labels, dict):
        for key, value in self.legend_labels.items():
            if key != 'labels' and value:
                setattr(legend, key, value)
    drawing.add(legend)
    return legend
def get_legend_labels(self):
    """Resolve the legend labels as a list of unicode strings."""
    # When summarizing, the legend mirrors the axis labels.
    if self.summarize_by:
        return self.get_axis_labels()
    # Base labels: explicit dict/list, otherwise the matrix row keys.
    if isinstance(self.legend_labels, dict) and self.legend_labels.get('labels', None):
        labels = self.legend_labels['labels']
    elif isinstance(self.legend_labels, (tuple, list)):
        labels = self.legend_labels
    else:
        labels = self.get_cross_data().rows()
    # Calculated labels: a callable maps each base label, a string names
    # an attribute to read from the first matching matrix cell.
    if callable(self.legend_labels):
        labels = [self.legend_labels(self, label, num) for num, label in enumerate(labels)]
    elif isinstance(self.legend_labels, basestring):
        labels = [self.get_cross_data().first(self.legend_labels, col=label) for label in labels]
    return map(unicode, labels)
def get_axis_labels(self):
    """Resolve the category axis labels as a list of unicode strings."""
    # Base labels: explicit dict/list, otherwise matrix row or column keys
    # depending on the summarize direction.
    if isinstance(self.axis_labels, dict) and self.axis_labels.get('labels', None):
        labels = self.axis_labels['labels']
    elif isinstance(self.axis_labels, (tuple, list)):
        labels = self.axis_labels
    elif self.summarize_by == CROSS_ROWS:
        labels = self.get_cross_data().rows()
    else:
        labels = self.get_cross_data().cols()
    # Calculated labels: callable maps each base label; a string names an
    # attribute fetched from the first matching matrix cell.
    if callable(self.axis_labels):
        labels = [self.axis_labels(self, label, num) for num, label in enumerate(labels)]
    elif isinstance(self.axis_labels, basestring):
        if self.summarize_by == CROSS_ROWS:
            labels = [self.get_cross_data().first(self.axis_labels, row=label) for label in labels]
        else:
            labels = [self.get_cross_data().first(self.axis_labels, col=label) for label in labels]
    return map(unicode, labels)
def make_title(self, drawing):
    """Add the title String to the drawing; returns None when no title is set."""
    if not self.title:
        return
    # Copy so the defaults below never mutate the configured dict.
    kwargs = self.title.copy()
    # Default position: horizontally centered at the drawing's top edge.
    kwargs.setdefault('x', drawing.width / 2)
    kwargs.setdefault('y', drawing.height)
    title = String(**kwargs)
    drawing.add(title)
    return title
# CHART METHODS
def get_cross_data(self, data=None):
    """Lazily build and cache the CrossReferenceMatrix used by this chart."""
    if not getattr(self, '_cross_data', None):
        data = data or self.data
        # A string names an attribute on the bound instance.
        if isinstance(data, basestring):
            data = get_attr_value(self.instance, data)
        if not isinstance(data, CrossReferenceMatrix):
            if self.rows_attribute:  # and self.cols_attribute:
                data = CrossReferenceMatrix(
                    data,
                    self.rows_attribute,
                    self.cols_attribute,
                    decimal_as_float=True,
                )
        self._cross_data = data
    return self._cross_data
def get_data(self):
    """Build and post-process the numeric data the chart will plot."""
    data = self.data
    if not data:
        # TODO: Change to support current objects list (for subreports and groups)
        data = self.report.queryset
    # Transform to a cross-reference matrix, then summarize or materialize.
    data = self.get_cross_data(data)
    if self.summarize_by == CROSS_ROWS:
        data = data.summarize_rows(self.cell_attribute, self.action)
    elif self.summarize_by == CROSS_COLS:
        data = data.summarize_cols(self.cell_attribute, self.action)
    else:
        data = data.matrix(self.cell_attribute, self.action)

    def none_to_zero(value):
        # None becomes 0, for scalars and for row sequences alike.
        if value is None:
            return 0
        elif isinstance(value, (list, tuple)):
            return [cell or 0 for cell in value]
        return value

    def round_values(value):
        # Round floats/decimals to the nearest integer.
        if isinstance(value, (float, decimal.Decimal)):
            return int(round(value))
        elif isinstance(value, (list, tuple)):
            return map(int, map(round, value))
        return value

    if self.replace_none_by_zero:
        data = map(none_to_zero, data)
    if self.round_values:
        data = map(round_values, data)
    # Remember the largest value; used later to derive a value-axis step.
    if data:
        if isinstance(data[0], int):
            self._max_value = max(data)
        elif isinstance(data[0], (list, tuple)):
            self._max_value = max(map(max, data))
    return data
def set_chart_attributes(self, chart):
    """Apply category-axis labels (and their angle) to the chart."""
    # NOTE(review): the original nesting of the angle block was ambiguous in
    # this copy; it is applied only when axis labels exist -- confirm upstream.
    if self.axis_labels:
        chart.categoryAxis.categoryNames = self.get_axis_labels()
        if self.axis_labels_angle is not None:
            chart.categoryAxis.labels.angle = self.axis_labels_angle
            chart.categoryAxis.labels.boxAnchor = 'ne'
def set_chart_style(self, chart):
    """Apply arbitrary chart attributes configured in ``self.chart_style``."""
    if self.chart_style:
        for attr_name, attr_value in self.chart_style.items():
            setattr(chart, attr_name, attr_value)
def create_chart(self):
    """Instantiate and return the configured reportlab chart class."""
    return self.chart_class()
def render(self):
    """Build the chart and return its Drawing, or None when there is no data."""
    data = self.get_data()
    if not data:
        return
    chart = self.create_chart()
    chart.data = data
    self.set_chart_attributes(chart)
    return self.get_drawing(chart)
class BaseMatrixChart(BaseChart):
    """Abstract chart class to support matrix charts"""

    def get_data(self):
        data = super(BaseMatrixChart, self).get_data()
        # Summarized data is a flat vector; matrix charts expect rows.
        if data and self.summarize_by:
            data = [data]
        return data
class LineChart(BaseMatrixChart):
    """Line chart rendered through reportlab's LineChart class."""
    chart_class = OriginalLineChart

    def set_chart_attributes(self, chart):
        super(LineChart, self).set_chart_attributes(chart)
        # Per-point value labels (list or dict with a 'labels' entry).
        if isinstance(self.values_labels, (tuple, list)):
            self.chart_style.setdefault('lineLabelFormat', self.values_labels)
        elif isinstance(self.values_labels, dict) and self.values_labels.get('labels', None):
            self.chart_style.setdefault('lineLabelFormat', self.values_labels['labels'])
        else:
            self.chart_style.pop('lineLabelFormat', None)
        # Line colors, one per series.
        if self.colors:
            for num, color in enumerate(self.colors):
                try:
                    chart.lines[num].strokeColor = color
                except IndexError:
                    break
        # Optional lower bound for the value axis.
        if getattr(self, 'y_axis_min_value', None) is not None:
            chart.valueAxis.valueMin = self.y_axis_min_value
        # Value-axis step: an explicit setting wins...
        if getattr(self, 'y_axis_step_value', None):
            chart.valueAxis.valueStep = self.y_axis_step_value
        # ...otherwise derive an integer step from the largest plotted value.
        elif self.round_values and getattr(self, '_max_value', None):
            chart.valueAxis.valueStep = round(self._max_value / 4)
class BarChart(BaseMatrixChart):
    """Bar chart; the concrete reportlab class depends on orientation/3D."""
    chart_class = None
    horizontal = False  # If not horizontal, the chart is vertical (default)
    is3d = False

    def __init__(self, *args, **kwargs):
        super(BarChart, self).__init__(*args, **kwargs)
        # Pick a concrete reportlab class from the orientation/3D flags when
        # one was not provided explicitly.
        if not self.chart_class:
            if self.horizontal and self.is3d:
                self.chart_class = OriginalHorizBarChart3D
            elif self.horizontal:
                self.chart_class = OriginalHorizBarChart
            elif self.is3d:
                self.chart_class = OriginalVertBarChart3D
            else:
                self.chart_class = OriginalVertBarChart

    def clone(self):
        new = super(BarChart, self).clone()
        new.horizontal = self.horizontal
        new.is3d = self.is3d
        return new

    def set_chart_attributes(self, chart):
        super(BarChart, self).set_chart_attributes(chart)
        # Per-bar value labels.
        if self.values_labels:
            if isinstance(self.values_labels, (tuple, list)):
                self.chart_style.setdefault('barLabelFormat', self.values_labels)
            elif isinstance(self.values_labels, dict) and self.values_labels.get('labels', None):
                self.chart_style.setdefault('barLabelFormat', self.values_labels['labels'])
            # Anchor labels against the end of the bars.
            if self.horizontal:
                chart.barLabels.boxAnchor = 'w'
            else:
                chart.barLabels.boxAnchor = 's'
        else:
            self.chart_style.pop('barLabelFormat', None)
        # No outline around the bars.
        chart.bars.strokeWidth = 0
        # Forces bars to start from 0 (instead of lower value)
        chart.valueAxis.forceZero = 1
        # Shows axis X labels
        if not self.summarize_by:  # XXX
            chart.categoryAxis.categoryNames = self.get_axis_labels()
        # Bar colors, one per series.
        if self.colors:
            for num, color in enumerate(self.colors):
                try:
                    chart.bars[num].fillColor = color
                except IndexError:
                    break

    def get_data(self):
        data = super(BarChart, self).get_data()
        # One single-value row per bar forces a distinct color for each bar.
        if self.summarize_by and data:
            data = [[value] for value in data[0]]
        return data
class HorizontalBarChart(BarChart):
    """Same as BarChart, but laid out horizontally."""
    horizontal = True
class SpiderChart(BaseMatrixChart):
    """Spider (radar) chart rendered through reportlab's SpiderChart."""
    chart_class = OriginalSpiderChart

    def set_chart_attributes(self, chart):
        # Spider charts label the spokes directly instead of a category axis.
        chart.labels = self.get_axis_labels()
        # Strand colors, one per series.
        if self.colors:
            for num, color in enumerate(self.colors):
                try:
                    chart.strands[num].fillColor = color
                except IndexError:
                    break
class PieChart(BaseChart):
    """Pie chart; always plots a single summarized series."""
    chart_class = OriginalPieChart
    # True (pop out the largest slice), a slice index, or a callable.
    slice_popout = None

    def __init__(self, **kwargs):
        super(PieChart, self).__init__(**kwargs)
        # Pies plot one series, so summarizing is mandatory.
        if not self.summarize_by:
            self.summarize_by = CROSS_ROWS

    def set_chart_attributes(self, chart):
        # Slice colors, one per slice.
        if self.colors:
            for num, color in enumerate(self.colors):
                try:
                    chart.slices[num].fillColor = color
                except IndexError:
                    break
        # Decide which slice (if any) should pop out of the pie.
        # NOTE: `== True` is deliberate -- an integer index of 1 must fall
        # through to the isinstance branch... except 1 == True in Python, so
        # index 1 actually selects the largest slice; preserved as-is.
        pos = -1
        if self.slice_popout == True:
            data = self.get_data()
            pos = data.index(max(data))
        elif isinstance(self.slice_popout, int):
            pos = self.slice_popout
        elif callable(self.slice_popout):
            pos = self.slice_popout(self, chart)
        if pos >= 0:
            chart.slices[pos].popout = 20
        # Default labels
        chart.labels = self.get_axis_labels()
        # A dict-valued values_labels carries slice attributes and/or labels.
        if isinstance(self.values_labels, dict):
            for key, value in self.values_labels.items():
                if key == 'labels' and value:
                    chart.labels = value
                else:
                    setattr(chart.slices, key, value)

    def clone(self):
        new = super(PieChart, self).clone()
        new.slice_popout = self.slice_popout
        return new

    def get_drawing(self, chart):
        # Percent action: append the percentage to each slice label.
        if self.action == 'percent':
            chart.labels = ['%s - %s%%' % (label, val) for label, val in zip(chart.labels, chart.data)]
        return super(PieChart, self).get_drawing(chart)
class DoughnutChart(PieChart):
    """A pie chart rendered with a hole in the middle."""
    chart_class = OriginalDoughnutChart
| lgpl-3.0 |
jbzdak/edx-platform | common/lib/xmodule/xmodule/textannotation_module.py | 23 | 6794 | """Text annotation module"""
from lxml import etree
from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.raw_module import RawDescriptor
from xblock.core import Scope, String
from xmodule.annotator_mixin import get_instructions
from xmodule.annotator_token import retrieve_token
from xblock.fragment import Fragment
import textwrap
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
class AnnotatableFields(object):
    """Fields for `TextModule` and `TextDescriptor`."""
    # NOTE(review): the internal indentation of the dedent block below was
    # lost in this copy; verify the XML layout against upstream.
    data = String(
        help=_("XML data for the annotation"),
        scope=Scope.content,
        default=textwrap.dedent("""\
        <annotatable>
            <instructions>
                <p>
                    Add the instructions to the assignment here.
                </p>
            </instructions>
            <p>
                Lorem ipsum dolor sit amet, at amet animal petentium nec. Id augue nemore postulant mea. Ex eam dicant noluisse expetenda, alia admodum abhorreant qui et. An ceteros expetenda mea, tale natum ipsum quo no, ut pro paulo alienum noluisse.
            </p>
        </annotatable>
        """))
    display_name = String(
        display_name=_("Display Name"),
        help=_("Display name for this module"),
        scope=Scope.settings,
        default=_('Text Annotation'),
    )
    instructor_tags = String(
        display_name=_("Tags for Assignments"),
        help=_("Add tags that automatically highlight in a certain color using the comma-separated form, i.e. imagery:red,parallelism:blue"),
        scope=Scope.settings,
        default='imagery:red,parallelism:blue',
    )
    source = String(
        display_name=_("Source/Citation"),
        help=_("Optional for citing source of any material used. Automatic citation can be done using <a href=\"http://easybib.com\">EasyBib</a>"),
        scope=Scope.settings,
        default='None',
    )
    diacritics = String(
        display_name=_("Diacritic Marks"),
        help=_("Add diacritic marks to be added to a text using the comma-separated form, i.e. markname;urltomark;baseline,markname2;urltomark2;baseline2"),
        scope=Scope.settings,
        default='',
    )
    annotation_storage_url = String(
        help=_("Location of Annotation backend"),
        scope=Scope.settings,
        default="http://your_annotation_storage.com",
        display_name=_("Url for Annotation Storage")
    )
    annotation_token_secret = String(
        help=_("Secret string for annotation storage"),
        scope=Scope.settings,
        default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
        display_name=_("Secret Token String for Annotation")
    )
    default_tab = String(
        display_name=_("Default Annotations Tab"),
        help=_("Select which tab will be the default in the annotations table: myNotes, Instructor, or Public."),
        scope=Scope.settings,
        default="myNotes",
    )
    # currently only supports one instructor, will build functionality for multiple later
    instructor_email = String(
        display_name=_("Email for 'Instructor' Annotations"),
        help=_("Email of the user that will be attached to all annotations that will be found in 'Instructor' tab."),
        scope=Scope.settings,
        default="",
    )
    annotation_mode = String(
        display_name=_("Mode for Annotation Tool"),
        help=_("Type in number corresponding to following modes: 'instructor' or 'everyone'"),
        scope=Scope.settings,
        default="everyone",
    )
class TextAnnotationModule(AnnotatableFields, XModule):
    ''' Text Annotation Module '''
    js = {'coffee': [],
          'js': []}
    css = {'scss': [resource_string(__name__, 'css/annotatable/display.scss')]}
    icon_class = 'textannotation'

    def __init__(self, *args, **kwargs):
        super(TextAnnotationModule, self).__init__(*args, **kwargs)
        # Parse the XML content once and extract the optional instructions.
        xmltree = etree.fromstring(self.data)
        self.instructions = self._extract_instructions(xmltree)
        self.content = etree.tostring(xmltree, encoding='unicode')
        self.user_email = ""
        self.is_course_staff = False
        if self.runtime.get_user_role() in ['instructor', 'staff']:
            self.is_course_staff = True
        if self.runtime.get_real_user is not None:
            try:
                self.user_email = self.runtime.get_real_user(self.runtime.anonymous_student_id).email
            except Exception:  # pylint: disable=broad-except
                self.user_email = _("No email address found.")

    def _extract_instructions(self, xmltree):
        """ Removes <instructions> from the xmltree and returns them as a string, otherwise None. """
        return get_instructions(xmltree)

    def student_view(self, context):
        """ Renders parameters to template. """
        context = {
            'course_key': self.runtime.course_id,
            'display_name': self.display_name_with_default,
            'tag': self.instructor_tags,
            'source': self.source,
            'instructions_html': self.instructions,
            'content_html': self.content,
            'token': retrieve_token(self.user_email, self.annotation_token_secret),
            'diacritic_marks': self.diacritics,
            'annotation_storage': self.annotation_storage_url,
            'default_tab': self.default_tab,
            'instructor_email': self.instructor_email,
            'annotation_mode': self.annotation_mode,
            'is_course_staff': self.is_course_staff,
        }
        fragment = Fragment(self.system.render_template('textannotation.html', context))
        # TinyMCE already exists in Studio so we should not load the files again
        # get_real_user always returns "None" in Studio since its runtimes contains no anonymous ids
        if self.runtime.get_real_user is not None:
            fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/tinymce.full.min.js")
            fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/jquery.tinymce.min.js")
        return fragment
class TextAnnotationDescriptor(AnnotatableFields, RawDescriptor):
    ''' Text Annotation Descriptor '''
    module_class = TextAnnotationModule
    mako_template = "widgets/raw-edit.html"

    @property
    def non_editable_metadata_fields(self):
        # Storage URL and token secret are deployment configuration, so keep
        # them out of the Studio metadata editor.
        non_editable_fields = super(TextAnnotationDescriptor, self).non_editable_metadata_fields
        non_editable_fields.extend([
            TextAnnotationDescriptor.annotation_storage_url,
            TextAnnotationDescriptor.annotation_token_secret,
        ])
        return non_editable_fields
| agpl-3.0 |
auready/django | django/contrib/gis/db/backends/postgis/pgraster.py | 491 | 5071 | import binascii
import struct
from django.forms import ValidationError
from .const import (
GDAL_TO_POSTGIS, GDAL_TO_STRUCT, POSTGIS_HEADER_STRUCTURE, POSTGIS_TO_GDAL,
STRUCT_SIZE,
)
def pack(structure, data):
    """
    Pack data into hex string with little endian format.
    """
    binary = struct.pack('<' + structure, *data)
    return binascii.hexlify(binary).upper()
def unpack(structure, data):
    """
    Unpack little endian hexlified binary string into a list.
    """
    binary = binascii.unhexlify(data)
    return struct.unpack('<' + structure, binary)
def chunk(data, index):
    """
    Split a string into two parts at the input index.
    """
    head, tail = data[:index], data[index:]
    return head, tail
def get_pgraster_srid(data):
    """
    Extract the SRID from a PostGIS raster string (None for None input).
    """
    if data is None:
        return
    # The SRID lives at hex offsets 106-114 of the raster header, encoded as
    # a little-endian signed int (see POSTGIS_HEADER_STRUCTURE in const).
    srid_hex = data[106:114]
    (srid,) = unpack('i', srid_hex)
    return srid
def from_pgraster(data):
    """
    Convert a PostGIS HEX String into a dictionary.
    """
    if data is None:
        return
    # The raster header occupies the first 122 hex characters.
    header, data = chunk(data, 122)
    header = unpack(POSTGIS_HEADER_STRUCTURE, header)
    bands = []
    pixeltypes = []
    while data:
        # First byte of every band: the PostGIS pixel type, with 64 added
        # when the band carries a nodata value.
        pixeltype_hex, data = chunk(data, 2)
        pixeltype = unpack('B', pixeltype_hex)[0]
        has_nodata = pixeltype >= 64
        if has_nodata:
            pixeltype -= 64
        # Translate the pixel type to GDAL and derive the hex pack size.
        pixeltype = POSTGIS_TO_GDAL[pixeltype]
        pack_type = GDAL_TO_STRUCT[pixeltype]
        pack_size = 2 * STRUCT_SIZE[pack_type]
        # The nodata value is present in the string even when the nodata
        # flag is unset, so it must always be chunked off.
        nodata_hex, data = chunk(data, pack_size)
        nodata = unpack(pack_type, nodata_hex)[0]
        # Band pixel data: pack size times width (header[10]) * height (header[11]).
        band_hex, data = chunk(data, pack_size * header[10] * header[11])
        band_result = {'data': binascii.unhexlify(band_hex)}
        if has_nodata:
            band_result['nodata_value'] = nodata
        bands.append(band_result)
        pixeltypes.append(pixeltype)
    # GDAL requires one pixel type for all bands; PostGIS rasters may mix them.
    if len(set(pixeltypes)) != 1:
        raise ValidationError("Band pixeltypes are not all equal.")
    return {
        'srid': int(header[9]),
        'width': header[10], 'height': header[11],
        'datatype': pixeltypes[0],
        'origin': (header[5], header[6]),
        'scale': (header[3], header[4]),
        'skew': (header[7], header[8]),
        'bands': bands,
    }
def to_pgraster(rast):
    """
    Convert a GDALRaster into PostGIS Raster format.
    """
    if rast is None or rast == '':
        return
    # Header tuple: endianness (1 = little) and PostGIS raster version (0)
    # are fixed, followed by geo-referencing values and the dimensions.
    rasterheader = (
        1, 0, len(rast.bands), rast.scale.x, rast.scale.y,
        rast.origin.x, rast.origin.y, rast.skew.x, rast.skew.y,
        rast.srs.srid, rast.width, rast.height,
    )
    result = pack(POSTGIS_HEADER_STRUCTURE, rasterheader)
    for band in rast.bands:
        # Each band header is one 8BUI byte plus the nodata value. The byte
        # combines the PostGIS pixel type (0-11) with a +64 nodata flag:
        #   8BUI_VALUE = PG_PIXEL_TYPE + (64 if nodata else 0)
        structure = 'B' + GDAL_TO_STRUCT[band.datatype()]
        pixeltype = GDAL_TO_POSTGIS[band.datatype()]
        if band.nodata_value is not None:
            pixeltype += 64
        bandheader = pack(structure, (pixeltype, band.nodata_value or 0))
        band_data_hex = binascii.hexlify(band.data(as_memoryview=True)).upper()
        result += bandheader + band_data_hex
    # The DB layer expects a string, not bytes.
    return result.decode()
| bsd-3-clause |
xgin/letsencrypt | letsencrypt-apache/letsencrypt_apache/tests/tls_sni_01_test.py | 9 | 4782 | """Test for letsencrypt_apache.tls_sni_01."""
import unittest
import shutil
import mock
from letsencrypt.plugins import common_test
from letsencrypt_apache import obj
from letsencrypt_apache.tests import util
class TlsSniPerformTest(util.ApacheTest):
    """Test the ApacheTlsSni01 challenge."""

    auth_key = common_test.TLSSNI01Test.auth_key
    achalls = common_test.TLSSNI01Test.achalls

    def setUp(self):  # pylint: disable=arguments-differ
        super(TlsSniPerformTest, self).setUp()
        config = util.get_apache_configurator(
            self.config_path, self.config_dir, self.work_dir)
        config.config.tls_sni_01_port = 443
        from letsencrypt_apache import tls_sni_01
        self.sni = tls_sni_01.ApacheTlsSni01(config)

    def tearDown(self):
        shutil.rmtree(self.temp_dir)
        shutil.rmtree(self.config_dir)
        shutil.rmtree(self.work_dir)

    def test_perform0(self):
        # No challenges queued -> nothing to perform.
        resp = self.sni.perform()
        self.assertEqual(len(resp), 0)

    @mock.patch("letsencrypt.le_util.exe_exists")
    @mock.patch("letsencrypt.le_util.run_script")
    def test_perform1(self, _, mock_exists):
        mock_register = mock.Mock()
        self.sni.configurator.reverter.register_undo_command = mock_register
        mock_exists.return_value = True
        self.sni.configurator.parser.update_runtime_variables = mock.Mock()
        achall = self.achalls[0]
        self.sni.add_chall(achall)
        response = self.achalls[0].response(self.auth_key)
        mock_setup_cert = mock.MagicMock(return_value=response)
        # pylint: disable=protected-access
        self.sni._setup_challenge_cert = mock_setup_cert
        responses = self.sni.perform()
        # Make sure that register_undo_command was called into temp directory.
        self.assertEqual(True, mock_register.call_args[0][0])
        mock_setup_cert.assert_called_once_with(achall)
        # Check to make sure challenge config path is included in apache config.
        self.assertEqual(
            len(self.sni.configurator.parser.find_dir(
                "Include", self.sni.challenge_conf)), 1)
        self.assertEqual(len(responses), 1)
        self.assertEqual(responses[0], response)

    def test_perform2(self):
        # Avoid load module
        self.sni.configurator.parser.modules.add("ssl_module")
        acme_responses = []
        for achall in self.achalls:
            self.sni.add_chall(achall)
            acme_responses.append(achall.response(self.auth_key))
        mock_setup_cert = mock.MagicMock(side_effect=acme_responses)
        # pylint: disable=protected-access
        self.sni._setup_challenge_cert = mock_setup_cert
        sni_responses = self.sni.perform()
        self.assertEqual(mock_setup_cert.call_count, 2)
        # Make sure calls made to mocked function were correct
        self.assertEqual(
            mock_setup_cert.call_args_list[0], mock.call(self.achalls[0]))
        self.assertEqual(
            mock_setup_cert.call_args_list[1], mock.call(self.achalls[1]))
        self.assertEqual(
            len(self.sni.configurator.parser.find_dir(
                "Include", self.sni.challenge_conf)),
            1)
        self.assertEqual(len(sni_responses), 2)
        for i in xrange(2):
            self.assertEqual(sni_responses[i], acme_responses[i])

    def test_mod_config(self):
        z_domains = []
        for achall in self.achalls:
            self.sni.add_chall(achall)
            z_domain = achall.response(self.auth_key).z_domain
            z_domains.append(set([z_domain]))
        self.sni._mod_config()  # pylint: disable=protected-access
        self.sni.configurator.save()
        self.sni.configurator.parser.find_dir(
            "Include", self.sni.challenge_conf)
        vh_match = self.sni.configurator.aug.match(
            "/files" + self.sni.challenge_conf + "//VirtualHost")
        vhs = []
        for match in vh_match:
            # pylint: disable=protected-access
            vhs.append(self.sni.configurator._create_vhost(match))
        self.assertEqual(len(vhs), 2)
        for vhost in vhs:
            self.assertEqual(vhost.addrs, set([obj.Addr.fromstring("*:443")]))
            names = vhost.get_names()
            self.assertTrue(names in z_domains)

    def test_get_addrs_default(self):
        # A _default_ address must be normalized to the wildcard form.
        self.sni.configurator.choose_vhost = mock.Mock(
            return_value=obj.VirtualHost(
                "path", "aug_path", set([obj.Addr.fromstring("_default_:443")]),
                False, False)
        )
        self.assertEqual(
            set([obj.Addr.fromstring("*:443")]),
            self.sni._get_addrs(self.achalls[0]))  # pylint: disable=protected-access


if __name__ == "__main__":
    unittest.main()  # pragma: no cover
| apache-2.0 |
akx/shoop | shoop/core/models/_units.py | 1 | 1973 | # -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import with_statement
from decimal import Decimal
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from parler.models import TranslatableModel, TranslatedFields
from shoop.core.fields import InternalIdentifierField
from shoop.utils.numbers import bankers_round, parse_decimal_string
__all__ = ("SalesUnit",)
@python_2_unicode_compatible
class SalesUnit(TranslatableModel):
    # Unique machine-readable identifier for this unit.
    identifier = InternalIdentifierField(unique=True)
    # Number of decimal places quantities of this unit may carry.
    decimals = models.PositiveSmallIntegerField(default=0, verbose_name=_(u"allowed decimals"))
    translations = TranslatedFields(
        name=models.CharField(max_length=128, verbose_name=_('name')),
        short_name=models.CharField(max_length=128, verbose_name=_('short name')),
    )

    class Meta:
        verbose_name = _('sales unit')
        verbose_name_plural = _('sales units')

    def __str__(self):
        return str(self.safe_translation_getter("name", default=None))

    @property
    def allow_fractions(self):
        # Fractions are allowed as soon as any decimal places are.
        return self.decimals > 0

    @cached_property
    def quantity_step(self):
        """
        Get the quantity increment for the amount of decimals this unit allows.

        For 0 decimals, this will be 1; for 1 decimal, 0.1; etc.

        :return: Decimal in (0..1]
        :rtype: Decimal
        """
        # This particular syntax (`10 ^ -n`) is the same that `bankers_round`
        # uses to figure out the quantizer.
        return Decimal(10) ** (-int(self.decimals))

    def round(self, value):
        # Parse arbitrary input, then round half-to-even at allowed precision.
        return bankers_round(parse_decimal_string(value), self.decimals)
| agpl-3.0 |
Work4Labs/lettuce | tests/integration/lib/Django-1.3/django/contrib/gis/geometry/test_data.py | 364 | 2994 | """
This module has the mock object definitions used to hold reference geometry
for the GEOS and GDAL tests.
"""
import gzip
import os
from django.contrib import gis
from django.utils import simplejson
# This global used to store reference geometry data.
GEOMETRIES = None
# Path where reference test data is located.
TEST_DATA = os.path.join(os.path.dirname(gis.__file__), 'tests', 'data')
def tuplize(seq):
    "Turn all nested sequences to tuples in given sequence."
    if not isinstance(seq, (list, tuple)):
        return seq
    return tuple(tuplize(item) for item in seq)
def strconvert(d):
    "Converts all keys in dictionary to str type."
    # Python 2 file: iteritems() avoids building an intermediate list.
    converted = {}
    for key, value in d.iteritems():
        converted[str(key)] = value
    return converted
def get_ds_file(name, ext):
    # Data source files live under TEST_DATA/<name>/<name>.<ext>
    filename = '%s.%s' % (name, ext)
    return os.path.join(TEST_DATA, name, filename)
class TestObj(object):
    """
    Base testing object, turns keyword args into attributes.
    """
    def __init__(self, **kwargs):
        # Expose every keyword argument as an instance attribute.
        self.__dict__.update(kwargs)
class TestDS(TestObj):
    """
    Object for testing GDAL data sources.
    """
    def __init__(self, name, **kwargs):
        # Shapefile is the default extension unless 'ext' is specified.
        ext = kwargs.pop('ext', 'shp')
        self.ds = get_ds_file(name, ext)
        super(TestDS, self).__init__(**kwargs)
class TestGeom(TestObj):
    """
    Testing object used for wrapping reference geometry data
    in GEOS/GDAL tests.
    """
    def __init__(self, **kwargs):
        # JSON has no tuple type, so coordinate-bearing keyword arguments
        # are converted from (nested) lists to tuples before being stored,
        # allowing the test cases to compare against tuple output.
        coords = kwargs.pop('coords', None)
        if coords:
            self.coords = tuplize(coords)
        centroid = kwargs.pop('centroid', None)
        if centroid:
            self.centroid = tuple(centroid)
        ext_ring_cs = kwargs.pop('ext_ring_cs', None)
        if ext_ring_cs:
            ext_ring_cs = tuplize(ext_ring_cs)
        self.ext_ring_cs = ext_ring_cs
        super(TestGeom, self).__init__(**kwargs)
class TestGeomSet(object):
    """
    Each attribute of this object is a list of `TestGeom` instances.
    """
    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            geoms = [TestGeom(**strconvert(kw)) for kw in value]
            setattr(self, key, geoms)
class TestDataMixin(object):
    """
    Mixin used for GEOS/GDAL test cases that defines a `geometries`
    property, which returns and/or loads the reference geometry data.
    """
    @property
    def geometries(self):
        global GEOMETRIES
        if GEOMETRIES is None:
            # Lazily load the gzipped JSON fixture into the module global on
            # first access, so every test case shares one parsed copy.
            gzf = gzip.GzipFile(os.path.join(TEST_DATA, 'geometries.json.gz'))
            geometries = simplejson.loads(gzf.read())
            GEOMETRIES = TestGeomSet(**strconvert(geometries))
        return GEOMETRIES
| gpl-3.0 |
lissarae/sumatrapdf | ext/freetype2/src/tools/docmaker/docbeauty.py | 877 | 2642 | #!/usr/bin/env python
#
# DocBeauty (c) 2003, 2004, 2008 David Turner <david@freetype.org>
#
# This program is used to beautify the documentation comments used
# in the FreeType 2 public headers.
#
from sources import *
from content import *
from utils import *
import utils
import sys, os, time, string, getopt
content_processor = ContentProcessor()
def beautify_block( block ):
    """Re-render one documentation block and wrap it in comment borders."""
    if block.content:
        content_processor.reset()
        markups = content_processor.process_content( block.content )
        text  = []
        first = 1
        for markup in markups:
            text.extend( markup.beautify( first ) )
            first = 0
        # now beautify the documentation "borders" themselves
        bordered = [" /*************************************************************************"]
        for line in text:
            bordered.append( " *" + line )
        bordered.append( " */" )
        block.lines = bordered
def usage():
print "\nDocBeauty 0.1 Usage information\n"
print " docbeauty [options] file1 [file2 ...]\n"
print "using the following options:\n"
print " -h : print this page"
print " -b : backup original files with the 'orig' extension"
print ""
print " --backup : same as -b"
def main( argv ):
    """main program loop

    Parses command line options, beautifies every block of each input file
    and writes the result next to the original as "<file>.new".
    """
    global output_dir
    try:
        opts, args = getopt.getopt( sys.argv[1:],
                                    "hb",
                                    ["help", "backup"] )
    except getopt.GetoptError:
        usage()
        sys.exit( 2 )

    if args == []:
        usage()
        sys.exit( 1 )

    # process options
    output_dir = None
    do_backup  = None
    for opt in opts:
        if opt[0] in ( "-h", "--help" ):
            usage()
            sys.exit( 0 )
        if opt[0] in ( "-b", "--backup" ):
            do_backup = 1
    # NOTE(review): `do_backup` is parsed but never acted upon below -- the
    # beautified output goes to "<file>.new" and the original is untouched.
    # Confirm whether the backup/replace step was ever implemented upstream.

    # create context and processor
    source_processor = SourceProcessor()

    # retrieve the list of files to process
    file_list = make_file_list( args )
    for filename in file_list:
        source_processor.parse_file( filename )
        for block in source_processor.blocks:
            beautify_block( block )
        new_name = filename + ".new"
        try:
            # `with` guarantees the handle is closed even on a partial write;
            # the original code leaked the handle and silently swallowed
            # every error via a bare `except:` and a dead `ok` flag.
            with open( new_name, "wt" ) as output:
                for block in source_processor.blocks:
                    for line in block.lines:
                        output.write( line )
                        output.write( "\n" )
        except ( IOError, OSError ):
            sys.stderr.write( "WARNING: could not write output file '%s'\n" % new_name )


# if called from the command line
#
if __name__ == '__main__':
    main( sys.argv )
# eof
| gpl-3.0 |
godfather1103/WeiboRobot | python27/1.0/lib/site-packages/pip/vcs/mercurial.py | 280 | 4974 | from __future__ import absolute_import
import logging
import os
import tempfile
import re
from pip.utils import display_path, rmtree
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
from pip._vendor.six.moves import configparser
logger = logging.getLogger(__name__)
class Mercurial(VersionControl):
name = 'hg'
dirname = '.hg'
repo_name = 'clone'
schemes = ('hg', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http')
def export(self, location):
    """Export the Hg repository at the url to the destination location"""
    export_dir = tempfile.mkdtemp('-export', 'pip-')
    self.unpack(export_dir)
    try:
        self.run_command(
            ['archive', location], show_stdout=False, cwd=export_dir)
    finally:
        # Always clean up the temporary clone, even if archiving fails.
        rmtree(export_dir)
def switch(self, dest, url, rev_options):
    """Repoint the checkout's default path at *url*, then update."""
    repo_config = os.path.join(dest, self.dirname, 'hgrc')
    config = configparser.SafeConfigParser()
    try:
        # Rewrite the 'paths.default' entry in the repository's hgrc.
        config.read(repo_config)
        config.set('paths', 'default', url)
        with open(repo_config, 'w') as config_file:
            config.write(config_file)
    except (OSError, configparser.NoSectionError) as exc:
        logger.warning(
            'Could not switch Mercurial repository to %s: %s', url, exc,
        )
    else:
        # Only update the working copy if the config rewrite succeeded.
        self.run_command(['update', '-q'] + rev_options, cwd=dest)
def update(self, dest, rev_options):
    """Quietly pull new changesets, then move the working copy."""
    self.run_command(['pull', '-q'], cwd=dest)
    self.run_command(['update', '-q'] + rev_options, cwd=dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = [rev]
rev_display = ' (to revision %s)' % rev
else:
rev_options = []
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.info(
'Cloning hg %s%s to %s',
url,
rev_display,
display_path(dest),
)
self.run_command(['clone', '--noupdate', '-q', url, dest])
self.run_command(['update', '-q'] + rev_options, cwd=dest)
def get_url(self, location):
url = self.run_command(
['showconfig', 'paths.default'],
show_stdout=False, cwd=location).strip()
if self._is_local_repository(url):
url = path_to_url(url)
return url.strip()
def get_tag_revs(self, location):
tags = self.run_command(['tags'], show_stdout=False, cwd=location)
tag_revs = []
for line in tags.splitlines():
tags_match = re.search(r'([\w\d\.-]+)\s*([\d]+):.*$', line)
if tags_match:
tag = tags_match.group(1)
rev = tags_match.group(2)
if "tip" != tag:
tag_revs.append((rev.strip(), tag.strip()))
return dict(tag_revs)
def get_branch_revs(self, location):
branches = self.run_command(
['branches'], show_stdout=False, cwd=location)
branch_revs = []
for line in branches.splitlines():
branches_match = re.search(r'([\w\d\.-]+)\s*([\d]+):.*$', line)
if branches_match:
branch = branches_match.group(1)
rev = branches_match.group(2)
if "default" != branch:
branch_revs.append((rev.strip(), branch.strip()))
return dict(branch_revs)
def get_revision(self, location):
current_revision = self.run_command(
['parents', '--template={rev}'],
show_stdout=False, cwd=location).strip()
return current_revision
def get_revision_hash(self, location):
current_rev_hash = self.run_command(
['parents', '--template={node}'],
show_stdout=False, cwd=location).strip()
return current_rev_hash
def get_src_requirement(self, dist, location, find_tags):
repo = self.get_url(location)
if not repo.lower().startswith('hg:'):
repo = 'hg+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
if not repo:
return None
current_rev = self.get_revision(location)
current_rev_hash = self.get_revision_hash(location)
tag_revs = self.get_tag_revs(location)
branch_revs = self.get_branch_revs(location)
if current_rev in tag_revs:
# It's a tag
full_egg_name = '%s-%s' % (egg_project_name, tag_revs[current_rev])
elif current_rev in branch_revs:
# It's the tip of a branch
full_egg_name = '%s-%s' % (
egg_project_name,
branch_revs[current_rev],
)
else:
full_egg_name = '%s-dev' % egg_project_name
return '%s@%s#egg=%s' % (repo, current_rev_hash, full_egg_name)
# Make this backend discoverable by pip's VCS dispatcher.
vcs.register(Mercurial)
| gpl-3.0 |
piotrek-golda/CivilHubCopy | guides/models.py | 3 | 3546 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from locations.models import Location
from organizations.models import Organization
from places_core.permissions import is_moderator
from projects.models import SlugifiedModelMixin
@python_2_unicode_compatible
class GuideCategory(models.Model):
    """Very simple taxonomy model used to group guides into categories."""

    # Short display name (required).
    name = models.CharField(max_length=64, verbose_name=_(u"name"))
    # Optional free-form description.
    description = models.TextField(blank=True, default="", verbose_name=_(u"description"))

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = _(u"category")
        verbose_name_plural = _(u"categories")
@python_2_unicode_compatible
class GuideTag(models.Model):
    """ Tags may be related to many published guides.
    """
    # Tag label; uniqueness is not enforced at the model level.
    name = models.CharField(max_length=64, verbose_name=_(u"name"))

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = _(u"tag")
        verbose_name_plural = _(u"tags")
@python_2_unicode_compatible
class Guide(SlugifiedModelMixin):
    """
    Core model for this application - the guide itself. This model holds a lot
    of foreign relations. It may be published in one or more locations and we
    can also add NGO to participate. There may be also more than one author.
    """
    STATUS_CHOICES = (
        (1, _(u"draft")),
        (2, _(u"published")),
    )

    owner = models.ForeignKey(User, related_name="owned_guides", verbose_name=_(u"owner"))
    # NOTE: ``null=True`` was dropped from the ManyToManyFields below - per the
    # Django field reference it has no effect on M2M relations (they live in a
    # separate join table), so this is behavior-neutral cleanup.
    editors = models.ManyToManyField(User, related_name="permitted_guides", verbose_name=_(u"editors"), blank=True,
        help_text=_(u"Select people permitted to modify this guide. You don't have to include yourself if you're owner."))
    content = models.TextField(default="", verbose_name=_(u"content"))
    authors = models.ManyToManyField(User, blank=True, verbose_name=_(u"authors"), related_name="authored_guides")
    location = models.ForeignKey(Location, verbose_name=_(u"location"), related_name="guides")
    tags = models.ManyToManyField(GuideTag, blank=True, verbose_name=_(u"tags"))
    category = models.ForeignKey(GuideCategory, blank=True, null=True, verbose_name=_(u"category"), related_name="guides")
    created_at = models.DateTimeField(auto_now_add=True, verbose_name=_(u"date created"))
    updated_at = models.DateTimeField(auto_now=True, verbose_name=_(u"last modified"))
    organizations = models.ManyToManyField(Organization, blank=True, verbose_name=_(u"organizations"), related_name="guides")
    status = models.PositiveIntegerField(choices=STATUS_CHOICES, default=1, verbose_name=_(u"status"))

    def get_absolute_url(self):
        """Canonical URL of this guide within its location."""
        return reverse('guides:detail', kwargs={
            'location_slug': self.location.slug,
            'slug': self.slug, })

    def has_access(self, user):
        """Return True when *user* may modify this guide.

        Granted to superusers, the owner, explicitly listed editors and
        moderators of the guide's location.
        """
        return bool(
            user.is_superuser
            or user == self.owner
            or user in self.editors.all()
            or is_moderator(user, self.location)
        )

    def __str__(self):
        # ``name`` is presumably provided by SlugifiedModelMixin -- TODO confirm.
        return self.name

    class Meta:
        verbose_name = _(u"guide")
        verbose_name_plural = _(u"guides")
| gpl-3.0 |
mgit-at/ansible | packaging/sdist/check-link-behavior.py | 114 | 1290 | #!/usr/bin/env python
"""Checks for link behavior required for sdist to retain symlinks."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import platform
import shutil
import sys
import tempfile
def main():
    """Main program entry point."""
    work_dir = tempfile.mkdtemp()
    target = os.path.join(work_dir, 'file.txt')
    symlink = os.path.join(work_dir, 'symlink.txt')
    hardlink = os.path.join(work_dir, 'hardlink.txt')

    try:
        # Create an empty target, a symlink to it, then a hard link of the symlink.
        open(target, 'w').close()
        os.symlink(target, symlink)
        os.link(symlink, hardlink)

        if not os.path.islink(symlink):
            abort('Symbolic link not created.')

        if not os.path.islink(hardlink):
            # known issue on MacOS (Darwin)
            abort('Hard link of symbolic link created as a regular file.')
    finally:
        shutil.rmtree(work_dir)
def abort(reason):
    """
    :type reason: str
    """
    # Build the full diagnostic first, then exit with it as the message.
    message = (
        'ERROR: %s\n'
        'This will prevent symbolic links from being preserved in the resulting tarball.\n'
        'Aborting creation of sdist on platform: %s'
    ) % (reason, platform.system())
    sys.exit(message)
# Allow the check to be run as a standalone script.
if __name__ == '__main__':
    main()
| gpl-3.0 |
AMOboxTV/AMOBox.LegoBuild | script.module.youtube.dl/lib/youtube_dl/extractor/drtuber.py | 122 | 2521 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import str_to_int
class DrTuberIE(InfoExtractor):
    """youtube-dl extractor for drtuber.com video pages."""

    _VALID_URL = r'https?://(?:www\.)?drtuber\.com/video/(?P<id>\d+)/(?P<display_id>[\w-]+)'
    # Reference page used by youtube-dl's extractor test harness.
    _TEST = {
        'url': 'http://www.drtuber.com/video/1740434/hot-perky-blonde-naked-golf',
        'md5': '93e680cf2536ad0dfb7e74d94a89facd',
        'info_dict': {
            'id': '1740434',
            'display_id': 'hot-perky-blonde-naked-golf',
            'ext': 'mp4',
            'title': 'hot perky blonde naked golf',
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'categories': ['Babe', 'Blonde', 'Erotic', 'Outdoor', 'Softcore', 'Solo'],
            'thumbnail': 're:https?://.*\.jpg$',
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        """Scrape the video page and return a youtube-dl info dict."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        display_id = mobj.group('display_id')

        webpage = self._download_webpage(url, display_id)

        video_url = self._html_search_regex(
            r'<source src="([^"]+)"', webpage, 'video URL')

        title = self._html_search_regex(
            [r'<p[^>]+class="title_substrate">([^<]+)</p>', r'<title>([^<]+) - \d+'],
            webpage, 'title')

        thumbnail = self._html_search_regex(
            r'poster="([^"]+)"',
            webpage, 'thumbnail', fatal=False)

        def extract_count(id_, name):
            # Counts are rendered as e.g. "1,234"; str_to_int strips separators.
            return str_to_int(self._html_search_regex(
                r'<span[^>]+(?:class|id)="%s"[^>]*>([\d,\.]+)</span>' % id_,
                webpage, '%s count' % name, fatal=False))

        like_count = extract_count('rate_likes', 'like')
        dislike_count = extract_count('rate_dislikes', 'dislike')
        comment_count = extract_count('comments_count', 'comment')

        cats_str = self._search_regex(
            r'<div[^>]+class="categories_list">(.+?)</div>', webpage, 'categories', fatal=False)
        categories = [] if not cats_str else re.findall(r'<a title="([^"]+)"', cats_str)

        return {
            'id': video_id,
            'display_id': display_id,
            'url': video_url,
            'title': title,
            'thumbnail': thumbnail,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'comment_count': comment_count,
            'categories': categories,
            'age_limit': self._rta_search(webpage),
        }
| gpl-2.0 |
Frostman/eho-horizon | openstack_dashboard/dashboards/project/loadbalancers/tabs.py | 9 | 5153 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from django.utils.translation import ugettext as _
from horizon import exceptions
from horizon import tabs
from horizon import tables
from openstack_dashboard import api
from .tables import PoolsTable, MembersTable, MonitorsTable
class PoolsTab(tabs.TableTab):
    """Tab listing all LBaaS pools for the current tenant."""

    table_classes = (PoolsTable,)
    name = _("Pools")
    slug = "pools"
    template_name = "horizon/common/_detail_table.html"

    def get_poolstable_data(self):
        """Fetch pools from the LBaaS API; empty list (plus an error
        notification) on failure."""
        try:
            pools = api.lbaas.pools_get(self.tab_group.request)
            poolsFormatted = [p.readable(self.tab_group.request) for
                              p in pools]
        except Exception:
            # Was a bare ``except:``, which also swallows SystemExit and
            # KeyboardInterrupt; Exception keeps the same API-error handling.
            poolsFormatted = []
            exceptions.handle(self.tab_group.request,
                              _('Unable to retrieve pools list.'))
        return poolsFormatted
class MembersTab(tabs.TableTab):
    """Tab listing all LBaaS pool members for the current tenant."""

    table_classes = (MembersTable,)
    name = _("Members")
    slug = "members"
    template_name = "horizon/common/_detail_table.html"

    def get_memberstable_data(self):
        """Fetch members from the LBaaS API; empty list (plus an error
        notification) on failure."""
        try:
            members = api.lbaas.members_get(self.tab_group.request)
            membersFormatted = [m.readable(self.tab_group.request) for
                                m in members]
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
            # are not swallowed.
            membersFormatted = []
            exceptions.handle(self.tab_group.request,
                              _('Unable to retrieve member list.'))
        return membersFormatted
class MonitorsTab(tabs.TableTab):
    """Tab listing all LBaaS health monitors for the current tenant."""

    table_classes = (MonitorsTable,)
    name = _("Monitors")
    slug = "monitors"
    template_name = "horizon/common/_detail_table.html"

    def get_monitorstable_data(self):
        """Fetch health monitors from the LBaaS API; empty list (plus an
        error notification) on failure."""
        try:
            monitors = api.lbaas.pool_health_monitors_get(
                self.tab_group.request)
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
            # are not swallowed.
            monitors = []
            exceptions.handle(self.tab_group.request,
                              _('Unable to retrieve monitor list.'))
        return monitors
class LoadBalancerTabs(tabs.TabGroup):
    """Top-level tab group shown on the load balancers index page."""
    slug = "lbtabs"
    tabs = (PoolsTab, MembersTab, MonitorsTab)
    # Remember the last selected tab between requests.
    sticky = True
class PoolDetailsTab(tabs.Tab):
    """Detail tab rendering a single pool."""

    name = _("Pool Details")
    slug = "pooldetails"
    template_name = "project/loadbalancers/_pool_details.html"

    def get_context_data(self, request):
        """Look up the pool identified by the tab group kwargs."""
        pid = self.tab_group.kwargs['pool_id']
        try:
            pool = api.lbaas.pool_get(request, pid)
        except Exception:
            # Narrowed from a bare ``except:``; same fallback behavior.
            # NOTE(review): falls back to an empty list rather than None --
            # the template presumably tolerates it; verify before changing.
            pool = []
            exceptions.handle(request,
                              _('Unable to retrieve pool details.'))
        return {'pool': pool}
class VipDetailsTab(tabs.Tab):
    """Detail tab rendering a single VIP."""

    name = _("Vip Details")
    slug = "vipdetails"
    template_name = "project/loadbalancers/_vip_details.html"

    def get_context_data(self, request):
        """Look up the VIP identified by the tab group kwargs."""
        vid = self.tab_group.kwargs['vip_id']
        try:
            vip = api.lbaas.vip_get(request, vid)
        except Exception:
            # Narrowed from a bare ``except:``; same fallback behavior.
            vip = []
            exceptions.handle(self.tab_group.request,
                              _('Unable to retrieve vip details.'))
        return {'vip': vip}
class MemberDetailsTab(tabs.Tab):
    """Detail tab rendering a single pool member."""

    name = _("Member Details")
    slug = "memberdetails"
    template_name = "project/loadbalancers/_member_details.html"

    def get_context_data(self, request):
        """Look up the member identified by the tab group kwargs."""
        mid = self.tab_group.kwargs['member_id']
        try:
            member = api.lbaas.member_get(request, mid)
        except Exception:
            # Narrowed from a bare ``except:``; same fallback behavior.
            member = []
            exceptions.handle(self.tab_group.request,
                              _('Unable to retrieve member details.'))
        return {'member': member}
class MonitorDetailsTab(tabs.Tab):
    """Detail tab rendering a single health monitor."""

    name = _("Monitor Details")
    slug = "monitordetails"
    template_name = "project/loadbalancers/_monitor_details.html"

    def get_context_data(self, request):
        """Look up the health monitor identified by the tab group kwargs."""
        mid = self.tab_group.kwargs['monitor_id']
        try:
            monitor = api.lbaas.pool_health_monitor_get(request, mid)
        except Exception:
            # Narrowed from a bare ``except:``; same fallback behavior.
            monitor = []
            exceptions.handle(self.tab_group.request,
                              _('Unable to retrieve monitor details.'))
        return {'monitor': monitor}
# One single-tab group per detail page; Horizon renders each group on its
# own *_details view.
class PoolDetailsTabs(tabs.TabGroup):
    slug = "pooltabs"
    tabs = (PoolDetailsTab,)


class VipDetailsTabs(tabs.TabGroup):
    slug = "viptabs"
    tabs = (VipDetailsTab,)


class MemberDetailsTabs(tabs.TabGroup):
    slug = "membertabs"
    tabs = (MemberDetailsTab,)


class MonitorDetailsTabs(tabs.TabGroup):
    slug = "monitortabs"
    tabs = (MonitorDetailsTab,)
| apache-2.0 |
rizumu/django | tests/view_tests/tests/test_debug.py | 6 | 40531 | # -*- coding: utf-8 -*-
# This coding header is significant for tests, as the debug view is parsing
# files to search for such a header to decode the source file content
from __future__ import unicode_literals
import importlib
import inspect
import os
import re
import sys
import tempfile
from unittest import skipIf
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.db import DatabaseError, connection
from django.template import TemplateDoesNotExist
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import LoggingCaptureMixin
from django.utils import six
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import SimpleLazyObject
from django.views.debug import (
CallableSettingWrapper, ExceptionReporter, technical_500_response,
)
from .. import BrokenException, except_args
from ..views import (
custom_exception_reporter_filter_view, multivalue_dict_key_error,
non_sensitive_view, paranoid_view, sensitive_args_function_caller,
sensitive_kwargs_function_caller, sensitive_method_view, sensitive_view,
)
if six.PY3:
from .py3_test_debug import Py3ExceptionReporterTests # NOQA
class User(object):
    """Minimal stand-in for a request user; renders as a fixed username."""

    def __str__(self):
        # The debug page only needs a printable representation of the user.
        return 'jacob'
class CallableSettingWrapperTests(SimpleTestCase):
    """ Unittests for CallableSettingWrapper
    """
    def test_repr(self):
        # The wrapper must delegate repr() to the wrapped callable so the
        # debug page shows something meaningful for callable settings.
        class WrappedCallable(object):
            def __repr__(self):
                return "repr from the wrapped callable"

            def __call__(self):
                pass

        actual = repr(CallableSettingWrapper(WrappedCallable()))
        self.assertEqual(actual, "repr from the wrapped callable")
@override_settings(DEBUG=True, ROOT_URLCONF="view_tests.urls")
class DebugViewTests(LoggingCaptureMixin, SimpleTestCase):
    """Exercise Django's technical 400/403/404/500 debug responses."""

    def test_files(self):
        response = self.client.get('/raises/')
        self.assertEqual(response.status_code, 500)

        data = {
            'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'),
        }
        response = self.client.post('/raises/', data)
        # Uploaded file *names* appear in the report; file contents must not.
        self.assertContains(response, 'file_data.txt', status_code=500)
        self.assertNotContains(response, 'haha', status_code=500)

    def test_400(self):
        # Ensure that when DEBUG=True, technical_500_template() is called.
        response = self.client.get('/raises400/')
        self.assertContains(response, '<div class="context" id="', status_code=400)

    # Ensure no 403.html template exists to test the default case.
    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
    }])
    def test_403(self):
        response = self.client.get('/raises403/')
        self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)

    # Set up a test 403.html template.
    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'OPTIONS': {
            'loaders': [
                ('django.template.loaders.locmem.Loader', {
                    '403.html': 'This is a test template for a 403 error ({{ exception }}).',
                }),
            ],
        },
    }])
    def test_403_template(self):
        response = self.client.get('/raises403/')
        self.assertContains(response, 'test template', status_code=403)
        self.assertContains(response, '(Insufficient Permissions).', status_code=403)

    def test_404(self):
        response = self.client.get('/raises404/')
        self.assertEqual(response.status_code, 404)

    def test_raised_404(self):
        response = self.client.get('/views/raises404/')
        self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)

    def test_404_not_in_urls(self):
        response = self.client.get('/not-in-urls')
        self.assertNotContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)

    def test_technical_404(self):
        response = self.client.get('/views/technical404/')
        self.assertContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "view_tests.views.technical404", status_code=404)

    def test_classbased_technical_404(self):
        response = self.client.get('/views/classbased404/')
        self.assertContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "view_tests.views.Http404View", status_code=404)

    def test_view_exceptions(self):
        # Each entry in except_args triggers a different raise style.
        for n in range(len(except_args)):
            self.assertRaises(BrokenException, self.client.get,
                reverse('view_exception', args=(n,)))

    def test_non_l10ned_numeric_ids(self):
        """
        Numeric IDs and fancy traceback context blocks line numbers shouldn't be localized.
        """
        with self.settings(DEBUG=True, USE_L10N=True):
            response = self.client.get('/raises500/')
            # We look for a HTML fragment of the form
            # '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"'
            self.assertContains(response, '<div class="context" id="', status_code=500)
            match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content)
            self.assertIsNotNone(match)
            id_repr = match.group('id')
            self.assertFalse(re.search(b'[^c0-9]', id_repr),
                "Numeric IDs in debug response HTML page shouldn't be localized (value: %s)." % id_repr)

    def test_template_exceptions(self):
        for n in range(len(except_args)):
            try:
                self.client.get(reverse('template_exception', args=(n,)))
            except Exception:
                raising_loc = inspect.trace()[-1][-2][0].strip()
                self.assertNotEqual(raising_loc.find('raise BrokenException'), -1,
                    "Failed to find 'raise BrokenException' in last frame of traceback, instead found: %s" %
                    raising_loc)

    def test_template_loader_postmortem(self):
        """Tests for not existing file"""
        template_name = "notfound.html"
        with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile:
            tempdir = os.path.dirname(tmpfile.name)
            template_path = os.path.join(tempdir, template_name)
            with override_settings(TEMPLATES=[{
                'BACKEND': 'django.template.backends.django.DjangoTemplates',
                'DIRS': [tempdir],
            }]):
                response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
            self.assertContains(response, "%s (Source does not exist)" % template_path, status_code=500, count=2)

    def test_no_template_source_loaders(self):
        """
        Make sure if you don't specify a template, the debug view doesn't blow up.
        """
        self.assertRaises(TemplateDoesNotExist, self.client.get, '/render_no_template/')

    @override_settings(ROOT_URLCONF='view_tests.default_urls')
    def test_default_urlconf_template(self):
        """
        Make sure that the default urlconf template is shown instead
        of the technical 404 page, if the user has not altered their
        url conf yet.
        """
        response = self.client.get('/')
        self.assertContains(
            response,
            "<h2>Congratulations on your first Django-powered page.</h2>"
        )

    @override_settings(ROOT_URLCONF='view_tests.regression_21530_urls')
    def test_regression_21530(self):
        """
        Regression test for bug #21530.

        If the admin app include is replaced with exactly one url
        pattern, then the technical 404 template should be displayed.

        The bug here was that an AttributeError caused a 500 response.
        """
        response = self.client.get('/')
        self.assertContains(
            response,
            "Page not found <span>(404)</span>",
            status_code=404
        )
class DebugViewQueriesAllowedTests(SimpleTestCase):
    # May need a query to initialize MySQL connection
    allow_database_queries = True

    def test_handle_db_exception(self):
        """
        Ensure the debug view works when a database exception is raised by
        performing an invalid query and passing the exception to the debug view.
        """
        with connection.cursor() as cursor:
            try:
                cursor.execute('INVALID SQL')
            except DatabaseError:
                exc_info = sys.exc_info()

        rf = RequestFactory()
        response = technical_500_response(rf.get('/'), *exc_info)
        self.assertContains(response, 'OperationalError at /', status_code=500)
@override_settings(
    DEBUG=True,
    ROOT_URLCONF="view_tests.urls",
    # No template directories are configured, so no templates will be found.
    TEMPLATES=[{
        'BACKEND': 'django.template.backends.dummy.TemplateStrings',
    }],
)
class NonDjangoTemplatesDebugViewTests(SimpleTestCase):
    """Debug views must still work when the configured template engine is
    not DjangoTemplates."""

    def test_400(self):
        # Ensure that when DEBUG=True, technical_500_template() is called.
        response = self.client.get('/raises400/')
        self.assertContains(response, '<div class="context" id="', status_code=400)

    def test_403(self):
        response = self.client.get('/raises403/')
        self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)

    def test_404(self):
        response = self.client.get('/raises404/')
        self.assertEqual(response.status_code, 404)

    def test_template_not_found_error(self):
        # Raises a TemplateDoesNotExist exception and shows the debug view.
        url = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"})
        response = self.client.get(url)
        self.assertContains(response, '<div class="context" id="', status_code=500)
class ExceptionReporterTests(SimpleTestCase):
    """HTML traceback generation via ExceptionReporter.get_traceback_html().

    NOTE: several expected strings below use ``&#39;`` because the debug page
    HTML-escapes apostrophes; the raw apostrophes that appeared here were an
    entity-decoding corruption (and a syntax error inside single-quoted
    literals) and have been restored.
    """
    rf = RequestFactory()

    def test_request_and_exception(self):
        "A simple exception report can be generated"
        try:
            request = self.rf.get('/test_view/')
            request.user = User()
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>ValueError at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">Can&#39;t find my keys</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertIn('<h3 id="user-info">USER</h3>', html)
        self.assertIn('<p>jacob</p>', html)
        self.assertIn('<th>Exception Type:</th>', html)
        self.assertIn('<th>Exception Value:</th>', html)
        self.assertIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)

    def test_no_request(self):
        "An exception report can be generated without request"
        try:
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>ValueError</h1>', html)
        self.assertIn('<pre class="exception_value">Can&#39;t find my keys</pre>', html)
        self.assertNotIn('<th>Request Method:</th>', html)
        self.assertNotIn('<th>Request URL:</th>', html)
        self.assertNotIn('<h3 id="user-info">USER</h3>', html)
        self.assertIn('<th>Exception Type:</th>', html)
        self.assertIn('<th>Exception Value:</th>', html)
        self.assertIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertIn('<p>Request data not supplied</p>', html)

    def test_eol_support(self):
        """Test that the ExceptionReporter supports Unix, Windows and Macintosh EOL markers"""
        LINES = list('print %d' % i for i in range(1, 6))
        reporter = ExceptionReporter(None, None, None, None)

        for newline in ['\n', '\r\n', '\r']:
            fd, filename = tempfile.mkstemp(text=False)
            os.write(fd, force_bytes(newline.join(LINES) + newline))
            os.close(fd)
            try:
                self.assertEqual(
                    reporter._get_lines_from_file(filename, 3, 2),
                    (1, LINES[1:3], LINES[3], LINES[4:])
                )
            finally:
                os.unlink(filename)

    def test_no_exception(self):
        "An exception report can be generated for just a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, None, None)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>Report at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)

    def test_request_and_message(self):
        "A message can be provided in addition to a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>Report at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">I&#39;m a little teapot</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)

    def test_message_only(self):
        reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>Report</h1>', html)
        self.assertIn('<pre class="exception_value">I&#39;m a little teapot</pre>', html)
        self.assertNotIn('<th>Request Method:</th>', html)
        self.assertNotIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertIn('<p>Request data not supplied</p>', html)

    def test_non_utf8_values_handling(self):
        "Non-UTF-8 exceptions/values should not make the output generation choke."
        try:
            class NonUtf8Output(Exception):
                def __repr__(self):
                    return b'EXC\xe9EXC'
            somevar = b'VAL\xe9VAL'  # NOQA
            raise NonUtf8Output()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('VAL\\xe9VAL', html)
        self.assertIn('EXC\\xe9EXC', html)

    def test_unprintable_values_handling(self):
        "Unprintable values should not make the output generation choke."
        try:
            class OomOutput(object):
                def __repr__(self):
                    raise MemoryError('OOM')
            oomvalue = OomOutput()  # NOQA
            raise ValueError()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<td class="code"><pre>Error in formatting', html)

    def test_too_large_values_handling(self):
        "Large values should not create a large HTML."
        large = 256 * 1024
        repr_of_str_adds = len(repr(''))
        try:
            class LargeOutput(object):
                def __repr__(self):
                    return repr('A' * large)
            largevalue = LargeOutput()  # NOQA
            raise ValueError()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertEqual(len(html) // 1024 // 128, 0)  # still fit in 128Kb
        self.assertIn('<trimmed %d bytes string>' % (large + repr_of_str_adds,), html)

    @skipIf(six.PY2, 'Bug manifests on PY3 only')
    def test_unfrozen_importlib(self):
        """
        importlib is not a frozen app, but its loader thinks it's frozen which
        results in an ImportError on Python 3. Refs #21443.
        """
        try:
            request = self.rf.get('/test_view/')
            importlib.import_module('abc.def.invalid.name')
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>ImportError at /test_view/</h1>', html)

    def test_ignore_traceback_evaluation_exceptions(self):
        """
        Don't trip over exceptions generated by crafted objects when
        evaluating them while cleansing (#24455).
        """
        class BrokenEvaluation(Exception):
            pass

        def broken_setup():
            raise BrokenEvaluation

        request = self.rf.get('/test_view/')
        broken_lazy = SimpleLazyObject(broken_setup)
        try:
            bool(broken_lazy)
        except BrokenEvaluation:
            exc_type, exc_value, tb = sys.exc_info()

        self.assertIn(
            "BrokenEvaluation",
            ExceptionReporter(request, exc_type, exc_value, tb).get_traceback_html(),
            "Evaluation exception reason not mentioned in traceback"
        )

    @override_settings(ALLOWED_HOSTS='example.com')
    def test_disallowed_host(self):
        "An exception report can be generated even for a disallowed host."
        request = self.rf.get('/', HTTP_HOST='evil.com')
        reporter = ExceptionReporter(request, None, None, None)
        html = reporter.get_traceback_html()
        self.assertIn("http://evil.com/", html)
class PlainTextReportTests(SimpleTestCase):
    """Plain-text traceback generation via get_traceback_text().

    Unlike the HTML report, the text report is not escaped, so expected
    strings contain literal apostrophes.
    """
    rf = RequestFactory()

    def test_request_and_exception(self):
        "A simple exception report can be generated"
        try:
            request = self.rf.get('/test_view/')
            request.user = User()
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        text = reporter.get_traceback_text()
        self.assertIn('ValueError at /test_view/', text)
        self.assertIn("Can't find my keys", text)
        self.assertIn('Request Method:', text)
        self.assertIn('Request URL:', text)
        self.assertIn('USER: jacob', text)
        self.assertIn('Exception Type:', text)
        self.assertIn('Exception Value:', text)
        self.assertIn('Traceback:', text)
        self.assertIn('Request information:', text)
        self.assertNotIn('Request data not supplied', text)

    def test_no_request(self):
        "An exception report can be generated without request"
        try:
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        text = reporter.get_traceback_text()
        self.assertIn('ValueError', text)
        self.assertIn("Can't find my keys", text)
        self.assertNotIn('Request Method:', text)
        self.assertNotIn('Request URL:', text)
        self.assertNotIn('USER:', text)
        self.assertIn('Exception Type:', text)
        self.assertIn('Exception Value:', text)
        self.assertIn('Traceback:', text)
        self.assertIn('Request data not supplied', text)

    def test_no_exception(self):
        "An exception report can be generated for just a request"
        # Only checks that generation does not raise.
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, None, None)
        reporter.get_traceback_text()

    def test_request_and_message(self):
        "A message can be provided in addition to a request"
        # Only checks that generation does not raise.
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
        reporter.get_traceback_text()

    def test_message_only(self):
        # Only checks that generation does not raise.
        reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
        reporter.get_traceback_text()

    @override_settings(ALLOWED_HOSTS='example.com')
    def test_disallowed_host(self):
        "An exception report can be generated even for a disallowed host."
        request = self.rf.get('/', HTTP_HOST='evil.com')
        reporter = ExceptionReporter(request, None, None, None)
        text = reporter.get_traceback_text()
        self.assertIn("http://evil.com/", text)
class ExceptionReportTestMixin(object):
    # Mixin used in the ExceptionReporterFilterTests and
    # AjaxResponseExceptionReporterFilter tests below

    # Shared POST payload; the 'sausage' and 'bacon' values are the ones the
    # safe/paranoid checks below expect to be filtered out.
    breakfast_data = {'sausage-key': 'sausage-value',
                      'baked-beans-key': 'baked-beans-value',
                      'hash-brown-key': 'hash-brown-value',
                      'bacon-key': 'bacon-value'}

    def verify_unsafe_response(self, view, check_for_vars=True,
                               check_for_POST_params=True):
        """
        Asserts that potentially sensitive info are displayed in the response.
        """
        request = self.rf.post('/some_url/', self.breakfast_data)
        response = view(request)
        if check_for_vars:
            # All variables are shown.
            self.assertContains(response, 'cooked_eggs', status_code=500)
            self.assertContains(response, 'scrambled', status_code=500)
            self.assertContains(response, 'sauce', status_code=500)
            self.assertContains(response, 'worcestershire', status_code=500)
        if check_for_POST_params:
            for k, v in self.breakfast_data.items():
                # All POST parameters are shown.
                self.assertContains(response, k, status_code=500)
                self.assertContains(response, v, status_code=500)

    def verify_safe_response(self, view, check_for_vars=True,
                             check_for_POST_params=True):
        """
        Asserts that certain sensitive info are not displayed in the response.
        """
        request = self.rf.post('/some_url/', self.breakfast_data)
        response = view(request)
        if check_for_vars:
            # Non-sensitive variable's name and value are shown.
            self.assertContains(response, 'cooked_eggs', status_code=500)
            self.assertContains(response, 'scrambled', status_code=500)
            # Sensitive variable's name is shown but not its value.
            self.assertContains(response, 'sauce', status_code=500)
            self.assertNotContains(response, 'worcestershire', status_code=500)
        if check_for_POST_params:
            for k, v in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertContains(response, k, status_code=500)
            # Non-sensitive POST parameters' values are shown.
            self.assertContains(response, 'baked-beans-value', status_code=500)
            self.assertContains(response, 'hash-brown-value', status_code=500)
            # Sensitive POST parameters' values are not shown.
            self.assertNotContains(response, 'sausage-value', status_code=500)
            self.assertNotContains(response, 'bacon-value', status_code=500)

    def verify_paranoid_response(self, view, check_for_vars=True,
                                 check_for_POST_params=True):
        """
        Asserts that no variables or POST parameters are displayed in the response.
        """
        request = self.rf.post('/some_url/', self.breakfast_data)
        response = view(request)
        if check_for_vars:
            # Show variable names but not their values.
            self.assertContains(response, 'cooked_eggs', status_code=500)
            self.assertNotContains(response, 'scrambled', status_code=500)
            self.assertContains(response, 'sauce', status_code=500)
            self.assertNotContains(response, 'worcestershire', status_code=500)
        if check_for_POST_params:
            for k, v in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertContains(response, k, status_code=500)
                # No POST parameters' values are shown.
                self.assertNotContains(response, v, status_code=500)

    def verify_unsafe_email(self, view, check_for_POST_params=True):
        """
        Asserts that potentially sensitive info are displayed in the email report.
        """
        with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
            mail.outbox = []  # Empty outbox
            request = self.rf.post('/some_url/', self.breakfast_data)
            view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]
            # Frames vars are never shown in plain text email reports.
            body_plain = force_text(email.body)
            self.assertNotIn('cooked_eggs', body_plain)
            self.assertNotIn('scrambled', body_plain)
            self.assertNotIn('sauce', body_plain)
            self.assertNotIn('worcestershire', body_plain)
            # Frames vars are shown in html email reports.
            body_html = force_text(email.alternatives[0][0])
            self.assertIn('cooked_eggs', body_html)
            self.assertIn('scrambled', body_html)
            self.assertIn('sauce', body_html)
            self.assertIn('worcestershire', body_html)
            if check_for_POST_params:
                for k, v in self.breakfast_data.items():
                    # All POST parameters are shown.
                    self.assertIn(k, body_plain)
                    self.assertIn(v, body_plain)
                    self.assertIn(k, body_html)
                    self.assertIn(v, body_html)

    def verify_safe_email(self, view, check_for_POST_params=True):
        """
        Asserts that certain sensitive info are not displayed in the email report.
        """
        with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
            mail.outbox = []  # Empty outbox
            request = self.rf.post('/some_url/', self.breakfast_data)
            view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]
            # Frames vars are never shown in plain text email reports.
            body_plain = force_text(email.body)
            self.assertNotIn('cooked_eggs', body_plain)
            self.assertNotIn('scrambled', body_plain)
            self.assertNotIn('sauce', body_plain)
            self.assertNotIn('worcestershire', body_plain)
            # Frames vars are shown in html email reports (except sensitive
            # values, which are filtered).
            body_html = force_text(email.alternatives[0][0])
            self.assertIn('cooked_eggs', body_html)
            self.assertIn('scrambled', body_html)
            self.assertIn('sauce', body_html)
            self.assertNotIn('worcestershire', body_html)
            if check_for_POST_params:
                for k, v in self.breakfast_data.items():
                    # All POST parameters' names are shown.
                    self.assertIn(k, body_plain)
                # Non-sensitive POST parameters' values are shown.
                self.assertIn('baked-beans-value', body_plain)
                self.assertIn('hash-brown-value', body_plain)
                self.assertIn('baked-beans-value', body_html)
                self.assertIn('hash-brown-value', body_html)
                # Sensitive POST parameters' values are not shown.
                self.assertNotIn('sausage-value', body_plain)
                self.assertNotIn('bacon-value', body_plain)
                self.assertNotIn('sausage-value', body_html)
                self.assertNotIn('bacon-value', body_html)

    def verify_paranoid_email(self, view):
        """
        Asserts that no variables or POST parameters are displayed in the email report.
        """
        with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
            mail.outbox = []  # Empty outbox
            request = self.rf.post('/some_url/', self.breakfast_data)
            view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]
            # Frames vars are never shown in plain text email reports.
            body = force_text(email.body)
            self.assertNotIn('cooked_eggs', body)
            self.assertNotIn('scrambled', body)
            self.assertNotIn('sauce', body)
            self.assertNotIn('worcestershire', body)
            for k, v in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertIn(k, body)
                # No POST parameters' values are shown.
                self.assertNotIn(v, body)
@override_settings(ROOT_URLCONF='view_tests.urls')
class ExceptionReporterFilterTests(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
    """
    Ensure that sensitive information can be filtered out of error reports.

    Refs #14614.
    """
    rf = RequestFactory()

    def test_non_sensitive_request(self):
        """
        Ensure that everything (request info and frame variables) can be seen
        in the default error reports for non-sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(non_sensitive_view)
            self.verify_unsafe_email(non_sensitive_view)

        with self.settings(DEBUG=False):
            self.verify_unsafe_response(non_sensitive_view)
            self.verify_unsafe_email(non_sensitive_view)

    def test_sensitive_request(self):
        """
        Ensure that sensitive POST parameters and frame variables cannot be
        seen in the default error reports for sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_view)
            self.verify_unsafe_email(sensitive_view)

        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_view)
            self.verify_safe_email(sensitive_view)

    def test_paranoid_request(self):
        """
        Ensure that no POST parameters and frame variables can be seen in the
        default error reports for "paranoid" requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(paranoid_view)
            self.verify_unsafe_email(paranoid_view)

        with self.settings(DEBUG=False):
            self.verify_paranoid_response(paranoid_view)
            self.verify_paranoid_email(paranoid_view)

    def test_multivalue_dict_key_error(self):
        """
        #21098 -- Ensure that sensitive POST parameters cannot be seen in the
        error reports if request.POST['nonexistent_key'] throws an error.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(multivalue_dict_key_error)
            self.verify_unsafe_email(multivalue_dict_key_error)

        with self.settings(DEBUG=False):
            self.verify_safe_response(multivalue_dict_key_error)
            self.verify_safe_email(multivalue_dict_key_error)

    def test_custom_exception_reporter_filter(self):
        """
        Ensure that it's possible to assign an exception reporter filter to
        the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(custom_exception_reporter_filter_view)
            self.verify_unsafe_email(custom_exception_reporter_filter_view)

        with self.settings(DEBUG=False):
            self.verify_unsafe_response(custom_exception_reporter_filter_view)
            self.verify_unsafe_email(custom_exception_reporter_filter_view)

    def test_sensitive_method(self):
        """
        Ensure that the sensitive_variables decorator works with object
        methods.

        Refs #18379.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_method_view,
                                        check_for_POST_params=False)
            self.verify_unsafe_email(sensitive_method_view,
                                     check_for_POST_params=False)

        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_method_view,
                                      check_for_POST_params=False)
            self.verify_safe_email(sensitive_method_view,
                                   check_for_POST_params=False)

    def test_sensitive_function_arguments(self):
        """
        Ensure that sensitive variables don't leak in the sensitive_variables
        decorator's frame, when those variables are passed as arguments to the
        decorated function.

        Refs #19453.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_args_function_caller)
            self.verify_unsafe_email(sensitive_args_function_caller)

        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False)
            self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False)

    def test_sensitive_function_keyword_arguments(self):
        """
        Ensure that sensitive variables don't leak in the sensitive_variables
        decorator's frame, when those variables are passed as keyword arguments
        to the decorated function.

        Refs #19453.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_kwargs_function_caller)
            self.verify_unsafe_email(sensitive_kwargs_function_caller)

        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False)
            self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False)

    def test_callable_settings(self):
        """
        Callable settings should not be evaluated in the debug page (#21345).
        """
        def callable_setting():
            return "This should not be displayed"
        with self.settings(DEBUG=True, FOOBAR=callable_setting):
            response = self.client.get('/raises500/')
            self.assertNotContains(response, "This should not be displayed", status_code=500)

    def test_callable_settings_forbidding_to_set_attributes(self):
        """
        Callable settings which forbid to set attributes should not break
        the debug page (#23070).
        """
        class CallableSettingWithSlots(object):
            # __slots__ prevents the debug view from annotating the object.
            __slots__ = []

            def __call__(self):
                return "This should not be displayed"

        with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()):
            response = self.client.get('/raises500/')
            self.assertNotContains(response, "This should not be displayed", status_code=500)

    def test_dict_setting_with_non_str_key(self):
        """
        A dict setting containing a non-string key should not break the
        debug page (#12744).
        """
        with self.settings(DEBUG=True, FOOBAR={42: None}):
            response = self.client.get('/raises500/')
            self.assertContains(response, 'FOOBAR', status_code=500)

    def test_sensitive_settings(self):
        """
        The debug page should not show some sensitive settings
        (password, secret key, ...).
        """
        sensitive_settings = [
            'SECRET_KEY',
            'PASSWORD',
            'API_KEY',
            'AUTH_TOKEN',
        ]
        for setting in sensitive_settings:
            with self.settings(DEBUG=True, **{setting: "should not be displayed"}):
                response = self.client.get('/raises500/')
                self.assertNotContains(response, 'should not be displayed', status_code=500)

    def test_settings_with_sensitive_keys(self):
        """
        The debug page should filter out some sensitive information found in
        dict settings.
        """
        sensitive_settings = [
            'SECRET_KEY',
            'PASSWORD',
            'API_KEY',
            'AUTH_TOKEN',
        ]
        for setting in sensitive_settings:
            # Check filtering both at the top level and nested one level down.
            FOOBAR = {
                setting: "should not be displayed",
                'recursive': {setting: "should not be displayed"},
            }
            with self.settings(DEBUG=True, FOOBAR=FOOBAR):
                response = self.client.get('/raises500/')
                self.assertNotContains(response, 'should not be displayed', status_code=500)
class AjaxResponseExceptionReporterFilter(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
    """
    Ensure that sensitive information can be filtered out of error reports.

    Here we specifically test the plain text 500 debug-only error page served
    when it has been detected the request was sent by JS code. We don't check
    for (non)existence of frames vars in the traceback information section of
    the response content because we don't include them in these error pages.
    Refs #14614.
    """
    # All requests made through this factory look like XMLHttpRequests.
    rf = RequestFactory(HTTP_X_REQUESTED_WITH='XMLHttpRequest')

    def test_non_sensitive_request(self):
        """
        Ensure that request info can be seen in the default error reports for
        non-sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)

        with self.settings(DEBUG=False):
            self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)

    def test_sensitive_request(self):
        """
        Ensure that sensitive POST parameters cannot be seen in the default
        error reports for sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_view, check_for_vars=False)

        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_view, check_for_vars=False)

    def test_paranoid_request(self):
        """
        Ensure that no POST parameters can be seen in the default error reports
        for "paranoid" requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(paranoid_view, check_for_vars=False)

        with self.settings(DEBUG=False):
            self.verify_paranoid_response(paranoid_view, check_for_vars=False)

    def test_custom_exception_reporter_filter(self):
        """
        Ensure that it's possible to assign an exception reporter filter to
        the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(custom_exception_reporter_filter_view,
                                        check_for_vars=False)

        with self.settings(DEBUG=False):
            self.verify_unsafe_response(custom_exception_reporter_filter_view,
                                        check_for_vars=False)
| bsd-3-clause |
xiangke/pycopia | process/pycopia/rsynclib.py | 1 | 2736 | #!/usr/bin/python2.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
# $Id$
#
# Copyright (C) 1999-2006 Keith Dart <keith@kdart.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
"""
Wrapper for the 'rsync' program. See the rsync manpage for more details.
"""
from pycopia import proctools
# Locate the rsync binary once at import time; this wrapper module is useless
# without it, so a missing binary is surfaced as an ImportError.
try:
    RSYNC = proctools.which("rsync")
except ValueError:
    raise ImportError, "rsync program not found!"

# Output of "rsync --version" for the releases this module was tested with.
TESTED_VERSIONS = ["rsync version 2.5.5 protocol version 26"]
def rsync(src, dst, password=None, extraopts=None, logfile=None):
    """rsync(src, dst, [password, [extraopts, [logfile]]])
    Run the rsync program to transfer *src* to *dst* and return its exit
    status.

    Usage: rsync [OPTION]... SRC [SRC]... [USER@]HOST:DEST
      or   rsync [OPTION]... [USER@]HOST:SRC DEST
      or   rsync [OPTION]... SRC [SRC]... DEST
      or   rsync [OPTION]... [USER@]HOST::SRC [DEST]
      or   rsync [OPTION]... SRC [SRC]... [USER@]HOST::DEST
      or   rsync [OPTION]... rsync://[USER@]HOST[:PORT]/SRC [DEST]
    You might want to set the RSYNC_RSH environment variable first.

    If *password* is supplied, an interactive password prompt is expected
    from rsync and answered automatically.  *extraopts* is a string of
    additional rsync command-line options; *logfile* is handed to the
    spawned process.
    """
    opts = "-q"
    if extraopts:
        # Bug fix: a separating space is required here; previously "-q" and
        # the extra options were fused into a single unparsable argument
        # (e.g. "-q-a").
        opts += " " + extraopts
    CMD = "%s %s %s %s" % (RSYNC, opts, src, dst)
    rsync = proctools.spawnpty(CMD, logfile=logfile)
    # assume a password will be requested if one is supplied here
    if password is not None:
        from pycopia import expect
        ersync = expect.Expect(rsync)
        ersync.expect("password:", timeout=2.0)
        ersync.writeln(password)
        del ersync
    rsync.wait()
    return rsync.exitstatus  # user can check exit status for success
def rsync_version():
    """rsync_version() Return the version string for the rsync command on this
    system."""
    rsync = proctools.spawnpipe("rsync --version")
    ver = rsync.readline()  # version is on the first line of output
    rsync.read()  # discard the rest so the pipe drains cleanly
    rsync.close()
    return ver
def check_version():
    """Checks that the installed rsync program is the same one that this module was
    tested with (and written for). Returns 1 on a match, 0 otherwise."""
    installed = rsync_version()[15:20]
    matches = [vs for vs in TESTED_VERSIONS if vs[15:20] == installed]
    if matches:
        return 1
    return 0
if __name__ == "__main__":
    # Self-test: report whether the locally installed rsync matches a
    # version this wrapper was verified against.  (Python 2 print syntax,
    # matching the rest of this module.)
    if check_version():
        print "your rsync version is good!"
    else:
        print "your rsync version is an untested one, beware of errors!"
| lgpl-2.1 |
lmazuel/azure-sdk-for-python | azure-mgmt-datalake-store/azure/mgmt/datalake/store/models/operation_list_result.py | 1 | 1314 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class OperationListResult(Model):
    """The list of available operations for Data Lake Store.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar value: The results of the list operation.
    :vartype value: list[~azure.mgmt.datalake.store.models.Operation]
    :ivar next_link: The link (url) to the next page of results.
    :vartype next_link: str
    """

    # Both fields are server-populated, hence marked read-only for msrest.
    _validation = {
        'value': {'readonly': True},
        'next_link': {'readonly': True},
    }

    # Wire-format mapping used by the msrest serializer/deserializer.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[Operation]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self):
        super(OperationListResult, self).__init__()
        # Filled in by the service on deserialization.
        self.value = None
        self.next_link = None
| mit |
mbauskar/erpnext | erpnext/commands/__init__.py | 41 | 1431 | # Copyright (c) 2015, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, absolute_import, print_function
import click
import frappe
from frappe.commands import pass_context, get_site
def call_command(cmd, context):
	"""Invoke the click command *cmd*, forwarding the current context object."""
	command_context = click.Context(cmd, obj=context)
	return command_context.forward(cmd)
@click.command('make-demo')
@click.option('--site', help='site name')
@click.option('--domain', default='Manufacturing')
@click.option('--days', default=100,
	help='Run the demo for so many days. Default 100')
@click.option('--resume', default=False, is_flag=True,
	help='Continue running the demo for given days')
@click.option('--reinstall', default=False, is_flag=True,
	help='Reinstall site before demo')
@pass_context
def make_demo(context, site, domain='Manufacturing', days=100,
	resume=False, reinstall=False):
	"Reinstall site and setup demo"
	from frappe.commands.site import _reinstall
	from frappe.installer import install_app

	site = get_site(context)

	if resume:
		# Continue an existing demo without wiping the site.
		with frappe.init_site(site):
			frappe.connect()
			from erpnext.demo import demo
			demo.simulate(days=days)
	else:
		if reinstall:
			_reinstall(site, yes=True)
		with frappe.init_site(site=site):
			frappe.connect()
			# Idiom fix: "'erpnext' not in ..." instead of "not 'erpnext' in ..."
			if 'erpnext' not in frappe.get_installed_apps():
				install_app('erpnext')
			# import needs site
			from erpnext.demo import demo
			demo.make(domain, days)
commands = [
make_demo
] | gpl-3.0 |
liuliwork/django | tests/template_tests/filter_tests/test_add.py | 503 | 1688 | from datetime import date, timedelta
from django.template.defaultfilters import add
from django.test import SimpleTestCase
from ..utils import setup
class AddTests(SimpleTestCase):
    """
    Tests for #11687 and #16676
    """

    @setup({'add01': '{{ i|add:"5" }}'})
    def test_add01(self):
        output = self.engine.render_to_string('add01', {'i': 2000})
        self.assertEqual(output, '2005')

    @setup({'add02': '{{ i|add:"napis" }}'})
    def test_add02(self):
        # int + unparsable string: the filter fails silently and renders ''.
        output = self.engine.render_to_string('add02', {'i': 2000})
        self.assertEqual(output, '')

    @setup({'add03': '{{ i|add:16 }}'})
    def test_add03(self):
        output = self.engine.render_to_string('add03', {'i': 'not_an_int'})
        self.assertEqual(output, '')

    @setup({'add04': '{{ i|add:"16" }}'})
    def test_add04(self):
        # When both operands are strings, the filter concatenates instead.
        output = self.engine.render_to_string('add04', {'i': 'not_an_int'})
        self.assertEqual(output, 'not_an_int16')

    @setup({'add05': '{{ l1|add:l2 }}'})
    def test_add05(self):
        output = self.engine.render_to_string('add05', {'l1': [1, 2], 'l2': [3, 4]})
        self.assertEqual(output, '[1, 2, 3, 4]')

    @setup({'add06': '{{ t1|add:t2 }}'})
    def test_add06(self):
        output = self.engine.render_to_string('add06', {'t1': (3, 4), 't2': (1, 2)})
        self.assertEqual(output, '(3, 4, 1, 2)')

    @setup({'add07': '{{ d|add:t }}'})
    def test_add07(self):
        # date + timedelta addition is supported by the filter.
        output = self.engine.render_to_string('add07', {'d': date(2000, 1, 1), 't': timedelta(10)})
        self.assertEqual(output, 'Jan. 11, 2000')
class FunctionTests(SimpleTestCase):
    """Direct tests of the add() filter function."""

    def test_add(self):
        # String operands that both parse as int are added numerically.
        self.assertEqual(add('1', '2'), 3)
| bsd-3-clause |
rubencabrera/odoo | addons/crm/__init__.py | 329 | 1265 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm
import crm_segmentation
import crm_lead
import sales_team
import calendar_event
import ir_http
import crm_phonecall
import report
import wizard
import res_partner
import res_config
import base_partner_merge
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
andela-angene/coursebuilder-core | coursebuilder/modules/assessments/assessments_tests.py | 3 | 8401 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for assessments"""
__author__ = 'John Orr (jorr@google.com)'
import urlparse
from xml import etree
import yaml
from controllers import sites
from models import courses
from models import transforms
from tests.functional import actions
# Accounts and course fixture shared by the test cases below.
ADMIN_EMAIL = 'admin@foo.com'
STUDENT_EMAIL = 'student@foo.com'
STUDENT_NAME = 'A S Tudent'
COURSE_NAME = 'assessment_tests'

# Submission due dates safely in the past/future relative to any test run.
DUE_DATE_IN_PAST = '1995-06-15 12:00'
DUE_DATE_IN_FUTURE = '2035-06-15 12:00'
class EmbeddedAssessmentTests(actions.TestBase):
    """Tests for assessments served through the embed module URL."""

    def setUp(self):
        super(EmbeddedAssessmentTests, self).setUp()
        self.base = '/' + COURSE_NAME
        self.app_context = actions.simple_add_course(
            COURSE_NAME, ADMIN_EMAIL, 'Learning Resources')
        self.course = courses.Course(None, self.app_context)
        self.assessment = self.course.add_assessment()
        self.assessment.availability = courses.AVAILABILITY_AVAILABLE
        self.course.save()
        self.embed_url = 'modules/embed/v1/resource/assessment/%s' % (
            self.assessment.unit_id)
        actions.login(STUDENT_EMAIL, is_admin=False)

    def tearDown(self):
        # Remove the course registered by simple_add_course() in setUp.
        del sites.Registry.test_overrides[sites.GCB_COURSES_CONFIG.name]
        super(EmbeddedAssessmentTests, self).tearDown()

    def set_workflow_field(self, name, value):
        # Merge a single key into the assessment's YAML workflow and persist.
        workflow_dict = {}
        if self.assessment.workflow_yaml:
            workflow_dict = yaml.safe_load(self.assessment.workflow_yaml)
        workflow_dict[name] = value
        self.assessment.workflow_yaml = yaml.safe_dump(workflow_dict)
        self.course.save()

    def test_assessment_is_embedded(self):
        # The embed URL redirects to a page styled for embedding.
        response = self.get(self.embed_url)
        self.assertEquals(302, response.status_int)
        redirect_url = response.headers['Location']
        dom = self.parse_html_string(self.get(redirect_url).body)
        self.assertEquals('hide-controls', dom.attrib['class'])

    def test_returns_to_assessment_after_grading(self):
        # Read the assessment grading URI from the assessment page
        redirect_url = self.get(self.embed_url).headers['Location']
        dom = self.parse_html_string(self.get(redirect_url).body)
        div = dom.find('.//div[@data-unit-id="%s"]' % self.assessment.unit_id)
        xsrf_token = div.get('data-xsrf-token')
        grader_uri = div.get('data-grader-uri')
        self.assertEquals('answer?embedded=true', grader_uri)

        # Post a response to the provided grading URI and examine redirect URI
        post_args = {
            'assessment_type': self.assessment.unit_id,
            'score': '0.0',
            'xsrf_token': xsrf_token
        }
        response = self.post(grader_uri, post_args)
        self.assertEquals(302, response.status_int)
        redirect_uri = response.headers['Location']

        parsed_uri = urlparse.urlparse(redirect_uri)
        self.assertEquals('/%s/assessment' % COURSE_NAME, parsed_uri.path)
        actual_query = urlparse.parse_qs(parsed_uri.query)
        expected_query = {
            'onsubmit': ['true'],
            'name': [str(self.assessment.unit_id)],
            'embedded': ['true']
        }
        self.assertEquals(expected_query, actual_query)

        # Confirm that the redirect uri is embedded
        response_body = self.get(redirect_uri).body
        dom = self.parse_html_string(response_body)
        self.assertEquals('hide-controls', dom.attrib['class'])

        # The confirmation message is shown
        self.assertIn(
            'cbShowMsgAutoHide(\'Assessment submitted.\')', response_body)

    def test_peer_review_is_not_embeddable(self):
        # Switch the assessment to human (peer-review) grading.
        self.assessment.workflow_yaml = yaml.safe_dump({'grader': 'human'})
        self.course.save()
        redirect_url = self.get(self.embed_url).headers['Location']
        dom = self.parse_html_string(self.get(redirect_url).body)
        self.assertEquals('hide-controls', dom.attrib['class'])
        self.assertEquals(
            'Peer-review assignments cannot be embedded in external pages.',
            dom.find('.//*[@class="gcb-article"]').text.strip())

    def test_email_of_record_is_shown_to_student(self):

        def assert_message_and_email(message):
            # The message and email are seen in embedded assessments
            redirect_url = self.get(self.embed_url).headers['Location']
            response = self.get(redirect_url)
            dom = self.parse_html_string(response.body)
            top_info = etree.ElementTree.tostring(
                dom.find('.//*[@class="assessment-top-info"]'))
            self.assertIn(message, top_info)
            self.assertIn(STUDENT_EMAIL, top_info)

            # The message and email are not shown in non-embedded assessments
            response = self.get('assessment?name=%s' % self.assessment.unit_id)
            dom = self.parse_html_string(response.body)
            top_info = etree.ElementTree.tostring(
                dom.find('.//*[@class="assessment-top-info"]'))
            self.assertNotIn(message, top_info)
            self.assertNotIn(STUDENT_EMAIL, top_info)

        # Assessment is open, and no answers recorded
        assert_message_and_email(
            'Your answers will be recorded under the email')

        # Assessment is closed, and no answers recorded
        self.set_workflow_field('submission_due_date', DUE_DATE_IN_PAST)
        assert_message_and_email(
            'You have not submitted any answers to this assignment under the '
            'email')

        # Submit assignment
        self.set_workflow_field('submission_due_date', DUE_DATE_IN_FUTURE)
        actions.submit_assessment(self, self.assessment.unit_id, {
            'assessment_type': self.assessment.unit_id,
            'score': '75.0',
            'answers': transforms.dumps({
                'rawScore': 3,
                'totalWeight': 4,
                'percentScore': 75})
        })

        # Assessment is open, and some answers recorded
        assert_message_and_email(
            'Your answers will be recorded under the email')

        # Assessment is closed, and some answers recorded
        self.set_workflow_field('submission_due_date', DUE_DATE_IN_PAST)
        assert_message_and_email(
            'Your answers have been recorded under the email')
class AssessmentsTests(actions.TestBase):
    """Tests for peer-reviewed assessment submission edge cases."""

    def setUp(self):
        super(AssessmentsTests, self).setUp()
        self.app_context = actions.simple_add_course(
            COURSE_NAME, ADMIN_EMAIL, 'Some say he wears parsley in his ears')
        self.course = courses.Course(None, self.app_context)
        self.base = '/' + COURSE_NAME
        self.assessment = self.course.add_assessment()
        self.assessment.availability = courses.AVAILABILITY_AVAILABLE
        self.assessment.workflow_yaml = yaml.safe_dump({
            'grader': 'human',
            'matcher': 'peer',
            'review_due_date': None,
            'review_min_count': 0,
            'review_window_mins': 0,
            'show_feedback': False,
            'single_submission': False,
            'submission_due_date': None,  # The setting we are testing.
        })
        self.course.save()

    def tearDown(self):
        super(AssessmentsTests, self).tearDown()

    def test_submit_peer_reviewed_assessment_with_no_due_date(self):
        # Just looking to not get an exception on submission_due_date being
        # None.
        actions.login(STUDENT_EMAIL, is_admin=False)
        actions.register(self, 'John Smith')
        actions.submit_assessment(self, self.assessment.unit_id, {
            'assessment_type': self.assessment.unit_id,
            'score': '75.0',
            'answers': transforms.dumps({
                'rawScore': 3,
                'totalWeight': 4,
                'percentScore': 75})
        })
| apache-2.0 |
benagricola/exabgp | lib/exabgp/configuration/environment.py | 1 | 9259 | # encoding: utf-8
"""
environment.py
Created by Thomas Mangin on 2011-11-29.
Copyright (c) 2011-2015 Exa Networks. All rights reserved.
"""
# XXX: raised exception not caught
# XXX: reloading mid-program not possible
# XXX: validation for path, file, etc not correctly test (ie surely buggy)
import os
import sys
import pwd
import syslog
from exabgp.util.ip import isip
# ===================================================================== NoneDict
#
class NoneDict (dict):
    """A dict whose item lookup always yields None instead of raising."""
    def __getitem__ (self, key):
        return None

nonedict = NoneDict()
# ================================================================== environment
# XXX: FIXME: Upper case for class !
class environment (object):
# class returned on issues
    class Error (Exception):
        """Raised when an environment/configuration value is invalid."""
        pass
application = 'unset'
# the configuration to be set by the program
configuration = {}
# the final parsed settings
_settings = None
location = os.path.normpath(sys.argv[0]) if sys.argv[0].startswith('/') else os.path.normpath(os.path.join(os.getcwd(),sys.argv[0]))
log_levels = ['EMERG', 'ALERT', 'CRIT', 'CRITICAL', 'ERR', 'ERROR', 'WARNING', 'NOTICE', 'INFO', 'DEBUG']
    @staticmethod
    def setup (conf):
        """Parse *conf* into the shared settings singleton (idempotent)."""
        if environment._settings:
            # nosetest is performing the setup multiple times, so we can not raise anymore
            # raise RuntimeError('You already initialised the environment')
            return environment._settings
        environment._settings = _env(conf)
        return environment._settings
    @staticmethod
    def settings ():
        """Return the parsed settings; environment.setup() must have run first."""
        if not environment._settings:
            raise RuntimeError('You can not have an import using settings() before main() initialised environment')
        return environment._settings
    @staticmethod
    def root (path):
        """Return candidate absolute locations for *path*, in preference order:
        relative to the installation root (the directory above the deepest
        'lib' component of this program's location), as a user-expanded path,
        and finally anchored at '/'.
        """
        roots = environment.location.split(os.sep)
        location = []
        # walk from the end to find the innermost 'lib' directory
        for index in range(len(roots)-1,-1,-1):
            if roots[index] == 'lib':
                if index:
                    location = roots[:index]
                break
        # NOTE(review): if no 'lib' component is found, location stays empty
        # and os.path.join(*location) raises TypeError -- confirm callers only
        # invoke this from an installed tree.
        root = os.path.join(*location)
        paths = [
            os.path.normpath(os.path.join(os.path.join(os.sep,root,path))),
            os.path.normpath(os.path.expanduser(environment.unquote(path))),
            os.path.normpath(os.path.join('/',path)),
        ]
        return paths
@staticmethod
def integer (_):
return int(_)
@staticmethod
def real (_):
return float(_)
@staticmethod
def lowunquote (_):
return _.strip().strip('\'"').lower()
@staticmethod
def unquote (_):
return _.strip().strip('\'"')
@staticmethod
def quote (_):
return "'%s'" % str(_)
@staticmethod
def nop (_):
return _
@staticmethod
def boolean (_):
return _.lower() in ('1','yes','on','enable','true')
@staticmethod
def api (_):
encoder = _.lower()
if encoder not in ('text','json'):
raise TypeError('invalid encoder')
return encoder
@staticmethod
def methods (_):
return _.upper().split()
@staticmethod
def list (_):
return "'%s'" % ' '.join(_)
@staticmethod
def lower (_):
return str(_).lower()
@staticmethod
def ip (_):
if isip(_):
return _
raise TypeError('ip %s is invalid' % _)
@staticmethod
def optional_ip (_):
if not _ or isip(_):
return _
raise TypeError('ip %s is invalid' % _)
@staticmethod
def user (_):
# XXX: incomplete
try:
pwd.getpwnam(_)
# uid = answer[2]
except KeyError:
raise TypeError('user %s is not found on this system' % _)
return _
@staticmethod
def folder (path):
paths = environment.root(path)
options = [p for p in paths if os.path.exists(path)]
if not options:
raise TypeError('%s does not exists' % path)
first = options[0]
if not first:
raise TypeError('%s does not exists' % first)
return first
@staticmethod
def path (path):
split = sys.argv[0].split('lib/exabgp')
if len(split) > 1:
prefix = os.sep.join(split[:1])
if prefix and path.startswith(prefix):
path = path[len(prefix):]
home = os.path.expanduser('~')
if path.startswith(home):
return "'~%s'" % path[len(home):]
return "'%s'" % path
@staticmethod
def conf (path):
first = environment.folder(path)
if not os.path.isfile(first):
raise TypeError('%s is not a file' % path)
return first
@staticmethod
def exe (path):
first = environment.conf(path)
if not os.access(first, os.X_OK):
raise TypeError('%s is not an executable' % first)
return first
@staticmethod
def syslog (path):
path = environment.unquote(path)
if path in ('stdout','stderr'):
return path
if path.startswith('host:'):
return path
return path
@staticmethod
def redirector (name):
if name == 'url' or name.startswith('icap://'):
return name
raise TypeError('invalid redirector protocol %s, options are url or header' % name)
@staticmethod
def syslog_value (log):
if log not in environment.log_levels:
if log == 'CRITICAL':
log = 'CRIT'
if log == 'ERROR':
log = 'ERR'
raise TypeError('invalid log level %s' % log)
return getattr(syslog,'LOG_%s' % log)
@staticmethod
def syslog_name (log):
for name in environment.log_levels:
if name == 'CRITICAL':
name = 'CRIT'
if name == 'ERROR':
name = 'ERR'
if getattr(syslog,'LOG_%s' % name) == log:
return name
raise TypeError('invalid log level %s' % log)
@staticmethod
def umask_read (_):
return int(_, 8)
@staticmethod
def umask_write (_):
return "'%s'" % (oct(_))
@staticmethod
def default ():
for section in sorted(environment.configuration):
if section in ('internal','debug'):
continue
for option in sorted(environment.configuration[section]):
values = environment.configuration[section][option]
default = "'%s'" % values['value'] if values['write'] in (environment.list,environment.path,environment.quote,environment.syslog) else values['value']
yield '%s.%s.%s %s: %s. default (%s)' % (environment.application,section,option,' '*(20-len(section)-len(option)),values['help'],default)
@staticmethod
def iter_ini (diff=False):
for section in sorted(environment._settings):
if section in ('internal','debug'):
continue
header = '\n[%s.%s]' % (environment.application,section)
for k in sorted(environment._settings[section]):
v = environment._settings[section][k]
if diff and environment.configuration[section][k]['read'](environment.configuration[section][k]['value']) == v:
continue
if header:
yield header
header = ''
yield '%s = %s' % (k,environment.configuration[section][k]['write'](v))
@staticmethod
def iter_env (diff=False):
for section,values in environment._settings.items():
if section in ('internal','debug'):
continue
for k,v in values.items():
if diff and environment.configuration[section][k]['read'](environment.configuration[section][k]['value']) == v:
continue
if environment.configuration[section][k]['write'] == environment.quote:
yield "%s.%s.%s='%s'" % (environment.application,section,k,v)
continue
yield "%s.%s.%s=%s" % (environment.application,section,k,environment.configuration[section][k]['write'](v))
# ========================================================================= _env
#
import ConfigParser
from exabgp.util.hashtable import HashTable
def _env (conf):
    """Build the settings HashTable described by environment.configuration.

    Each option's value is looked up in priority order: the process
    environment (dotted '<app>.<section>.<option>' or underscored variable
    name), then the first existing .env ini file, then the hard-coded
    default.  Values are passed through the option's 'read' converter.
    """
    # Locate the installation's 'lib' directory from the program location.
    here = os.path.join(os.sep,*os.path.join(environment.location.split(os.sep)))
    location, directory = os.path.split(here)
    while directory:
        if directory == 'lib':
            location = os.path.join(location,'lib')
            break
        location, directory = os.path.split(location)
    # we did not break - ie, we did not find the location in the normal path.
    else:
        # let's try to see if we are running from the QA folder (for unittesting)
        location, directory = os.path.split(here)
        while directory:
            if directory == 'dev':
                location = os.path.join(location,'lib')
                break
            location, directory = os.path.split(location)
        else:
            # oh ! bad, let set the path to something ...
            location = '/lib'

    # Candidate .env files, highest priority first: explicit 'conf'
    # argument, install-local etc/, then system-wide /etc/.
    _conf_paths = []
    if conf:
        _conf_paths.append(os.path.abspath(os.path.normpath(conf)))
    if location:
        _conf_paths.append(os.path.normpath(os.path.join(location,'etc',environment.application,'%s.env' % environment.application)))
    _conf_paths.append(os.path.normpath(os.path.join('/','etc',environment.application,'%s.env' % environment.application)))

    env = HashTable()
    ini = ConfigParser.ConfigParser()

    # Only the first existing file is read.
    ini_files = [path for path in _conf_paths if os.path.exists(path)]
    if ini_files:
        ini.read(ini_files[0])

    for section in environment.configuration:
        default = environment.configuration[section]

        for option in default:
            convert = default[option]['read']
            try:
                proxy_section = '%s.%s' % (environment.application,section)
                env_name = '%s.%s' % (proxy_section,option)
                rep_name = env_name.replace('.','_')

                # Environment variables override the ini file.
                if env_name in os.environ:
                    conf = os.environ.get(env_name)
                elif rep_name in os.environ:
                    conf = os.environ.get(rep_name)
                else:
                    # NOTE(review): nonedict is passed positionally where
                    # ConfigParser.get expects 'raw'; it is truthy, so
                    # interpolation is disabled -- confirm this is intended.
                    conf = environment.unquote(ini.get(proxy_section,option,nonedict))
                    # name without an = or : in the configuration and no value
                    if conf is None:
                        conf = default[option]['value']
            except (ConfigParser.NoSectionError,ConfigParser.NoOptionError):
                conf = default[option]['value']
            try:
                env.setdefault(section,HashTable())[option] = convert(conf)
            except TypeError:
                raise environment.Error('invalid value for %s.%s : %s' % (section,option,conf))
    return env
| bsd-3-clause |
rdipietro/tensorflow | tensorflow/contrib/distributions/python/ops/inverse_gamma.py | 6 | 8883 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The InverseGamma distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
class InverseGamma(distribution.Distribution):
  """The `InverseGamma` distribution with parameter alpha and beta.

  The parameters are the shape and inverse scale parameters alpha, beta.

  The PDF of this distribution is:

  ```pdf(x) = (beta^alpha)/Gamma(alpha)(x^(-alpha-1))e^(-beta/x), x > 0```

  and the CDF of this distribution is:

  ```cdf(x) = GammaInc(alpha, beta / x) / Gamma(alpha), x > 0```

  where GammaInc is the upper incomplete Gamma function.

  Examples:

  ```python
  dist = InverseGamma(alpha=3.0, beta=2.0)
  dist2 = InverseGamma(alpha=[3.0, 4.0], beta=[2.0, 3.0])
  ```
  """

  def __init__(self,
               alpha,
               beta,
               validate_args=False,
               allow_nan_stats=True,
               name="InverseGamma"):
    """Construct InverseGamma distributions with parameters `alpha` and `beta`.

    The parameters `alpha` and `beta` must be shaped in a way that supports
    broadcasting (e.g. `alpha + beta` is a valid operation).

    Args:
      alpha: Floating point tensor, the shape params of the
        distribution(s).
        alpha must contain only positive values.
      beta: Floating point tensor, the scale params of the distribution(s).
        beta must contain only positive values.
      validate_args: `Boolean`, default `False`.  Whether to assert that
        `a > 0`, `b > 0`, and that `x > 0` in the methods `prob(x)` and
        `log_prob(x)`.  If `validate_args` is `False` and the inputs are
        invalid, correct behavior is not guaranteed.
      allow_nan_stats: `Boolean`, default `True`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member.  If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to prepend to all ops created by this distribution.

    Raises:
      TypeError: if `alpha` and `beta` are different dtypes.
    """
    # Capture constructor args (minus self) so the base class can record
    # how this distribution was parameterized.
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[alpha, beta]) as ns:
      # The positivity assertions only run when validate_args is set.
      with ops.control_dependencies([
          check_ops.assert_positive(alpha),
          check_ops.assert_positive(beta),
      ] if validate_args else []):
        self._alpha = array_ops.identity(alpha, name="alpha")
        self._beta = array_ops.identity(beta, name="beta")
    super(InverseGamma, self).__init__(
        dtype=self._alpha.dtype,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        is_continuous=True,
        is_reparameterized=False,
        parameters=parameters,
        graph_parents=[self._alpha, self._beta],
        name=ns)

  @staticmethod
  def _param_shapes(sample_shape):
    # Both parameters broadcast to the sample shape.
    return dict(
        zip(("alpha", "beta"), ([ops.convert_to_tensor(
            sample_shape, dtype=dtypes.int32)] * 2)))

  @property
  def alpha(self):
    """Shape parameter."""
    return self._alpha

  @property
  def beta(self):
    """Scale parameter."""
    return self._beta

  def _batch_shape(self):
    # Dynamic batch shape: the broadcast of alpha and beta.
    return array_ops.shape(self.alpha + self.beta)

  def _get_batch_shape(self):
    # Static counterpart of _batch_shape.
    return common_shapes.broadcast_shape(self.alpha.get_shape(),
                                         self.beta.get_shape())

  def _event_shape(self):
    # Scalar event: samples are single real numbers.
    return constant_op.constant([], dtype=dtypes.int32)

  def _get_event_shape(self):
    return tensor_shape.scalar()

  def _sample_n(self, n, seed=None):
    """See the documentation for tf.random_gamma for more details."""
    # X ~ InverseGamma(alpha, beta)  <=>  1/X ~ Gamma(alpha, beta).
    return 1. / random_ops.random_gamma([n], self.alpha, beta=self.beta,
                                        dtype=self.dtype, seed=seed)

  def _log_prob(self, x):
    x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
                                           self.validate_args else [], x)
    # log pdf = alpha*log(beta) - lgamma(alpha) - (alpha+1)*log(x) - beta/x
    return (self.alpha * math_ops.log(self.beta) -
            math_ops.lgamma(self.alpha) -
            (self.alpha + 1.) * math_ops.log(x) - self.beta / x)

  def _prob(self, x):
    return math_ops.exp(self._log_prob(x))

  def _log_cdf(self, x):
    return math_ops.log(self._cdf(x))

  def _cdf(self, x):
    x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
                                           self.validate_args else [], x)
    # Note that igammac returns the upper regularized incomplete gamma
    # function Q(a, x), which is what we want for the CDF.
    return math_ops.igammac(self.alpha, self.beta / x)

  @distribution_util.AppendDocstring(
      """This is defined to be

      ```
      entropy = alpha - log(beta) + log(Gamma(alpha))
      + (1-alpha)digamma(alpha)
      ```

      where digamma(alpha) is the digamma function.""")
  def _entropy(self):
    return (self.alpha +
            math_ops.log(self.beta) +
            math_ops.lgamma(self.alpha) -
            (1. + self.alpha) * math_ops.digamma(self.alpha))

  @distribution_util.AppendDocstring(
      """The mean of an inverse gamma distribution is `beta / (alpha - 1)`,
      when `alpha > 1`, and `NaN` otherwise.  If `self.allow_nan_stats` is
      `False`, an exception will be raised rather than returning `NaN`""")
  def _mean(self):
    mean = self.beta / (self.alpha - 1.)
    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      # NaN where the mean is undefined (alpha <= 1).
      return array_ops.where(
          self.alpha > 1., mean,
          array_ops.fill(self.batch_shape(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              array_ops.ones((), self.dtype), self.alpha,
              message="mean not defined for components of self.alpha <= 1"),
      ], mean)

  @distribution_util.AppendDocstring(
      """Variance for inverse gamma is defined only for `alpha > 2`.  If
      `self.allow_nan_stats` is `False`, an exception will be raised rather
      than returning `NaN`.""")
  def _variance(self):
    var = (math_ops.square(self.beta) /
           (math_ops.square(self.alpha - 1.) * (self.alpha - 2.)))
    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      # NaN where the variance is undefined (alpha <= 2).
      return array_ops.where(
          self.alpha > 2., var,
          array_ops.fill(self.batch_shape(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              constant_op.constant(2., dtype=self.dtype), self.alpha,
              message="variance not defined for components of alpha <= 2"),
      ], var)

  def _mode(self):
    """The mode of an inverse gamma distribution is `beta / (alpha + 1)`."""
    return self.beta / (self.alpha + 1.)
class InverseGammaWithSoftplusAlphaBeta(InverseGamma):
  """Inverse Gamma with softplus applied to `alpha` and `beta`.

  Softplus maps unconstrained real inputs to the positive reals, so this
  variant can be parameterized by free (possibly negative) variables.
  """

  def __init__(self,
               alpha,
               beta,
               validate_args=False,
               allow_nan_stats=True,
               name="InverseGammaWithSoftplusAlphaBeta"):
    # Record the *pre-softplus* constructor arguments.
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[alpha, beta]) as ns:
      super(InverseGammaWithSoftplusAlphaBeta, self).__init__(
          alpha=nn.softplus(alpha),
          beta=nn.softplus(beta),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=ns)
    self._parameters = parameters
| apache-2.0 |
undoware/neutron-drive | google_appengine/lib/django_1_2/django/db/models/sql/query.py | 43 | 81062 | """
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
from django.utils.copycompat import deepcopy
from django.utils.tree import Node
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_unicode
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.models import signals
from django.db.models.fields import FieldDoesNotExist
from django.db.models.query_utils import select_related_descend, InvalidQuery
from django.db.models.sql import aggregates as base_aggregates_module
from django.db.models.sql.constants import *
from django.db.models.sql.datastructures import EmptyResultSet, Empty, MultiJoin
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.where import (WhereNode, Constraint, EverythingNode,
ExtraWhere, AND, OR)
from django.core.exceptions import FieldError
__all__ = ['Query', 'RawQuery']
class RawQuery(object):
    """
    A single raw SQL query, executed against the database alias ``using``.
    """

    def __init__(self, sql, using, params=None):
        self.validate_sql(sql)
        self.params = params or ()
        self.sql = sql
        self.using = using
        # Lazily created: the query is only executed on first iteration or
        # when column metadata is requested.
        self.cursor = None

        # Mirror some properties of a normal query so that
        # the compiler can be used to process results.
        self.low_mark, self.high_mark = 0, None  # Used for offset/limit
        self.extra_select = {}
        self.aggregate_select = {}

    def clone(self, using):
        # Same SQL and params, possibly targeting a different database alias.
        return RawQuery(self.sql, using, params=self.params)

    def convert_values(self, value, field, connection):
        """Convert the database-returned value into a type that is consistent
        across database backends.

        By default, this defers to the underlying backend operations, but
        it can be overridden by Query classes for specific backends.
        """
        return connection.ops.convert_values(value, field)

    def get_columns(self):
        """Return the (backend-normalized) column names of the result set."""
        if self.cursor is None:
            self._execute_query()
        converter = connections[self.using].introspection.table_name_converter
        return [converter(column_meta[0])
                for column_meta in self.cursor.description]

    def validate_sql(self, sql):
        # Only SELECT statements are accepted here; anything else must go
        # through connection.cursor() directly.
        if not sql.lower().strip().startswith('select'):
            raise InvalidQuery('Raw queries are limited to SELECT queries. Use '
                               'connection.cursor directly for other types of queries.')

    def __iter__(self):
        # Always execute a new query for a new iterator.
        # This could be optimized with a cache at the expense of RAM.
        self._execute_query()
        if not connections[self.using].features.can_use_chunked_reads:
            # If the database can't use chunked reads we need to make sure we
            # evaluate the entire query up front.
            result = list(self.cursor)
        else:
            result = self.cursor
        return iter(result)

    def __repr__(self):
        return "<RawQuery: %r>" % (self.sql % self.params)

    def _execute_query(self):
        # Open a fresh cursor on the target alias and run the raw SQL.
        self.cursor = connections[self.using].cursor()
        self.cursor.execute(self.sql, self.params)
class Query(object):
    """
    A single SQL query.
    """
    # SQL join types. These are part of the class because their string forms
    # vary from database to database and can be customised by a subclass.
    INNER = 'INNER JOIN'
    LOUTER = 'LEFT OUTER JOIN'

    # Prefix used when generating table aliases (T1, T2, ...).
    alias_prefix = 'T'
    # Valid filter lookup types (exact, icontains, ...).
    query_terms = QUERY_TERMS
    # Module providing the SQL aggregate implementations.
    aggregates_module = base_aggregates_module

    # Name of the compiler class looked up on the connection's operations.
    compiler = 'SQLCompiler'
    def __init__(self, model, where=WhereNode):
        """
        Create an empty query for ``model``.  ``where`` is the node class
        used to build the WHERE tree; subclasses and tests may substitute an
        alternative implementation.
        """
        self.model = model
        self.alias_refcount = {}
        self.alias_map = {}     # Maps alias to join information
        self.table_map = {}     # Maps table names to list of aliases.
        self.join_map = {}
        self.rev_join_map = {}  # Reverse of join_map.
        self.quote_cache = {}
        self.default_cols = True
        self.default_ordering = True
        self.standard_ordering = True
        self.ordering_aliases = []
        self.select_fields = []
        self.related_select_fields = []
        self.dupe_avoidance = {}
        self.used_aliases = set()
        self.filter_is_sticky = False
        self.included_inherited_models = {}

        # SQL-related attributes
        self.select = []
        self.tables = []    # Aliases in the order they are created.
        self.where = where()
        self.where_class = where
        self.group_by = None
        self.having = where()
        self.order_by = []
        self.low_mark, self.high_mark = 0, None  # Used for offset/limit
        self.distinct = False
        self.select_related = False
        self.related_select_cols = []

        # SQL aggregate-related attributes
        self.aggregates = SortedDict() # Maps alias -> SQL aggregate function
        self.aggregate_select_mask = None
        self._aggregate_select_cache = None

        # Arbitrary maximum limit for select_related. Prevents infinite
        # recursion. Can be changed by the depth parameter to select_related().
        self.max_depth = 5

        # These are for extensions. The contents are more or less appended
        # verbatim to the appropriate clause.
        self.extra = SortedDict()  # Maps col_alias -> (col_sql, params).
        self.extra_select_mask = None
        self._extra_select_cache = None

        self.extra_tables = ()
        self.extra_order_by = ()

        # A tuple that is a set of model field names and either True, if these
        # are the fields to defer, or False if these are the only fields to
        # load.
        self.deferred_loading = (set(), True)
def __str__(self):
"""
Returns the query as a string of SQL with the parameter values
substituted in.
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
return sql % params
def __deepcopy__(self, memo):
result = self.clone(memo=memo)
memo[id(self)] = result
return result
def __getstate__(self):
"""
Pickling support.
"""
obj_dict = self.__dict__.copy()
obj_dict['related_select_fields'] = []
obj_dict['related_select_cols'] = []
# Fields can't be pickled, so if a field list has been
# specified, we pickle the list of field names instead.
# None is also a possible value; that can pass as-is
obj_dict['select_fields'] = [
f is not None and f.name or None
for f in obj_dict['select_fields']
]
return obj_dict
def __setstate__(self, obj_dict):
"""
Unpickling support.
"""
# Rebuild list of field instances
opts = obj_dict['model']._meta
obj_dict['select_fields'] = [
name is not None and opts.get_field(name) or None
for name in obj_dict['select_fields']
]
self.__dict__.update(obj_dict)
    def prepare(self):
        # Hook for subclasses: return the query object that should actually
        # be executed.  The base implementation is the identity.
        return self
def get_compiler(self, using=None, connection=None):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
# Check that the compiler will be able to execute the query
for alias, aggregate in self.aggregate_select.items():
connection.ops.check_aggregate_support(aggregate)
return connection.ops.compiler(self.compiler)(self, connection, using)
def get_meta(self):
"""
Returns the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
return self.model._meta
    def clone(self, klass=None, memo=None, **kwargs):
        """
        Creates a copy of the current instance. The 'kwargs' parameter can be
        used by clients to update attributes after copying has taken place.

        ``klass`` optionally re-types the copy (used to turn a SELECT query
        into a DELETE/UPDATE/etc. variant); ``memo`` is the deepcopy memo
        dict passed through to nested deep copies.
        """
        # Empty() avoids running __init__; attributes are copied explicitly.
        obj = Empty()
        obj.__class__ = klass or self.__class__
        obj.model = self.model
        obj.alias_refcount = self.alias_refcount.copy()
        obj.alias_map = self.alias_map.copy()
        obj.table_map = self.table_map.copy()
        obj.join_map = self.join_map.copy()
        obj.rev_join_map = self.rev_join_map.copy()
        obj.quote_cache = {}
        obj.default_cols = self.default_cols
        obj.default_ordering = self.default_ordering
        obj.standard_ordering = self.standard_ordering
        obj.included_inherited_models = self.included_inherited_models.copy()
        obj.ordering_aliases = []
        obj.select_fields = self.select_fields[:]
        obj.related_select_fields = self.related_select_fields[:]
        obj.dupe_avoidance = self.dupe_avoidance.copy()
        obj.select = self.select[:]
        obj.tables = self.tables[:]
        obj.where = deepcopy(self.where, memo=memo)
        obj.where_class = self.where_class
        if self.group_by is None:
            obj.group_by = None
        else:
            obj.group_by = self.group_by[:]
        obj.having = deepcopy(self.having, memo=memo)
        obj.order_by = self.order_by[:]
        obj.low_mark, obj.high_mark = self.low_mark, self.high_mark
        obj.distinct = self.distinct
        obj.select_related = self.select_related
        obj.related_select_cols = []
        obj.aggregates = deepcopy(self.aggregates, memo=memo)
        if self.aggregate_select_mask is None:
            obj.aggregate_select_mask = None
        else:
            obj.aggregate_select_mask = self.aggregate_select_mask.copy()
        # _aggregate_select_cache cannot be copied, as doing so breaks the
        # (necessary) state in which both aggregates and
        # _aggregate_select_cache point to the same underlying objects.
        # It will get re-populated in the cloned queryset the next time it's
        # used.
        obj._aggregate_select_cache = None
        obj.max_depth = self.max_depth
        obj.extra = self.extra.copy()
        if self.extra_select_mask is None:
            obj.extra_select_mask = None
        else:
            obj.extra_select_mask = self.extra_select_mask.copy()
        if self._extra_select_cache is None:
            obj._extra_select_cache = None
        else:
            obj._extra_select_cache = self._extra_select_cache.copy()
        obj.extra_tables = self.extra_tables
        obj.extra_order_by = self.extra_order_by
        obj.deferred_loading = deepcopy(self.deferred_loading, memo=memo)
        if self.filter_is_sticky and self.used_aliases:
            obj.used_aliases = self.used_aliases.copy()
        else:
            obj.used_aliases = set()
        # Sticky filters only apply to the very next filter() call.
        obj.filter_is_sticky = False
        obj.__dict__.update(kwargs)
        if hasattr(obj, '_setup_query'):
            obj._setup_query()
        return obj
def convert_values(self, value, field, connection):
"""Convert the database-returned value into a type that is consistent
across database backends.
By default, this defers to the underlying backend operations, but
it can be overridden by Query classes for specific backends.
"""
return connection.ops.convert_values(value, field)
def resolve_aggregate(self, value, aggregate, connection):
"""Resolve the value of aggregates returned by the database to
consistent (and reasonable) types.
This is required because of the predisposition of certain backends
to return Decimal and long types when they are not needed.
"""
if value is None:
if aggregate.is_ordinal:
return 0
# Return None as-is
return value
elif aggregate.is_ordinal:
# Any ordinal aggregate (e.g., count) returns an int
return int(value)
elif aggregate.is_computed:
# Any computed aggregate (e.g., avg) returns a float
return float(value)
else:
# Return value depends on the type of the field being processed.
return self.convert_values(value, aggregate.field, connection)
    def get_aggregation(self, using):
        """
        Returns the dictionary with the values of the existing aggregations,
        executed against the database alias ``using``.
        """
        if not self.aggregate_select:
            return {}

        # If there is a group by clause, aggregating does not add useful
        # information but retrieves only the first row. Aggregate
        # over the subquery instead.
        if self.group_by is not None:
            from django.db.models.sql.subqueries import AggregateQuery
            query = AggregateQuery(self.model)

            obj = self.clone()

            # Remove any aggregates marked for reduction from the subquery
            # and move them to the outer AggregateQuery.
            for alias, aggregate in self.aggregate_select.items():
                if aggregate.is_summary:
                    query.aggregate_select[alias] = aggregate
                    del obj.aggregate_select[alias]

            try:
                query.add_subquery(obj, using)
            except EmptyResultSet:
                # The subquery can't match anything: every aggregate is None.
                return dict(
                    (alias, None)
                    for alias in query.aggregate_select
                )
        else:
            query = self
            self.select = []
            self.default_cols = False
            self.extra = {}
            self.remove_inherited_models()

        # Ordering, limits and related selection are irrelevant to (and may
        # break) a pure aggregate query.
        query.clear_ordering(True)
        query.clear_limits()
        query.select_related = False
        query.related_select_cols = []
        query.related_select_fields = []

        result = query.get_compiler(using).execute_sql(SINGLE)
        if result is None:
            result = [None for q in query.aggregate_select.items()]

        return dict([
            (alias, self.resolve_aggregate(val, aggregate, connection=connections[using]))
            for (alias, aggregate), val
            in zip(query.aggregate_select.items(), result)
        ])
    def get_count(self, using):
        """
        Performs a COUNT() query using the current filter constraints,
        against the database alias ``using``.
        """
        obj = self.clone()
        if len(self.select) > 1 or self.aggregate_select:
            # If a select clause exists, then the query has already started to
            # specify the columns that are to be returned.
            # In this case, we need to use a subquery to evaluate the count.
            from django.db.models.sql.subqueries import AggregateQuery
            subquery = obj
            subquery.clear_ordering(True)
            subquery.clear_limits()

            obj = AggregateQuery(obj.model)
            try:
                obj.add_subquery(subquery, using=using)
            except EmptyResultSet:
                # add_subquery evaluates the query; if it's an EmptyResultSet
                # then there can be no results, and therefore the count is
                # obviously 0.
                return 0

        obj.add_count_column()
        number = obj.get_aggregation(using=using)[None]

        # Apply offset and limit constraints manually, since using LIMIT/OFFSET
        # in SQL (in variants that provide them) doesn't change the COUNT
        # output.
        number = max(0, number - self.low_mark)
        if self.high_mark is not None:
            number = min(number, self.high_mark - self.low_mark)

        return number
    def has_results(self, using):
        # Cheap existence check: strip the clone down to essentially
        # "SELECT 1 ... LIMIT 1" so the database does minimal work.
        q = self.clone()
        q.add_extra({'a': 1}, None, None, None, None, None)
        q.select = []
        q.select_fields = []
        q.default_cols = False
        q.select_related = False
        q.set_extra_mask(('a',))
        q.set_aggregate_mask(())
        q.clear_ordering(True)
        q.set_limits(high=1)
        compiler = q.get_compiler(using=using)
        return bool(compiler.execute_sql(SINGLE))
    def combine(self, rhs, connector):
        """
        Merge the 'rhs' query into the current one (with any 'rhs' effects
        being applied *after* (that is, "to the right of") anything in the
        current query. 'rhs' is not modified during a call to this function.

        The 'connector' parameter describes how to connect filters from the
        'rhs' query (AND or OR).
        """
        assert self.model == rhs.model, \
                "Cannot combine queries on two different base models."
        assert self.can_filter(), \
                "Cannot combine queries once a slice has been taken."
        assert self.distinct == rhs.distinct, \
            "Cannot combine a unique query with a non-unique query."

        self.remove_inherited_models()
        # Work out how to relabel the rhs aliases, if necessary.
        change_map = {}
        used = set()
        conjunction = (connector == AND)
        first = True
        for alias in rhs.tables:
            if not rhs.alias_refcount[alias]:
                # An unused alias.
                continue
            promote = (rhs.alias_map[alias][JOIN_TYPE] == self.LOUTER)
            new_alias = self.join(rhs.rev_join_map[alias],
                    (conjunction and not first), used, promote, not conjunction)
            used.add(new_alias)
            change_map[alias] = new_alias
            first = False

        # So that we don't exclude valid results in an "or" query combination,
        # the first join that is exclusive to the lhs (self) must be converted
        # to an outer join.
        if not conjunction:
            for alias in self.tables[1:]:
                if self.alias_refcount[alias] == 1:
                    self.promote_alias(alias, True)
                    break

        # Now relabel a copy of the rhs where-clause and add it to the current
        # one.
        if rhs.where:
            w = deepcopy(rhs.where)
            w.relabel_aliases(change_map)
            if not self.where:
                # Since 'self' matches everything, add an explicit "include
                # everything" where-constraint so that connections between the
                # where clauses won't exclude valid results.
                self.where.add(EverythingNode(), AND)
        elif self.where:
            # rhs has an empty where clause.
            w = self.where_class()
            w.add(EverythingNode(), AND)
        else:
            w = self.where_class()
        self.where.add(w, connector)

        # Selection columns and extra extensions are those provided by 'rhs'.
        self.select = []
        for col in rhs.select:
            if isinstance(col, (list, tuple)):
                # (alias, column) pairs: translate the alias.
                self.select.append((change_map.get(col[0], col[0]), col[1]))
            else:
                item = deepcopy(col)
                item.relabel_aliases(change_map)
                self.select.append(item)
        self.select_fields = rhs.select_fields[:]

        if connector == OR:
            # It would be nice to be able to handle this, but the queries don't
            # really make sense (or return consistent value sets). Not worth
            # the extra complexity when you can write a real query instead.
            if self.extra and rhs.extra:
                raise ValueError("When merging querysets using 'or', you "
                        "cannot have extra(select=...) on both sides.")
        self.extra.update(rhs.extra)
        extra_select_mask = set()
        if self.extra_select_mask is not None:
            extra_select_mask.update(self.extra_select_mask)
        if rhs.extra_select_mask is not None:
            extra_select_mask.update(rhs.extra_select_mask)
        if extra_select_mask:
            self.set_extra_mask(extra_select_mask)
        self.extra_tables += rhs.extra_tables

        # Ordering uses the 'rhs' ordering, unless it has none, in which case
        # the current ordering is used.
        self.order_by = rhs.order_by and rhs.order_by[:] or self.order_by
        self.extra_order_by = rhs.extra_order_by or self.extra_order_by
def deferred_to_data(self, target, callback):
"""
Converts the self.deferred_loading data structure to an alternate data
structure, describing the field that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialised on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
The "target" parameter is the instance that is populated (in place).
The "callback" is a function that is called whenever a (model, field)
pair need to be added to "target". It accepts three parameters:
"target", and the model and list of fields being added for that model.
"""
field_names, defer = self.deferred_loading
if not field_names:
return
columns = set()
orig_opts = self.model._meta
seen = {}
must_include = {self.model: set([orig_opts.pk])}
for field_name in field_names:
parts = field_name.split(LOOKUP_SEP)
cur_model = self.model
opts = orig_opts
for name in parts[:-1]:
old_model = cur_model
source = opts.get_field_by_name(name)[0]
cur_model = opts.get_field_by_name(name)[0].rel.to
opts = cur_model._meta
# Even if we're "just passing through" this model, we must add
# both the current model's pk and the related reference field
# to the things we select.
must_include[old_model].add(source)
add_to_dict(must_include, cur_model, opts.pk)
field, model, _, _ = opts.get_field_by_name(parts[-1])
if model is None:
model = cur_model
add_to_dict(seen, model, field)
if defer:
# We need to load all fields for each model, except those that
# appear in "seen" (for all models that appear in "seen"). The only
# slight complexity here is handling fields that exist on parent
# models.
workset = {}
for model, values in seen.iteritems():
for field, m in model._meta.get_fields_with_model():
if field in values:
continue
add_to_dict(workset, m or model, field)
for model, values in must_include.iteritems():
# If we haven't included a model in workset, we don't add the
# corresponding must_include fields for that model, since an
# empty set means "include all fields". That's why there's no
# "else" branch here.
if model in workset:
workset[model].update(values)
for model, values in workset.iteritems():
callback(target, model, values)
else:
for model, values in must_include.iteritems():
if model in seen:
seen[model].update(values)
else:
# As we've passed through this model, but not explicitly
# included any fields, we have to make sure it's mentioned
# so that only the "must include" fields are pulled in.
seen[model] = values
# Now ensure that every model in the inheritance chain is mentioned
# in the parent list. Again, it must be mentioned to ensure that
# only "must include" fields are pulled in.
for model in orig_opts.get_parent_list():
if model not in seen:
seen[model] = set()
for model, values in seen.iteritems():
callback(target, model, values)
def deferred_to_columns_cb(self, target, model, fields):
"""
Callback used by deferred_to_columns(). The "target" parameter should
be a set instance.
"""
table = model._meta.db_table
if table not in target:
target[table] = set()
for field in fields:
target[table].add(field.column)
def table_alias(self, table_name, create=False):
"""
Returns a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
current = self.table_map.get(table_name)
if not create and current:
alias = current[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if current:
alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
current.append(alias)
else:
# The first occurence of a table uses the table name directly.
alias = table_name
self.table_map[alias] = [alias]
self.alias_refcount[alias] = 1
self.tables.append(alias)
return alias, True
def ref_alias(self, alias):
""" Increases the reference count for this alias. """
self.alias_refcount[alias] += 1
def unref_alias(self, alias):
""" Decreases the reference count for this alias. """
self.alias_refcount[alias] -= 1
def promote_alias(self, alias, unconditional=False):
"""
Promotes the join type of an alias to an outer join if it's possible
for the join to contain NULL values on the left. If 'unconditional' is
False, the join is only promoted if it is nullable, otherwise it is
always promoted.
Returns True if the join was promoted.
"""
if ((unconditional or self.alias_map[alias][NULLABLE]) and
self.alias_map[alias][JOIN_TYPE] != self.LOUTER):
data = list(self.alias_map[alias])
data[JOIN_TYPE] = self.LOUTER
self.alias_map[alias] = tuple(data)
return True
return False
def promote_alias_chain(self, chain, must_promote=False):
"""
Walks along a chain of aliases, promoting the first nullable join and
any joins following that. If 'must_promote' is True, all the aliases in
the chain are promoted.
"""
for alias in chain:
if self.promote_alias(alias, must_promote):
must_promote = True
    def promote_unused_aliases(self, initial_refcounts, used_aliases):
        """
        Given a "before" copy of the alias_refcounts dictionary (as
        'initial_refcounts') and a collection of aliases that may have been
        changed or created, works out which aliases have been created since
        then and which ones haven't been used and promotes all of those
        aliases, plus any children of theirs in the alias tree, to outer joins.
        """
        # FIXME: There's some (a lot of!) overlap with the similar OR promotion
        # in add_filter(). It's not quite identical, but is very similar. So
        # pulling out the common bits is something for later.
        # Maps alias -> True when an ancestor of it was promoted, so that
        # children of a promoted join are promoted unconditionally.
        considered = {}
        for alias in self.tables:
            if alias not in used_aliases:
                continue
            # An alias is a promotion candidate when it was created after the
            # snapshot or its reference count has not moved since then.
            if (alias not in initial_refcounts or
                self.alias_refcount[alias] == initial_refcounts[alias]):
                parent = self.alias_map[alias][LHS_ALIAS]
                must_promote = considered.get(parent, False)
                promoted = self.promote_alias(alias, must_promote)
                considered[alias] = must_promote or promoted
    def change_aliases(self, change_map):
        """
        Changes the aliases in change_map (which maps old-alias -> new-alias),
        relabelling any references to them in select columns and the where
        clause.
        """
        # No alias may be both a key and a value: that would make the rename
        # order-dependent (renaming a -> b -> c in one pass).
        assert set(change_map.keys()).intersection(set(change_map.values())) == set()

        # 1. Update references in "select" (normal columns plus aliases),
        # "group by", "where" and "having".
        self.where.relabel_aliases(change_map)
        self.having.relabel_aliases(change_map)
        for columns in [self.select, self.group_by or []]:
            for pos, col in enumerate(columns):
                if isinstance(col, (list, tuple)):
                    # A plain (alias, column) pair.
                    old_alias = col[0]
                    columns[pos] = (change_map.get(old_alias, old_alias), col[1])
                else:
                    # An expression object that knows how to relabel itself.
                    col.relabel_aliases(change_map)
        for mapping in [self.aggregates]:
            for key, col in mapping.items():
                if isinstance(col, (list, tuple)):
                    old_alias = col[0]
                    mapping[key] = (change_map.get(old_alias, old_alias), col[1])
                else:
                    col.relabel_aliases(change_map)

        # 2. Rename the alias in the internal table/alias datastructures.
        for old_alias, new_alias in change_map.iteritems():
            alias_data = list(self.alias_map[old_alias])
            alias_data[RHS_ALIAS] = new_alias

            # Keep join_map / rev_join_map (join identity <-> alias) in sync.
            t = self.rev_join_map[old_alias]
            data = list(self.join_map[t])
            data[data.index(old_alias)] = new_alias
            self.join_map[t] = tuple(data)
            self.rev_join_map[new_alias] = t
            del self.rev_join_map[old_alias]
            self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
            del self.alias_refcount[old_alias]
            self.alias_map[new_alias] = tuple(alias_data)
            del self.alias_map[old_alias]

            table_aliases = self.table_map[alias_data[TABLE_NAME]]
            for pos, alias in enumerate(table_aliases):
                if alias == old_alias:
                    table_aliases[pos] = new_alias
                    break
            for pos, alias in enumerate(self.tables):
                if alias == old_alias:
                    self.tables[pos] = new_alias
                    break
        for key, alias in self.included_inherited_models.items():
            if alias in change_map:
                self.included_inherited_models[key] = change_map[alias]

        # 3. Update any joins that refer to the old alias.
        for alias, data in self.alias_map.iteritems():
            lhs = data[LHS_ALIAS]
            if lhs in change_map:
                data = list(data)
                data[LHS_ALIAS] = change_map[lhs]
                self.alias_map[alias] = tuple(data)
def bump_prefix(self, exceptions=()):
"""
Changes the alias prefix to the next letter in the alphabet and
relabels all the aliases. Even tables that previously had no alias will
get an alias after this call (it's mostly used for nested queries and
the outer query will already be using the non-aliased table name).
Subclasses who create their own prefix should override this method to
produce a similar result (a new prefix and relabelled aliases).
The 'exceptions' parameter is a container that holds alias names which
should not be changed.
"""
current = ord(self.alias_prefix)
assert current < ord('Z')
prefix = chr(current + 1)
self.alias_prefix = prefix
change_map = {}
for pos, alias in enumerate(self.tables):
if alias in exceptions:
continue
new_alias = '%s%d' % (prefix, pos)
change_map[alias] = new_alias
self.tables[pos] = new_alias
self.change_aliases(change_map)
def get_initial_alias(self):
"""
Returns the first alias for this query, after increasing its reference
count.
"""
if self.tables:
alias = self.tables[0]
self.ref_alias(alias)
else:
alias = self.join((None, self.model._meta.db_table, None, None))
return alias
def count_active_tables(self):
"""
Returns the number of tables in this query with a non-zero reference
count.
"""
return len([1 for count in self.alias_refcount.itervalues() if count])
    def join(self, connection, always_create=False, exclusions=(),
            promote=False, outer_if_first=False, nullable=False, reuse=None):
        """
        Returns an alias for the join in 'connection', either reusing an
        existing alias for that join or creating a new one. 'connection' is a
        tuple (lhs, table, lhs_col, col) where 'lhs' is either an existing
        table alias or a table name. The join corresponds to the SQL equivalent
        of::

            lhs.lhs_col = table.col

        If 'always_create' is True and 'reuse' is None, a new alias is always
        created, regardless of whether one already exists or not. If
        'always_create' is True and 'reuse' is a set, an alias in 'reuse' that
        matches the connection will be returned, if possible.  If
        'always_create' is False, the first existing alias that matches the
        'connection' is returned, if any. Otherwise a new join is created.

        If 'exclusions' is specified, it is something satisfying the container
        protocol ("foo in exclusions" must work) and specifies a list of
        aliases that should not be returned, even if they satisfy the join.

        If 'promote' is True, the join type for the alias will be LOUTER (if
        the alias previously existed, the join type will be promoted from INNER
        to LOUTER, if necessary).

        If 'outer_if_first' is True and a new join is created, it will have the
        LOUTER join type. This is used when joining certain types of querysets
        and Q-objects together.

        If 'nullable' is True, the join can potentially involve NULL values and
        is a candidate for promotion (to "left outer") when combining querysets.
        """
        lhs, table, lhs_col, col = connection
        if lhs in self.alias_map:
            lhs_table = self.alias_map[lhs][TABLE_NAME]
        else:
            # 'lhs' is a raw table name (or None for the root entry).
            lhs_table = lhs

        if reuse and always_create and table in self.table_map:
            # Convert the 'reuse' to case to be "exclude everything but the
            # reusable set, minus exclusions, for this table".
            exclusions = set(self.table_map[table]).difference(reuse).union(set(exclusions))
            always_create = False
        # Identity of the join: used to find existing aliases for it.
        t_ident = (lhs_table, table, lhs_col, col)
        if not always_create:
            for alias in self.join_map.get(t_ident, ()):
                if alias not in exclusions:
                    if lhs_table and not self.alias_refcount[self.alias_map[alias][LHS_ALIAS]]:
                        # The LHS of this join tuple is no longer part of the
                        # query, so skip this possibility.
                        continue
                    if self.alias_map[alias][LHS_ALIAS] != lhs:
                        continue
                    self.ref_alias(alias)
                    if promote:
                        self.promote_alias(alias)
                    return alias

        # No reuse is possible, so we need a new alias.
        alias, _ = self.table_alias(table, True)
        if not lhs:
            # Not all tables need to be joined to anything. No join type
            # means the later columns are ignored.
            join_type = None
        elif promote or outer_if_first:
            join_type = self.LOUTER
        else:
            join_type = self.INNER
        join = (table, alias, join_type, lhs, lhs_col, col, nullable)
        self.alias_map[alias] = join
        # Record the new alias under the join identity (and the reverse
        # mapping) so future calls can reuse it.
        if t_ident in self.join_map:
            self.join_map[t_ident] += (alias,)
        else:
            self.join_map[t_ident] = (alias,)
        self.rev_join_map[alias] = t_ident
        return alias
    def setup_inherited_models(self):
        """
        If the model that is the basis for this QuerySet inherits other models,
        we need to ensure that those other models have their tables included in
        the query.

        We do this as a separate step so that subclasses know which
        tables are going to be active in the query, without needing to compute
        all the select columns (this method is called from pre_sql_setup(),
        whereas column determination is a later part, and side-effect, of
        as_sql()).
        """
        opts = self.model._meta
        root_alias = self.tables[0]
        # Maps model class -> table alias; the None key marks the root model.
        seen = {None: root_alias}

        # Skip all proxy to the root proxied model
        proxied_model = get_proxied_model(opts)

        for field, model in opts.get_fields_with_model():
            if model not in seen:
                if model is proxied_model:
                    # Proxy models share the concrete model's table.
                    seen[model] = root_alias
                else:
                    # Join each ancestor table via the implicit parent link
                    # (ancestor link column -> ancestor primary key).
                    link_field = opts.get_ancestor_link(model)
                    seen[model] = self.join((root_alias, model._meta.db_table,
                            link_field.column, model._meta.pk.column))
        self.included_inherited_models = seen
def remove_inherited_models(self):
"""
Undoes the effects of setup_inherited_models(). Should be called
whenever select columns (self.select) are set explicitly.
"""
for key, alias in self.included_inherited_models.items():
if key:
self.unref_alias(alias)
self.included_inherited_models = {}
def need_force_having(self, q_object):
"""
Returns whether or not all elements of this q_object need to be put
together in the HAVING clause.
"""
for child in q_object.children:
if isinstance(child, Node):
if self.need_force_having(child):
return True
else:
if child[0].split(LOOKUP_SEP)[0] in self.aggregates:
return True
return False
    def add_aggregate(self, aggregate, model, alias, is_summary):
        """
        Adds a single aggregate expression to the Query.

        'aggregate' is the aggregate instance, 'model' the model it is
        computed against, 'alias' the name its result is exposed under and
        'is_summary' is True when the aggregate summarises the whole queryset
        rather than being computed per row (annotation).
        """
        opts = model._meta
        field_list = aggregate.lookup.split(LOOKUP_SEP)
        if len(field_list) == 1 and aggregate.lookup in self.aggregates:
            # Aggregate is over an annotation
            field_name = field_list[0]
            col = field_name
            source = self.aggregates[field_name]
            if not is_summary:
                raise FieldError("Cannot compute %s('%s'): '%s' is an aggregate" % (
                    aggregate.name, field_name, field_name))
        elif ((len(field_list) > 1) or
            (field_list[0] not in [i.name for i in opts.fields]) or
            self.group_by is None or
            not is_summary):
            # If:
            #   - the field descriptor has more than one part (foo__bar), or
            #   - the field descriptor is referencing an m2m/m2o field, or
            #   - this is a reference to a model field (possibly inherited), or
            #   - this is an annotation over a model field
            # then we need to explore the joins that are required.

            field, source, opts, join_list, last, _ = self.setup_joins(
                field_list, opts, self.get_initial_alias(), False)

            # Process the join chain to see if it can be trimmed
            col, _, join_list = self.trim_joins(source, join_list, last, False)

            # If the aggregate references a model or field that requires a join,
            # those joins must be LEFT OUTER - empty join rows must be returned
            # in order for zeros to be returned for those aggregates.
            for column_alias in join_list:
                self.promote_alias(column_alias, unconditional=True)

            col = (join_list[-1], col)
        else:
            # The simplest cases. No joins required -
            # just reference the provided column alias.
            field_name = field_list[0]
            source = opts.get_field(field_name)
            col = field_name

        # Add the aggregate to the query
        aggregate.add_to_query(self, alias, col=col, source=source, is_summary=is_summary)
    def add_filter(self, filter_expr, connector=AND, negate=False, trim=False,
            can_reuse=None, process_extras=True, force_having=False):
        """
        Add a single filter to the query. The 'filter_expr' is a pair:
        (filter_string, value). E.g. ('name__contains', 'fred')

        If 'negate' is True, this is an exclude() filter. It's important to
        note that this method does not negate anything in the where-clause
        object when inserting the filter constraints. This is because negated
        filters often require multiple calls to add_filter() and the negation
        should only happen once. So the caller is responsible for this (the
        caller will normally be add_q(), so that as an example).

        If 'trim' is True, we automatically trim the final join group (used
        internally when constructing nested queries).

        If 'can_reuse' is a set, we are processing a component of a
        multi-component filter (e.g. filter(Q1, Q2)). In this case, 'can_reuse'
        will be a set of table aliases that can be reused in this filter, even
        if we would otherwise force the creation of new aliases for a join
        (needed for nested Q-filters). The set is updated by this method.

        If 'process_extras' is set, any extra filters returned from the table
        joining process will be processed. This parameter is set to False
        during the processing of extra filters to avoid infinite recursion.
        """
        arg, value = filter_expr
        parts = arg.split(LOOKUP_SEP)
        if not parts:
            raise FieldError("Cannot parse keyword query %r" % arg)

        # Work out the lookup type and remove it from 'parts', if necessary.
        if len(parts) == 1 or parts[-1] not in self.query_terms:
            lookup_type = 'exact'
        else:
            lookup_type = parts.pop()

        # By default, this is a WHERE clause. If an aggregate is referenced
        # in the value, the filter will be promoted to a HAVING
        having_clause = False

        # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
        # uses of None as a query value.
        if value is None:
            if lookup_type != 'exact':
                raise ValueError("Cannot use None as a query value")
            lookup_type = 'isnull'
            value = True
        elif callable(value):
            # Lazily-evaluated values (e.g. callables) are resolved now.
            value = value()
        elif hasattr(value, 'evaluate'):
            # If value is a query expression, evaluate it
            value = SQLEvaluator(value, self)
            having_clause = value.contains_aggregate

        if parts[0] in self.aggregates:
            # The filter references an annotation: it belongs in HAVING.
            aggregate = self.aggregates[parts[0]]
            entry = self.where_class()
            entry.add((aggregate, lookup_type, value), AND)
            if negate:
                entry.negate()
            self.having.add(entry, connector)
            return

        opts = self.get_meta()
        alias = self.get_initial_alias()
        allow_many = trim or not negate

        try:
            field, target, opts, join_list, last, extra_filters = self.setup_joins(
                    parts, opts, alias, True, allow_many, can_reuse=can_reuse,
                    negate=negate, process_extras=process_extras)
        except MultiJoin, e:
            # Excluding across a multi-valued relation needs a subquery.
            self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
                    can_reuse)
            return

        if (lookup_type == 'isnull' and value is True and not negate and
                len(join_list) > 1):
            # If the comparison is against NULL, we may need to use some left
            # outer joins when creating the join chain. This is only done when
            # needed, as it's less efficient at the database level.
            self.promote_alias_chain(join_list)

        # Process the join list to see if we can remove any inner joins from
        # the far end (fewer tables in a query is better).
        col, alias, join_list = self.trim_joins(target, join_list, last, trim)

        if connector == OR:
            # Some joins may need to be promoted when adding a new filter to a
            # disjunction. We walk the list of new joins and where it diverges
            # from any previous joins (ref count is 1 in the table list), we
            # make the new additions (and any existing ones not used in the new
            # join list) an outer join.
            join_it = iter(join_list)
            table_it = iter(self.tables)
            join_it.next(), table_it.next()
            table_promote = False
            join_promote = False
            for join in join_it:
                table = table_it.next()
                if join == table and self.alias_refcount[join] > 1:
                    continue
                join_promote = self.promote_alias(join)
                if table != join:
                    table_promote = self.promote_alias(table)
                break
            self.promote_alias_chain(join_it, join_promote)
            self.promote_alias_chain(table_it, table_promote)

        if having_clause or force_having:
            # HAVING requires the compared column to appear in GROUP BY.
            if (alias, col) not in self.group_by:
                self.group_by.append((alias, col))
            self.having.add((Constraint(alias, col, field), lookup_type, value),
                connector)
        else:
            self.where.add((Constraint(alias, col, field), lookup_type, value),
                connector)

        if negate:
            self.promote_alias_chain(join_list)
            if lookup_type != 'isnull':
                if len(join_list) > 1:
                    for alias in join_list:
                        if self.alias_map[alias][JOIN_TYPE] == self.LOUTER:
                            # Re-include rows where the outer join produced
                            # NULLs: they must not be excluded by the negation.
                            j_col = self.alias_map[alias][RHS_JOIN_COL]
                            entry = self.where_class()
                            entry.add(
                                (Constraint(alias, j_col, None), 'isnull', True),
                                AND
                            )
                            entry.negate()
                            self.where.add(entry, AND)
                            break
                if not (lookup_type == 'in'
                            and not hasattr(value, 'as_sql')
                            and not hasattr(value, '_as_sql')
                            and not value) and field.null:
                    # Leaky abstraction artifact: We have to specifically
                    # exclude the "foo__in=[]" case from this handling, because
                    # it's short-circuited in the Where class.
                    # We also need to handle the case where a subquery is provided
                    self.where.add((Constraint(alias, col, None), 'isnull', False), AND)

        if can_reuse is not None:
            can_reuse.update(join_list)
        if process_extras:
            for filter in extra_filters:
                self.add_filter(filter, negate=negate, can_reuse=can_reuse,
                        process_extras=False)
    def add_q(self, q_object, used_aliases=None, force_having=False):
        """
        Adds a Q-object to the current filter.

        Can also be used to add anything that has an 'add_to_query()' method.
        """
        if used_aliases is None:
            used_aliases = self.used_aliases
        if hasattr(q_object, 'add_to_query'):
            # Complex custom objects are responsible for adding themselves.
            q_object.add_to_query(self, used_aliases)
        else:
            # Wrap the new condition in its own subtree when it must not mix
            # with the existing where-clause at the same level.
            if self.where and q_object.connector != AND and len(q_object) > 1:
                self.where.start_subtree(AND)
                subtree = True
            else:
                subtree = False
            connector = AND
            if q_object.connector == OR and not force_having:
                force_having = self.need_force_having(q_object)
            for child in q_object.children:
                if connector == OR:
                    # Snapshot refcounts so newly-added aliases can be
                    # detected (and promoted) after this child is processed.
                    refcounts_before = self.alias_refcount.copy()
                if force_having:
                    self.having.start_subtree(connector)
                else:
                    self.where.start_subtree(connector)
                if isinstance(child, Node):
                    self.add_q(child, used_aliases, force_having=force_having)
                else:
                    self.add_filter(child, connector, q_object.negated,
                            can_reuse=used_aliases, force_having=force_having)
                if force_having:
                    self.having.end_subtree()
                else:
                    self.where.end_subtree()
                if connector == OR:
                    # Aliases that were newly added or not used at all need to
                    # be promoted to outer joins if they are nullable relations.
                    # (they shouldn't turn the whole conditional into the empty
                    # set just because they don't match anything).
                    self.promote_unused_aliases(refcounts_before, used_aliases)
                # The first child always combines with AND; later children use
                # the Q-object's own connector.
                connector = q_object.connector
            if q_object.negated:
                self.where.negate()
            if subtree:
                self.where.end_subtree()
        if self.filter_is_sticky:
            self.used_aliases = used_aliases
    def setup_joins(self, names, opts, alias, dupe_multis, allow_many=True,
            allow_explicit_fk=False, can_reuse=None, negate=False,
            process_extras=True):
        """
        Compute the necessary table joins for the passage through the fields
        given in 'names'. 'opts' is the Options class for the current model
        (which gives the table we are joining to), 'alias' is the alias for the
        table we are joining to. If dupe_multis is True, any many-to-many or
        many-to-one joins will always create a new alias (necessary for
        disjunctive filters). If can_reuse is not None, it's a list of aliases
        that can be reused in these joins (nothing else can be reused in this
        case). Finally, 'negate' is used in the same sense as for add_filter()
        -- it indicates an exclude() filter, or something similar. It is only
        passed in here so that it can be passed to a field's extra_filter() for
        customised behaviour.

        Returns the final field involved in the join, the target database
        column (used for any 'where' constraint), the final 'opts' value and the
        list of tables joined.
        """
        joins = [alias]
        # 'last' records, per lookup part, the offset into 'joins' where that
        # part's tables start (consumed later by trim_joins()).
        last = [0]
        dupe_set = set()
        exclusions = set()
        extra_filters = []
        for pos, name in enumerate(names):
            # NOTE: 'int_alias' is only bound on m2m branches below; this
            # try/except NameError deliberately picks it up from the previous
            # iteration when it exists.
            try:
                exclusions.add(int_alias)
            except NameError:
                pass
            exclusions.add(alias)
            last.append(len(joins))
            if name == 'pk':
                name = opts.pk.name
            try:
                field, model, direct, m2m = opts.get_field_by_name(name)
            except FieldDoesNotExist:
                for f in opts.fields:
                    if allow_explicit_fk and name == f.attname:
                        # XXX: A hack to allow foo_id to work in values() for
                        # backwards compatibility purposes. If we dropped that
                        # feature, this could be removed.
                        field, model, direct, m2m = opts.get_field_by_name(f.name)
                        break
                else:
                    names = opts.get_all_field_names() + self.aggregate_select.keys()
                    raise FieldError("Cannot resolve keyword %r into field. "
                            "Choices are: %s" % (name, ", ".join(names)))

            if not allow_many and (m2m or not direct):
                # Multi-valued relation where it isn't allowed: undo the
                # references taken so far and signal the caller.
                for alias in joins:
                    self.unref_alias(alias)
                raise MultiJoin(pos + 1)
            if model:
                # The field lives on a base class of the current model.
                # Skip the chain of proxy to the concrete proxied model
                proxied_model = get_proxied_model(opts)

                for int_model in opts.get_base_chain(model):
                    if int_model is proxied_model:
                        opts = int_model._meta
                    else:
                        lhs_col = opts.parents[int_model].column
                        dedupe = lhs_col in opts.duplicate_targets
                        if dedupe:
                            exclusions.update(self.dupe_avoidance.get(
                                (id(opts), lhs_col), ()))
                            dupe_set.add((opts, lhs_col))
                        opts = int_model._meta
                        alias = self.join((alias, opts.db_table, lhs_col,
                                opts.pk.column), exclusions=exclusions)
                        joins.append(alias)
                        exclusions.add(alias)
                        for (dupe_opts, dupe_col) in dupe_set:
                            self.update_dupe_avoidance(dupe_opts, dupe_col,
                                    alias)
            cached_data = opts._join_cache.get(name)
            orig_opts = opts
            dupe_col = direct and field.column or field.field.column
            dedupe = dupe_col in opts.duplicate_targets
            if dupe_set or dedupe:
                if dedupe:
                    dupe_set.add((opts, dupe_col))
                exclusions.update(self.dupe_avoidance.get((id(opts), dupe_col),
                        ()))

            if process_extras and hasattr(field, 'extra_filters'):
                extra_filters.extend(field.extra_filters(names, pos, negate))
            if direct:
                if m2m:
                    # Many-to-many field defined on the current model.
                    if cached_data:
                        (table1, from_col1, to_col1, table2, from_col2,
                                to_col2, opts, target) = cached_data
                    else:
                        table1 = field.m2m_db_table()
                        from_col1 = opts.get_field_by_name(
                                field.m2m_target_field_name())[0].column
                        to_col1 = field.m2m_column_name()
                        opts = field.rel.to._meta
                        table2 = opts.db_table
                        from_col2 = field.m2m_reverse_name()
                        to_col2 = opts.get_field_by_name(
                                field.m2m_reverse_target_field_name())[0].column
                        target = opts.pk
                        orig_opts._join_cache[name] = (table1, from_col1,
                                to_col1, table2, from_col2, to_col2, opts,
                                target)

                    int_alias = self.join((alias, table1, from_col1, to_col1),
                            dupe_multis, exclusions, nullable=True,
                            reuse=can_reuse)
                    if int_alias == table2 and from_col2 == to_col2:
                        joins.append(int_alias)
                        alias = int_alias
                    else:
                        alias = self.join(
                                (int_alias, table2, from_col2, to_col2),
                                dupe_multis, exclusions, nullable=True,
                                reuse=can_reuse)
                        joins.extend([int_alias, alias])
                elif field.rel:
                    # One-to-one or many-to-one field
                    if cached_data:
                        (table, from_col, to_col, opts, target) = cached_data
                    else:
                        opts = field.rel.to._meta
                        target = field.rel.get_related_field()
                        table = opts.db_table
                        from_col = field.column
                        to_col = target.column
                        orig_opts._join_cache[name] = (table, from_col, to_col,
                                opts, target)

                    alias = self.join((alias, table, from_col, to_col),
                            exclusions=exclusions, nullable=field.null)
                    joins.append(alias)
                else:
                    # Non-relation fields.
                    target = field
                    break
            else:
                orig_field = field
                field = field.field
                if m2m:
                    # Many-to-many field defined on the target model.
                    if cached_data:
                        (table1, from_col1, to_col1, table2, from_col2,
                                to_col2, opts, target) = cached_data
                    else:
                        table1 = field.m2m_db_table()
                        from_col1 = opts.get_field_by_name(
                                field.m2m_reverse_target_field_name())[0].column
                        to_col1 = field.m2m_reverse_name()
                        opts = orig_field.opts
                        table2 = opts.db_table
                        from_col2 = field.m2m_column_name()
                        to_col2 = opts.get_field_by_name(
                                field.m2m_target_field_name())[0].column
                        target = opts.pk
                        orig_opts._join_cache[name] = (table1, from_col1,
                                to_col1, table2, from_col2, to_col2, opts,
                                target)

                    int_alias = self.join((alias, table1, from_col1, to_col1),
                            dupe_multis, exclusions, nullable=True,
                            reuse=can_reuse)
                    alias = self.join((int_alias, table2, from_col2, to_col2),
                            dupe_multis, exclusions, nullable=True,
                            reuse=can_reuse)
                    joins.extend([int_alias, alias])
                else:
                    # One-to-many field (ForeignKey defined on the target model)
                    if cached_data:
                        (table, from_col, to_col, opts, target) = cached_data
                    else:
                        local_field = opts.get_field_by_name(
                                field.rel.field_name)[0]
                        opts = orig_field.opts
                        table = opts.db_table
                        from_col = local_field.column
                        to_col = field.column
                        # In case of a recursive FK, use the to_field for
                        # reverse lookups as well
                        if orig_field.model is local_field.model:
                            target = opts.get_field(field.rel.field_name)
                        else:
                            target = opts.pk
                        orig_opts._join_cache[name] = (table, from_col, to_col,
                                opts, target)

                    alias = self.join((alias, table, from_col, to_col),
                            dupe_multis, exclusions, nullable=True,
                            reuse=can_reuse)
                    joins.append(alias)

            for (dupe_opts, dupe_col) in dupe_set:
                # Record the intermediate m2m alias when one was created this
                # iteration; otherwise fall back to the final alias.
                try:
                    self.update_dupe_avoidance(dupe_opts, dupe_col, int_alias)
                except NameError:
                    self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
        if pos != len(names) - 1:
            # The loop broke out early on a non-relation field; anything left
            # in 'names' cannot be joined through.
            if pos == len(names) - 2:
                raise FieldError("Join on field %r not permitted. Did you misspell %r for the lookup type?" % (name, names[pos + 1]))
            else:
                raise FieldError("Join on field %r not permitted." % name)

        return field, target, opts, joins, last, extra_filters
    def trim_joins(self, target, join_list, last, trim):
        """
        Sometimes joins at the end of a multi-table sequence can be trimmed. If
        the final join is against the same column as we are comparing against,
        and is an inner join, we can go back one step in a join chain and
        compare against the LHS of the join instead (and then repeat the
        optimization). The result, potentially, involves less table joins.

        The 'target' parameter is the final field being joined to, 'join_list'
        is the full list of join aliases.

        The 'last' list contains offsets into 'join_list', corresponding to
        each component of the filter. Many-to-many relations, for example, add
        two tables to the join list and we want to deal with both tables the
        same way, so 'last' has an entry for the first of the two tables and
        then the table immediately after the second table, in that case.

        The 'trim' parameter forces the final piece of the join list to be
        trimmed before anything. See the documentation of add_filter() for
        details about this.

        Returns the final active column and table alias and the new active
        join_list.
        """
        final = len(join_list)
        penultimate = last.pop()
        if penultimate == final:
            penultimate = last.pop()
        if trim and len(join_list) > 1:
            # Drop the final lookup component entirely and compare against the
            # LHS column of its first join instead.
            extra = join_list[penultimate:]
            join_list = join_list[:penultimate]
            final = penultimate
            penultimate = last.pop()
            col = self.alias_map[extra[0]][LHS_JOIN_COL]
            for alias in extra:
                self.unref_alias(alias)
        else:
            col = target.column
        alias = join_list[-1]
        while final > 1:
            join = self.alias_map[alias]
            # Only an INNER join whose RHS column is the compared column can
            # be collapsed onto its LHS.
            if col != join[RHS_JOIN_COL] or join[JOIN_TYPE] != self.INNER:
                break
            self.unref_alias(alias)
            alias = join[LHS_ALIAS]
            col = join[LHS_JOIN_COL]
            join_list = join_list[:-1]
            final -= 1
            if final == penultimate:
                penultimate = last.pop()
        return col, alias, join_list
def update_dupe_avoidance(self, opts, col, alias):
"""
For a column that is one of multiple pointing to the same table, update
the internal data structures to note that this alias shouldn't be used
for those other columns.
"""
ident = id(opts)
for name in opts.duplicate_targets[col]:
try:
self.dupe_avoidance[ident, name].add(alias)
except KeyError:
self.dupe_avoidance[ident, name] = set([alias])
    def split_exclude(self, filter_expr, prefix, can_reuse):
        """
        When doing an exclude against any kind of N-to-many relation, we need
        to use a subquery. This method constructs the nested query, given the
        original exclude filter (filter_expr) and the portion up to the first
        N-to-many relation field.
        """
        # Build an inner query that selects the related values matching the
        # original filter; the outer query then excludes rows whose 'prefix'
        # value appears in that set.
        query = Query(self.model)
        query.add_filter(filter_expr, can_reuse=can_reuse)
        query.bump_prefix()
        query.clear_ordering(True)
        query.set_start(prefix)

        # Adding extra check to make sure the selected field will not be null
        # since we are adding a IN <subquery> clause. This prevents the
        # database from tripping over IN (...,NULL,...) selects and returning
        # nothing
        alias, col = query.select[0]
        query.where.add((Constraint(alias, col, None), 'isnull', False), AND)

        self.add_filter(('%s__in' % prefix, query), negate=True, trim=True,
                can_reuse=can_reuse)

        # If there's more than one join in the inner query (before any initial
        # bits were trimmed -- which means the last active table is more than
        # two places into the alias list), we need to also handle the
        # possibility that the earlier joins don't match anything by adding a
        # comparison to NULL (e.g. in
        # Tag.objects.exclude(parent__parent__name='t1'), a tag with no parent
        # would otherwise be overlooked).
        active_positions = [pos for (pos, count) in
                enumerate(query.alias_refcount.itervalues()) if count]
        if active_positions[-1] > 1:
            self.add_filter(('%s__isnull' % prefix, False), negate=True,
                    trim=True, can_reuse=can_reuse)
def set_limits(self, low=None, high=None):
"""
Adjusts the limits on the rows retrieved. We use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, they are converted to the appropriate offset and limit values.
Any limits passed in here are applied relative to the existing
constraints. So low is added to the current low value and both will be
clamped to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
def clear_limits(self):
"""
Clears any existing limits.
"""
self.low_mark, self.high_mark = 0, None
def can_filter(self):
"""
Returns True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.low_mark and self.high_mark is None
def clear_select_fields(self):
"""
Clears the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
self.select = []
self.select_fields = []
    def add_fields(self, field_names, allow_m2m=True):
        """
        Adds the given (model) fields to the select set. The field names are
        added in the order specified.
        Raises FieldError if a name cannot be resolved or (via MultiJoin)
        traverses a many-to-many relation when allow_m2m is False.
        """
        alias = self.get_initial_alias()
        opts = self.get_meta()
        try:
            for name in field_names:
                field, target, u2, joins, u3, u4 = self.setup_joins(
                        name.split(LOOKUP_SEP), opts, alias, False, allow_m2m,
                        True)
                final_alias = joins[-1]
                col = target.column
                if len(joins) > 1:
                    # If the last join simply links back via the column we
                    # want, drop it and select from the left-hand side
                    # instead (saves one join).
                    join = self.alias_map[final_alias]
                    if col == join[RHS_JOIN_COL]:
                        self.unref_alias(final_alias)
                        final_alias = join[LHS_ALIAS]
                        col = join[LHS_JOIN_COL]
                        joins = joins[:-1]
                self.promote_alias_chain(joins[1:])
                self.select.append((final_alias, col))
                self.select_fields.append(field)
        except MultiJoin:
            raise FieldError("Invalid field name: '%s'" % name)
        except FieldError:
            # Re-raise with a friendlier message listing the valid choices.
            names = opts.get_all_field_names() + self.extra.keys() + self.aggregate_select.keys()
            names.sort()
            raise FieldError("Cannot resolve keyword %r into field. "
                    "Choices are: %s" % (name, ", ".join(names)))
        self.remove_inherited_models()
def add_ordering(self, *ordering):
"""
Adds items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or ordinals,
corresponding to column positions in the 'select' list.
If 'ordering' is empty, all ordering is cleared from the query.
"""
errors = []
for item in ordering:
if not ORDER_PATTERN.match(item):
errors.append(item)
if errors:
raise FieldError('Invalid order_by arguments: %s' % errors)
if ordering:
self.order_by.extend(ordering)
else:
self.default_ordering = False
def clear_ordering(self, force_empty=False):
"""
Removes any ordering settings. If 'force_empty' is True, there will be
no ordering in the resulting query (not even the model's default).
"""
self.order_by = []
self.extra_order_by = ()
if force_empty:
self.default_ordering = False
def set_group_by(self):
"""
Expands the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
self.group_by = []
for sel in self.select:
self.group_by.append(sel)
    def add_count_column(self):
        """
        Converts the query to do count(...) or count(distinct(pk)) in order to
        get its size. Replaces all existing aggregates with the single count
        column and disables grouping.
        """
        if not self.distinct:
            if not self.select:
                count = self.aggregates_module.Count('*', is_summary=True)
            else:
                assert len(self.select) == 1, \
                        "Cannot add count col with multiple cols in 'select': %r" % self.select
                count = self.aggregates_module.Count(self.select[0])
        else:
            opts = self.model._meta
            if not self.select:
                # No explicit select: count distinct primary keys of the
                # base table.
                count = self.aggregates_module.Count((self.join((None, opts.db_table, None, None)), opts.pk.column),
                                         is_summary=True, distinct=True)
            else:
                # Because of SQL portability issues, multi-column, distinct
                # counts need a sub-query -- see get_count() for details.
                assert len(self.select) == 1, \
                        "Cannot add count col with multiple cols in 'select'."
                count = self.aggregates_module.Count(self.select[0], distinct=True)
            # Distinct handling is done in Count(), so don't do it at this
            # level.
            self.distinct = False
        # Set only aggregate to be the count column.
        # Clear out the select cache to reflect the new unmasked aggregates.
        self.aggregates = {None: count}
        self.set_aggregate_mask(None)
        self.group_by = None
def add_select_related(self, fields):
"""
Sets up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
"""
field_dict = {}
for field in fields:
d = field_dict
for part in field.split(LOOKUP_SEP):
d = d.setdefault(part, {})
self.select_related = field_dict
self.related_select_cols = []
self.related_select_fields = []
    def add_extra(self, select, select_params, where, params, tables, order_by):
        """
        Adds data to the various extra_* attributes for user-created additions
        to the query (the QuerySet.extra() machinery).
        """
        if select:
            # We need to pair any placeholder markers in the 'select'
            # dictionary with their parameters in 'select_params' so that
            # subsequent updates to the select dictionary also adjust the
            # parameters appropriately.
            select_pairs = SortedDict()
            if select_params:
                param_iter = iter(select_params)
            else:
                param_iter = iter([])
            for name, entry in select.items():
                entry = force_unicode(entry)
                entry_params = []
                # Consume one parameter per "%s" placeholder in this entry.
                # NOTE: param_iter.next() is the Python 2 iterator protocol.
                pos = entry.find("%s")
                while pos != -1:
                    entry_params.append(param_iter.next())
                    pos = entry.find("%s", pos + 2)
                select_pairs[name] = (entry, entry_params)
            # This is order preserving, since self.extra_select is a SortedDict.
            self.extra.update(select_pairs)
        if where or params:
            self.where.add(ExtraWhere(where, params), AND)
        if tables:
            self.extra_tables += tuple(tables)
        if order_by:
            self.extra_order_by = order_by
def clear_deferred_loading(self):
"""
Remove any fields from the deferred loading set.
"""
self.deferred_loading = (set(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. The new field names are added to any existing field names that
are deferred (or removed from any existing field names that are marked
as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
# format, so that we can use a set datastructure. We do the foo__bar
# splitting and handling when computing the SQL colum names (as part of
# get_columns()).
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
self.deferred_loading = existing.difference(field_names), False
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, those
names are removed from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
existing, defer = self.deferred_loading
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = set(field_names).difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = set(field_names), False
def get_loaded_field_names(self):
"""
If any fields are marked to be deferred, returns a dictionary mapping
models to a set of names in those fields that will be loaded. If a
model is not in the returned dictionary, none of it's fields are
deferred.
If no fields are marked for deferral, returns an empty dictionary.
"""
collection = {}
self.deferred_to_data(collection, self.get_loaded_field_names_cb)
return collection
def get_loaded_field_names_cb(self, target, model, fields):
"""
Callback used by get_deferred_field_names().
"""
target[model] = set([f.name for f in fields])
def set_aggregate_mask(self, names):
"Set the mask of aggregates that will actually be returned by the SELECT"
if names is None:
self.aggregate_select_mask = None
else:
self.aggregate_select_mask = set(names)
self._aggregate_select_cache = None
def set_extra_mask(self, names):
"""
Set the mask of extra select items that will be returned by SELECT,
we don't actually remove them from the Query since they might be used
later
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
def _aggregate_select(self):
"""The SortedDict of aggregate columns that are not masked, and should
be used in the SELECT clause.
This result is cached for optimization purposes.
"""
if self._aggregate_select_cache is not None:
return self._aggregate_select_cache
elif self.aggregate_select_mask is not None:
self._aggregate_select_cache = SortedDict([
(k,v) for k,v in self.aggregates.items()
if k in self.aggregate_select_mask
])
return self._aggregate_select_cache
else:
return self.aggregates
aggregate_select = property(_aggregate_select)
def _extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
elif self.extra_select_mask is not None:
self._extra_select_cache = SortedDict([
(k,v) for k,v in self.extra.items()
if k in self.extra_select_mask
])
return self._extra_select_cache
else:
return self.extra
extra_select = property(_extra_select)
    def set_start(self, start):
        """
        Sets the table from which to start joining. The start position is
        specified by the related attribute from the base model. This will
        automatically set to the select column to be the column linked from the
        previous table.
        This method is primarily for internal use and the error checking isn't
        as friendly as add_filter(). Mostly useful for querying directly
        against the join table of many-to-many relation in a subquery.
        """
        opts = self.model._meta
        alias = self.get_initial_alias()
        field, col, opts, joins, last, extra = self.setup_joins(
                start.split(LOOKUP_SEP), opts, alias, False)
        # Select the column the second join links from.
        select_col = self.alias_map[joins[1]][LHS_JOIN_COL]
        select_alias = alias
        # The call to setup_joins added an extra reference to everything in
        # joins. Reverse that.
        for alias in joins:
            self.unref_alias(alias)
        # We might be able to trim some joins from the front of this query,
        # providing that we only traverse "always equal" connections (i.e. rhs
        # is *always* the same value as lhs).
        for alias in joins[1:]:
            join_info = self.alias_map[alias]
            if (join_info[LHS_JOIN_COL] != select_col
                    or join_info[JOIN_TYPE] != self.INNER):
                break
            # Safe to trim: move the selection one join to the right.
            self.unref_alias(select_alias)
            select_alias = join_info[RHS_ALIAS]
            select_col = join_info[RHS_JOIN_COL]
        self.select = [(select_alias, select_col)]
        self.remove_inherited_models()
def get_order_dir(field, default='ASC'):
    """
    Splits an order specification into (field name, direction). For
    example, '-foo' becomes ('foo', 'DESC').
    "default" names the direction used for an unprefixed (or '+' prefixed)
    field; a '-' prefix always selects the opposite direction.
    """
    directions = ORDER_DIR[default]
    if field[0] == '-':
        return field[1:], directions[1]
    return field, directions[0]
def setup_join_cache(sender, **kwargs):
    """
    The information needed to join between model fields is something that is
    invariant over the life of the model, so we cache it in the model's Options
    class, rather than recomputing it all the time.
    This method initialises the (empty) cache when the model is created.
    """
    # "sender" is the newly prepared model class; give its Options a fresh,
    # empty join cache.
    sender._meta._join_cache = {}
# Install the cache holder on every model as soon as it is prepared.
signals.class_prepared.connect(setup_join_cache)
def add_to_dict(data, key, value):
    """
    A helper function that adds "value" to the set stored under "key" in
    "data", creating the set on first use of the key.
    """
    data.setdefault(key, set()).add(value)
def get_proxied_model(opts):
    """
    Walks the proxy chain starting at "opts" and returns the last model
    reached through proxy_for_model links, or None when "opts" does not
    belong to a proxy model at all.
    """
    proxied_model = None
    current = opts
    while current.proxy:
        proxied_model = current.proxy_for_model
        current = proxied_model._meta
    return proxied_model
| bsd-3-clause |
reflash/zosftp-sublime | third_party/appdirs.py | 335 | 22374 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
"""Utilities for determining application-specific dirs.
See <http://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version_info__ = (1, 4, 0)
__version__ = '.'.join(map(str, __version_info__))
import sys
import os
# True when running under Python 3; used to alias unicode() to str below.
PY3 = sys.version_info[0] == 3
if PY3:
    unicode = str
# Normalize the platform to a sys.platform-style string ('win32', 'darwin'
# or 'linux2') that the rest of the module branches on. Jython reports
# 'java...' in sys.platform, so consult the underlying OS name instead.
if sys.platform.startswith('java'):
    import platform
    os_name = platform.java_ver()[3][0]
    if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
        system = 'win32'
    elif os_name.startswith('Mac'): # "Mac OS X", etc.
        system = 'darwin'
    else: # "Linux", "SunOS", "FreeBSD", etc.
        # Setting this to "linux2" is not ideal, but only Windows or Mac
        # are actually checked for and the rest of the module expects
        # *sys.platform* style strings.
        system = 'linux2'
else:
    system = sys.platform
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return full path to the user-specific data dir for this application.

    "appname" is the application name; when None, only the base system
    directory is returned. "appauthor" (used on Windows only) is the
    owning author/company and defaults to appname; pass False to omit it.
    "version" is an optional version path element, appended only when
    appname is given; typically "<major>.<minor>". "roaming" (Windows
    only) selects the roaming AppData directory, which is synced across a
    roaming-profile network on login.

    Typical user data directories are:
        Mac OS X:   ~/Library/Application Support/<AppName>
        Unix:       $XDG_DATA_HOME or ~/.local/share/<AppName>
        Win 7:      C:\Users\<username>\AppData\{Local,Roaming}\<AppAuthor>\<AppName>
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        if roaming:
            const = "CSIDL_APPDATA"
        else:
            const = "CSIDL_LOCAL_APPDATA"
        path = os.path.normpath(_get_win_folder(const))
        if appname:
            if appauthor is False:
                path = os.path.join(path, appname)
            else:
                path = os.path.join(path, appauthor, appname)
    elif system == 'darwin':
        path = os.path.expanduser('~/Library/Application Support/')
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG spec: $XDG_DATA_HOME, falling back to ~/.local/share.
        path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
    """Return full path to the user-shared data dir for this application.

    "appname" is the application name; when None, only the base system
    directory is returned. "appauthor" (Windows only) defaults to appname;
    pass False to omit it. "version" is an optional version path element,
    applied only when appname is given. "multipath" (*nix only) returns
    the entire $XDG_DATA_DIRS list joined with os.pathsep instead of just
    the first entry.

    Typical user data directories are:
        Mac OS X:   /Library/Application Support/<AppName>
        Unix:       /usr/local/share/<AppName> or /usr/share/<AppName>
        Win 7:      C:\ProgramData\<AppAuthor>\<AppName>

    WARNING: on Vista and later "C:\ProgramData" is a hidden *system*
    directory; avoid relying on this function on Windows.
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
        if appname:
            if appauthor is False:
                path = os.path.join(path, appname)
            else:
                path = os.path.join(path, appauthor, appname)
    elif system == 'darwin':
        path = os.path.expanduser('/Library/Application Support')
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG spec: every entry of $XDG_DATA_DIRS, with the spec default.
        # The version element is folded into appname here, so no further
        # version handling is needed on this branch.
        raw = os.getenv('XDG_DATA_DIRS',
                        os.pathsep.join(['/usr/local/share', '/usr/share']))
        dirs = [os.path.expanduser(d.rstrip(os.sep)) for d in raw.split(os.pathsep)]
        if appname:
            if version:
                appname = os.path.join(appname, version)
            dirs = [os.sep.join([d, appname]) for d in dirs]
        if multipath:
            return os.pathsep.join(dirs)
        return dirs[0]
    if appname and version:
        path = os.path.join(path, version)
    return path
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return full path to the user-specific config dir for this application.

    "appname" is the application name; when None, only the base system
    directory is returned. "appauthor" (Windows only) defaults to appname;
    pass False to omit it. "version" is an optional version path element,
    applied only when appname is given. "roaming" (Windows only) selects
    the roaming AppData directory.

    Typical user config directories are:
        Mac OS X:   same as user_data_dir
        Unix:       $XDG_CONFIG_HOME or ~/.config/<AppName>
        Win *:      same as user_data_dir
    """
    if system not in ["win32", "darwin"]:
        # XDG spec: $XDG_CONFIG_HOME, falling back to ~/.config.
        path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
        if appname:
            path = os.path.join(path, appname)
    else:
        path = user_data_dir(appname, appauthor, None, roaming)
    if appname and version:
        path = os.path.join(path, version)
    return path
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
    """Return full path to the shared (site-wide) config dir for this application.

    "appname" is the application name; when None, only the base system
    directory is returned. "appauthor" (Windows only) defaults to appname;
    pass False to omit it. "version" is an optional version path element,
    applied only when appname is given. "multipath" (*nix only) returns
    the entire $XDG_CONFIG_DIRS list joined with os.pathsep instead of
    just the first entry.

    Typical site config directories are:
        Mac OS X:   same as site_data_dir
        Unix:       /etc/xdg/<AppName>, or each $XDG_CONFIG_DIRS entry
        Win *:      same as site_data_dir

    WARNING: see the site_data_dir note about hidden system directories on
    Windows Vista and later.
    """
    if system in ["win32", "darwin"]:
        path = site_data_dir(appname, appauthor)
        if appname and version:
            path = os.path.join(path, version)
        return path
    # XDG spec: $XDG_CONFIG_DIRS, defaulting to /etc/xdg.
    raw = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
    dirs = [os.path.expanduser(d.rstrip(os.sep)) for d in raw.split(os.pathsep)]
    if appname:
        if version:
            appname = os.path.join(appname, version)
        dirs = [os.sep.join([d, appname]) for d in dirs]
    if multipath:
        return os.pathsep.join(dirs)
    return dirs[0]
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return full path to the user-specific cache dir for this application.

    "appname" is the application name; when None, only the base system
    directory is returned. "appauthor" (Windows only) defaults to appname;
    pass False to omit it. "version" is an optional version path element,
    applied only when appname is given. "opinion" (Windows only) controls
    whether "Cache" is appended to the app data dir -- Windows has no
    dedicated cache location, so apps conventionally nest one there.

    Typical user cache directories are:
        Mac OS X:   ~/Library/Caches/<AppName>
        Unix:       $XDG_CACHE_HOME or ~/.cache/<AppName>
        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
        if appname:
            if appauthor is False:
                path = os.path.join(path, appname)
            else:
                path = os.path.join(path, appauthor, appname)
        if opinion:
            path = os.path.join(path, "Cache")
    elif system == 'darwin':
        path = os.path.expanduser('~/Library/Caches')
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG spec: $XDG_CACHE_HOME, falling back to ~/.cache.
        path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return full path to the user-specific log dir for this application.

    "appname" is the application name; when None, only the base system
    directory is returned. "appauthor" (Windows only) defaults to appname;
    pass False to disable it. "version" is an optional version path
    element, applied only when appname is given. "opinion" (boolean) can
    be False to disable the appending of "Logs" to the base app data dir
    on Windows, and "log" to the base cache dir on Unix -- neither system
    defines a dedicated per-app log location.

    Typical user log directories are:
        Mac OS X:   ~/Library/Logs/<AppName>
        Unix:       ~/.cache/<AppName>/log  # or under $XDG_CACHE_HOME
        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
    """
    if system == "darwin":
        path = os.path.expanduser('~/Library/Logs')
        # Bug fix: previously appname was joined unconditionally, so a None
        # appname raised TypeError instead of returning the base directory
        # as documented (and as every sibling *_dir function does).
        if appname:
            path = os.path.join(path, appname)
    elif system == "win32":
        path = user_data_dir(appname, appauthor, version)
        version = False  # already applied by user_data_dir
        if opinion:
            path = os.path.join(path, "Logs")
    else:
        path = user_cache_dir(appname, appauthor, version)
        version = False  # already applied by user_cache_dir
        if opinion:
            path = os.path.join(path, "log")
    if appname and version:
        path = os.path.join(path, version)
    return path
class AppDirs(object):
    """Convenience wrapper for getting application dirs.
    Stores appname/appauthor/version/roaming/multipath once so that each
    of the module-level *_dir functions can be accessed as a property.
    """
    def __init__(self, appname, appauthor=None, version=None, roaming=False,
                 multipath=False):
        # See the module-level functions for the meaning of each argument.
        self.appname = appname
        self.appauthor = appauthor
        self.version = version
        self.roaming = roaming
        self.multipath = multipath
    @property
    def user_data_dir(self):
        """Per-user data directory for this application."""
        return user_data_dir(self.appname, self.appauthor,
                             version=self.version, roaming=self.roaming)
    @property
    def site_data_dir(self):
        """Site-wide (shared) data directory for this application."""
        return site_data_dir(self.appname, self.appauthor,
                             version=self.version, multipath=self.multipath)
    @property
    def user_config_dir(self):
        """Per-user config directory for this application."""
        return user_config_dir(self.appname, self.appauthor,
                               version=self.version, roaming=self.roaming)
    @property
    def site_config_dir(self):
        """Site-wide (shared) config directory for this application."""
        return site_config_dir(self.appname, self.appauthor,
                               version=self.version, multipath=self.multipath)
    @property
    def user_cache_dir(self):
        """Per-user cache directory for this application."""
        return user_cache_dir(self.appname, self.appauthor,
                              version=self.version)
    @property
    def user_log_dir(self):
        """Per-user log directory for this application."""
        return user_log_dir(self.appname, self.appauthor,
                            version=self.version)
#---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
    """This is a fallback technique at best. I'm not sure if using the
    registry for this guarantees us the correct answer for all CSIDL_*
    names.
    Maps a CSIDL_* constant name to its shell-folder registry value and
    reads the path from HKCU. Python 2 only (_winreg module).
    """
    import _winreg
    # Registry value names for the supported CSIDL constants; raises
    # KeyError for anything else.
    shell_folder_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }[csidl_name]
    key = _winreg.OpenKey(
        _winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
    )
    dir, type = _winreg.QueryValueEx(key, shell_folder_name)
    return dir
def _get_win_folder_with_pywin32(csidl_name):
    """Resolve a CSIDL_* folder path via the pywin32 shell bindings."""
    from win32com.shell import shellcon, shell
    dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
    # Try to make this a unicode path because SHGetFolderPath does
    # not return unicode strings when there is unicode data in the
    # path.
    try:
        dir = unicode(dir)
        # Downgrade to short path name if have highbit chars. See
        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
        has_high_char = False
        for c in dir:
            if ord(c) > 255:
                has_high_char = True
                break
        if has_high_char:
            try:
                import win32api
                dir = win32api.GetShortPathName(dir)
            except ImportError:
                # win32api unavailable: keep the long (unicode) path.
                pass
    except UnicodeError:
        # Path could not be represented as unicode; return it as-is.
        pass
    return dir
def _get_win_folder_with_ctypes(csidl_name):
    """Resolve a CSIDL_* folder path via SHGetFolderPathW through ctypes."""
    import ctypes

    # Numeric CSIDL constants for the supported folder names; raises
    # KeyError for anything else.
    csidl_const = {
        "CSIDL_APPDATA": 26,
        "CSIDL_COMMON_APPDATA": 35,
        "CSIDL_LOCAL_APPDATA": 28,
    }[csidl_name]

    buf = ctypes.create_unicode_buffer(1024)
    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)

    # Downgrade to the short path name when the result contains high-bit
    # characters. See <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    if any(ord(ch) > 255 for ch in buf):
        short_buf = ctypes.create_unicode_buffer(1024)
        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, short_buf, 1024):
            buf = short_buf
    return buf.value
def _get_win_folder_with_jna(csidl_name):
    """Resolve a CSIDL_* folder path via JNA (Jython running on Windows)."""
    import array
    from com.sun import jna
    from com.sun.jna.platform import win32

    buf_size = win32.WinDef.MAX_PATH * 2
    buf = array.zeros('c', buf_size)
    shell = win32.Shell32.INSTANCE
    shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
    dir = jna.Native.toString(buf.tostring()).rstrip("\0")

    # Downgrade to short path name if have highbit chars. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    has_high_char = False
    for c in dir:
        if ord(c) > 255:
            has_high_char = True
            break
    if has_high_char:
        buf = array.zeros('c', buf_size)
        kernel = win32.Kernel32.INSTANCE
        # Bug fix: this previously referenced the undefined name "kernal",
        # raising NameError whenever the path contained high-bit characters.
        if kernel.GetShortPathName(dir, buf, buf_size):
            dir = jna.Native.toString(buf.tostring()).rstrip("\0")
    return dir
if system == "win32":
    # Bind _get_win_folder to the best available backend, preferring
    # pywin32, then ctypes, then JNA (Jython), with the registry as the
    # last-resort fallback.
    try:
        import win32com.shell
        _get_win_folder = _get_win_folder_with_pywin32
    except ImportError:
        try:
            from ctypes import windll
            _get_win_folder = _get_win_folder_with_ctypes
        except ImportError:
            try:
                import com.sun.jna
                _get_win_folder = _get_win_folder_with_jna
            except ImportError:
                _get_win_folder = _get_win_folder_from_registry
#---- self test code
if __name__ == "__main__":
    # Self-test: print every directory for several AppDirs configurations.
    appname = "MyApp"
    appauthor = "MyCompany"
    props = ("user_data_dir", "site_data_dir",
             "user_config_dir", "site_config_dir",
             "user_cache_dir", "user_log_dir")

    scenarios = (
        ("-- app dirs (with optional 'version')",
         AppDirs(appname, appauthor, version="1.0")),
        ("\n-- app dirs (without optional 'version')",
         AppDirs(appname, appauthor)),
        ("\n-- app dirs (without optional 'appauthor')",
         AppDirs(appname)),
        ("\n-- app dirs (with disabled 'appauthor')",
         AppDirs(appname, appauthor=False)),
    )
    for title, dirs in scenarios:
        print(title)
        for prop in props:
            print("%s: %s" % (prop, getattr(dirs, prop)))
| gpl-3.0 |
markslwong/tensorflow | tensorflow/tensorboard/backend/event_processing/event_accumulator.py | 28 | 31775 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Takes a generator of values, and accumulates them for a frontend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import re
import threading
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf.config_pb2 import RunMetadata
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.tensorboard.backend.event_processing import directory_watcher
from tensorflow.tensorboard.backend.event_processing import event_file_loader
from tensorflow.tensorboard.backend.event_processing import plugin_asset_util
from tensorflow.tensorboard.backend.event_processing import reservoir
# Short local alias: every record type below is a plain namedtuple.
namedtuple = collections.namedtuple
ScalarEvent = namedtuple('ScalarEvent', ['wall_time', 'step', 'value'])
HealthPillEvent = namedtuple(
    'HealthPillEvent',
    ['wall_time', 'step', 'node_name', 'output_slot', 'value'])
CompressedHistogramEvent = namedtuple('CompressedHistogramEvent',
                                      ['wall_time', 'step',
                                       'compressed_histogram_values'])
CompressedHistogramValue = namedtuple('CompressedHistogramValue',
                                      ['basis_point', 'value'])
HistogramEvent = namedtuple('HistogramEvent',
                            ['wall_time', 'step', 'histogram_value'])
HistogramValue = namedtuple('HistogramValue', ['min', 'max', 'num', 'sum',
                                               'sum_squares', 'bucket_limit',
                                               'bucket'])
ImageEvent = namedtuple('ImageEvent', ['wall_time', 'step',
                                       'encoded_image_string', 'width',
                                       'height'])
AudioEvent = namedtuple('AudioEvent', ['wall_time', 'step',
                                       'encoded_audio_string', 'content_type',
                                       'sample_rate', 'length_frames'])
TensorEvent = namedtuple('TensorEvent', ['wall_time', 'step', 'tensor_proto'])
## Different types of summary events handled by the event_accumulator
## Maps a Summary.Value field name to the EventAccumulator method that
## processes that kind of value.
SUMMARY_TYPES = {
    'simple_value': '_ProcessScalar',
    'histo': '_ProcessHistogram',
    'image': '_ProcessImage',
    'audio': '_ProcessAudio',
    'tensor': '_ProcessTensor',
}
## The tagTypes below are just arbitrary strings chosen to pass the type
## information of the tag from the backend to the frontend
COMPRESSED_HISTOGRAMS = 'compressedHistograms'
HISTOGRAMS = 'histograms'
IMAGES = 'images'
AUDIO = 'audio'
SCALARS = 'scalars'
TENSORS = 'tensors'
HEALTH_PILLS = 'health_pills'
GRAPH = 'graph'
META_GRAPH = 'meta_graph'
RUN_METADATA = 'run_metadata'
## Normal CDF for std_devs: (-Inf, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, Inf)
## naturally gives bands around median of width 1 std dev, 2 std dev, 3 std dev,
## and then the long tail.
NORMAL_HISTOGRAM_BPS = (0, 668, 1587, 3085, 5000, 6915, 8413, 9332, 10000)
# Per-tag reservoir sizes used unless the caller overrides them.
DEFAULT_SIZE_GUIDANCE = {
    COMPRESSED_HISTOGRAMS: 500,
    IMAGES: 4,
    AUDIO: 4,
    SCALARS: 10000,
    # We store this many health pills per op.
    HEALTH_PILLS: 100,
    HISTOGRAMS: 1,
    TENSORS: 10,
}
# A size of 0 disables reservoir downsampling: every event is kept.
STORE_EVERYTHING_SIZE_GUIDANCE = {
    COMPRESSED_HISTOGRAMS: 0,
    IMAGES: 0,
    AUDIO: 0,
    SCALARS: 0,
    HEALTH_PILLS: 0,
    HISTOGRAMS: 0,
    TENSORS: 0,
}
# The tag that values containing health pills have. Health pill data is stored
# in tensors. In order to distinguish health pill values from scalar values, we
# rely on how health pill values have this special tag value.
HEALTH_PILL_EVENT_TAG = '__health_pill__'
def IsTensorFlowEventsFile(path):
  """Check the path name to see if it is probably a TF Events file.

  Args:
    path: A file path to check if it is an event file.
  Raises:
    ValueError: If the path is an empty string.
  Returns:
    If path is formatted like a TensorFlowEventsFile.
  """
  if not path:
    raise ValueError('Path must be a nonempty string')
  # Only the final path component matters; normalize it to `str` first.
  file_name = compat.as_str_any(os.path.basename(path))
  return 'tfevents' in file_name
class EventAccumulator(object):
  """An `EventAccumulator` takes an event generator, and accumulates the values.
  The `EventAccumulator` is intended to provide a convenient Python interface
  for loading Event data written during a TensorFlow run. TensorFlow writes out
  `Event` protobuf objects, which have a timestamp and step number, and often
  contain a `Summary`. Summaries can have different kinds of data like an image,
  a scalar value, or a histogram. The Summaries also have a tag, which we use to
  organize logically related data. The `EventAccumulator` supports retrieving
  the `Event` and `Summary` data by its tag.
  Calling `Tags()` gets a map from `tagType` (e.g. `'images'`,
  `'compressedHistograms'`, `'scalars'`, etc) to the associated tags for those
  data types. Then, various functional endpoints (eg
  `Accumulator.Scalars(tag)`) allow for the retrieval of all data
  associated with that tag.
  The `Reload()` method synchronously loads all of the data written so far.
  Histograms, audio, and images are very large, so storing all of them is not
  recommended.
  @@Tensors
  """
  def __init__(self,
               path,
               size_guidance=DEFAULT_SIZE_GUIDANCE,
               compression_bps=NORMAL_HISTOGRAM_BPS,
               purge_orphaned_data=True):
    """Construct the `EventAccumulator`.
    Args:
      path: A file path to a directory containing tf events files, or a single
        tf events file. The accumulator will load events from this path.
      size_guidance: Information on how much data the EventAccumulator should
        store in memory. The DEFAULT_SIZE_GUIDANCE tries not to store too much
        so as to avoid OOMing the client. The size_guidance should be a map
        from a `tagType` string to an integer representing the number of
        items to keep per tag for items of that `tagType`. If the size is 0,
        all events are stored.
      compression_bps: Information on how the `EventAccumulator` should compress
        histogram data for the `CompressedHistograms` tag (for details see
        `ProcessCompressedHistogram`).
      purge_orphaned_data: Whether to discard any events that were "orphaned" by
        a TensorFlow restart.
    """
    # Merge the caller's guidance over the defaults, key by key.
    sizes = {}
    for key in DEFAULT_SIZE_GUIDANCE:
      if key in size_guidance:
        sizes[key] = size_guidance[key]
      else:
        sizes[key] = DEFAULT_SIZE_GUIDANCE[key]
    self._first_event_timestamp = None
    self._scalars = reservoir.Reservoir(size=sizes[SCALARS])
    # Unlike the other reservoir, the reservoir for health pills is keyed by the
    # name of the op instead of the tag. This lets us efficiently obtain the
    # health pills per node.
    self._health_pills = reservoir.Reservoir(size=sizes[HEALTH_PILLS])
    self._graph = None
    self._graph_from_metagraph = False
    self._meta_graph = None
    self._tagged_metadata = {}
    self._histograms = reservoir.Reservoir(size=sizes[HISTOGRAMS])
    self._compressed_histograms = reservoir.Reservoir(
        size=sizes[COMPRESSED_HISTOGRAMS], always_keep_last=False)
    self._images = reservoir.Reservoir(size=sizes[IMAGES])
    self._audio = reservoir.Reservoir(size=sizes[AUDIO])
    self._tensors = reservoir.Reservoir(size=sizes[TENSORS])
    self._generator_mutex = threading.Lock()
    self.path = path
    self._generator = _GeneratorFromPath(path)
    self._compression_bps = compression_bps
    self.purge_orphaned_data = purge_orphaned_data
    # Step/wall_time of the most recent event seen so far; consulted by the
    # purge logic to detect out-of-order writes after a TensorFlow restart.
    self.most_recent_step = -1
    self.most_recent_wall_time = -1
    self.file_version = None
    # The attributes that get built up by the accumulator
    self.accumulated_attrs = ('_scalars', '_histograms',
                              '_compressed_histograms', '_images', '_audio')
    self._tensor_summaries = {}
  def Reload(self):
    """Loads all events added since the last call to `Reload`.
    If `Reload` was never called, loads all events in the file.
    Returns:
      The `EventAccumulator`.
    """
    with self._generator_mutex:
      for event in self._generator.Load():
        self._ProcessEvent(event)
    return self
  def PluginAssets(self, plugin_name):
    """Return a list of all plugin assets for the given plugin.
    Args:
      plugin_name: The string name of a plugin to retrieve assets for.
    Returns:
      A list of string plugin asset names, or empty list if none are available.
      If the plugin was not registered, an empty list is returned.
    """
    return plugin_asset_util.ListAssets(self.path, plugin_name)
  def RetrievePluginAsset(self, plugin_name, asset_name):
    """Return the contents of a given plugin asset.
    Args:
      plugin_name: The string name of a plugin.
      asset_name: The string name of an asset.
    Returns:
      The string contents of the plugin asset.
    Raises:
      KeyError: If the asset is not available.
    """
    return plugin_asset_util.RetrieveAsset(self.path, plugin_name, asset_name)
  def FirstEventTimestamp(self):
    """Returns the timestamp in seconds of the first event.
    If the first event has been loaded (either by this method or by `Reload`,
    this returns immediately. Otherwise, it will load in the first event. Note
    that this means that calling `Reload` will cause this to block until
    `Reload` has finished.
    Returns:
      The timestamp in seconds of the first event that was loaded.
    Raises:
      ValueError: If no events have been loaded and there were no events found
      on disk.
    """
    if self._first_event_timestamp is not None:
      return self._first_event_timestamp
    with self._generator_mutex:
      try:
        # Processing any event records the first timestamp as a side effect.
        event = next(self._generator.Load())
        self._ProcessEvent(event)
        return self._first_event_timestamp
      except StopIteration:
        raise ValueError('No event timestamp could be found')
  def _ProcessEvent(self, event):
    """Called whenever an event is loaded."""
    if self._first_event_timestamp is None:
      self._first_event_timestamp = event.wall_time
    if event.HasField('file_version'):
      new_file_version = _ParseFileVersion(event.file_version)
      if self.file_version and self.file_version != new_file_version:
        ## This should not happen.
        logging.warn(('Found new file_version for event.proto. This will '
                      'affect purging logic for TensorFlow restarts. '
                      'Old: {0} New: {1}').format(self.file_version,
                                                  new_file_version))
      self.file_version = new_file_version
    self._MaybePurgeOrphanedData(event)
    ## Process the event.
    # GraphDef and MetaGraphDef are handled in a special way:
    # If no graph_def Event is available, but a meta_graph_def is, and it
    # contains a graph_def, then use the meta_graph_def.graph_def as our graph.
    # If a graph_def Event is available, always prefer it to the graph_def
    # inside the meta_graph_def.
    if event.HasField('graph_def'):
      if self._graph is not None:
        logging.warn(('Found more than one graph event per run, or there was '
                      'a metagraph containing a graph_def, as well as one or '
                      'more graph events. Overwriting the graph with the '
                      'newest event.'))
      self._graph = event.graph_def
      self._graph_from_metagraph = False
    elif event.HasField('meta_graph_def'):
      if self._meta_graph is not None:
        logging.warn(('Found more than one metagraph event per run. '
                      'Overwriting the metagraph with the newest event.'))
      self._meta_graph = event.meta_graph_def
      if self._graph is None or self._graph_from_metagraph:
        # We may have a graph_def in the metagraph. If so, and no
        # graph_def is directly available, use this one instead.
        meta_graph = meta_graph_pb2.MetaGraphDef()
        meta_graph.ParseFromString(self._meta_graph)
        if meta_graph.graph_def:
          if self._graph is not None:
            logging.warn(('Found multiple metagraphs containing graph_defs,'
                          'but did not find any graph events. Overwriting the '
                          'graph with the newest metagraph version.'))
          self._graph_from_metagraph = True
          self._graph = meta_graph.graph_def.SerializeToString()
    elif event.HasField('tagged_run_metadata'):
      tag = event.tagged_run_metadata.tag
      if tag in self._tagged_metadata:
        logging.warn('Found more than one "run metadata" event with tag ' +
                     tag + '. Overwriting it with the newest event.')
      self._tagged_metadata[tag] = event.tagged_run_metadata.run_metadata
    elif event.HasField('summary'):
      for value in event.summary.value:
        if value.HasField('tensor') and value.tag == HEALTH_PILL_EVENT_TAG:
          self._ProcessHealthPillSummary(value, event)
        else:
          # Dispatch to the handler registered for this summary field type.
          for summary_type, summary_func in SUMMARY_TYPES.items():
            if value.HasField(summary_type):
              datum = getattr(value, summary_type)
              # Tensor summaries are keyed by node_name rather than tag.
              tag = value.node_name if summary_type == 'tensor' else value.tag
              getattr(self, summary_func)(tag, event.wall_time, event.step,
                                          datum)
  def _ProcessHealthPillSummary(self, value, event):
    """Process summaries containing health pills.
    These summaries are distinguished by the fact that they have a Tensor field
    and have a special tag value.
    This method emits ERROR-level messages to the logs if it encounters Tensor
    summaries that it cannot process.
    Args:
      value: A summary_pb2.Summary.Value with a Tensor field.
      event: The event_pb2.Event containing that value.
    """
    elements = tensor_util.MakeNdarray(value.tensor)
    # The node_name property of the value object is actually a watch key: a
    # combination of node name, output slot, and a suffix. We capture the
    # actual node name and the output slot with a regular expression.
    match = re.match(r'^(.*):(\d+):DebugNumericSummary$', value.node_name)
    if not match:
      logging.log_first_n(
          logging.ERROR,
          'Unsupported watch key %s for health pills; skipping this sequence.',
          1,
          value.node_name)
      return
    node_name = match.group(1)
    output_slot = int(match.group(2))
    self._ProcessHealthPill(
        event.wall_time, event.step, node_name, output_slot, elements)
  def Tags(self):
    """Return all tags found in the value stream.
    Returns:
      A `{tagType: ['list', 'of', 'tags']}` dictionary.
    """
    return {
        IMAGES: self._images.Keys(),
        AUDIO: self._audio.Keys(),
        HISTOGRAMS: self._histograms.Keys(),
        SCALARS: self._scalars.Keys(),
        COMPRESSED_HISTOGRAMS: self._compressed_histograms.Keys(),
        TENSORS: self._tensors.Keys(),
        # Use a heuristic: if the metagraph is available, but
        # graph is not, then we assume the metagraph contains the graph.
        GRAPH: self._graph is not None,
        META_GRAPH: self._meta_graph is not None,
        RUN_METADATA: list(self._tagged_metadata.keys())
    }
  def Scalars(self, tag):
    """Given a summary tag, return all associated `ScalarEvent`s.
    Args:
      tag: A string tag associated with the events.
    Raises:
      KeyError: If the tag is not found.
    Returns:
      An array of `ScalarEvent`s.
    """
    return self._scalars.Items(tag)
  def HealthPills(self, node_name):
    """Returns all health pill values for a certain node.
    Args:
      node_name: The name of the node to obtain health pills for.
    Raises:
      KeyError: If the node name is not found.
    Returns:
      An array of `HealthPillEvent`s.
    """
    return self._health_pills.Items(node_name)
  def GetOpsWithHealthPills(self):
    """Determines which ops have at least 1 health pill event.
    Returns:
      A list of names of ops with at least 1 health pill event.
    """
    return self._health_pills.Keys()
  def Graph(self):
    """Return the graph definition, if there is one.
    If the graph is stored directly, return that.  If no graph is stored
    directly but a metagraph is stored containing a graph, return that.
    Raises:
      ValueError: If there is no graph for this run.
    Returns:
      The `graph_def` proto.
    """
    graph = graph_pb2.GraphDef()
    if self._graph is not None:
      graph.ParseFromString(self._graph)
      return graph
    raise ValueError('There is no graph in this EventAccumulator')
  def MetaGraph(self):
    """Return the metagraph definition, if there is one.
    Raises:
      ValueError: If there is no metagraph for this run.
    Returns:
      The `meta_graph_def` proto.
    """
    if self._meta_graph is None:
      raise ValueError('There is no metagraph in this EventAccumulator')
    meta_graph = meta_graph_pb2.MetaGraphDef()
    meta_graph.ParseFromString(self._meta_graph)
    return meta_graph
  def RunMetadata(self, tag):
    """Given a tag, return the associated session.run() metadata.
    Args:
      tag: A string tag associated with the event.
    Raises:
      ValueError: If the tag is not found.
    Returns:
      The metadata in form of `RunMetadata` proto.
    """
    if tag not in self._tagged_metadata:
      raise ValueError('There is no run metadata with this tag name')
    run_metadata = RunMetadata()
    run_metadata.ParseFromString(self._tagged_metadata[tag])
    return run_metadata
  def Histograms(self, tag):
    """Given a summary tag, return all associated histograms.
    Args:
      tag: A string tag associated with the events.
    Raises:
      KeyError: If the tag is not found.
    Returns:
      An array of `HistogramEvent`s.
    """
    return self._histograms.Items(tag)
  def CompressedHistograms(self, tag):
    """Given a summary tag, return all associated compressed histograms.
    Args:
      tag: A string tag associated with the events.
    Raises:
      KeyError: If the tag is not found.
    Returns:
      An array of `CompressedHistogramEvent`s.
    """
    return self._compressed_histograms.Items(tag)
  def Images(self, tag):
    """Given a summary tag, return all associated images.
    Args:
      tag: A string tag associated with the events.
    Raises:
      KeyError: If the tag is not found.
    Returns:
      An array of `ImageEvent`s.
    """
    return self._images.Items(tag)
  def Audio(self, tag):
    """Given a summary tag, return all associated audio.
    Args:
      tag: A string tag associated with the events.
    Raises:
      KeyError: If the tag is not found.
    Returns:
      An array of `AudioEvent`s.
    """
    return self._audio.Items(tag)
  def Tensors(self, tag):
    """Given a summary tag, return all associated tensors.
    Args:
      tag: A string tag associated with the events.
    Raises:
      KeyError: If the tag is not found.
    Returns:
      An array of `TensorEvent`s.
    """
    return self._tensors.Items(tag)
  def _MaybePurgeOrphanedData(self, event):
    """Maybe purge orphaned data due to a TensorFlow crash.
    When TensorFlow crashes at step T+O and restarts at step T, any events
    written after step T are now "orphaned" and will be at best misleading if
    they are included in TensorBoard.
    This logic attempts to determine if there is orphaned data, and purge it
    if it is found.
    Args:
      event: The event to use as a reference, to determine if a purge is needed.
    """
    if not self.purge_orphaned_data:
      return
    ## Check if the event happened after a crash, and purge expired tags.
    if self.file_version and self.file_version >= 2:
      ## If the file_version is recent enough, use the SessionLog enum
      ## to check for restarts.
      self._CheckForRestartAndMaybePurge(event)
    else:
      ## If there is no file version, default to old logic of checking for
      ## out of order steps.
      self._CheckForOutOfOrderStepAndMaybePurge(event)
  def _CheckForRestartAndMaybePurge(self, event):
    """Check and discard expired events using SessionLog.START.
    Check for a SessionLog.START event and purge all previously seen events
    with larger steps, because they are out of date. Because of supervisor
    threading, it is possible that this logic will cause the first few event
    messages to be discarded since supervisor threading does not guarantee
    that the START message is deterministically written first.
    This method is preferred over _CheckForOutOfOrderStepAndMaybePurge which
    can inadvertently discard events due to supervisor threading.
    Args:
      event: The event to use as reference. If the event is a START event, all
        previously seen events with a greater event.step will be purged.
    """
    if event.HasField(
        'session_log') and event.session_log.status == SessionLog.START:
      self._Purge(event, by_tags=False)
  def _CheckForOutOfOrderStepAndMaybePurge(self, event):
    """Check for out-of-order event.step and discard expired events for tags.
    Check if the event is out of order relative to the global most recent step.
    If it is, purge outdated summaries for tags that the event contains.
    Args:
      event: The event to use as reference. If the event is out-of-order, all
        events with the same tags, but with a greater event.step will be purged.
    """
    if event.step < self.most_recent_step and event.HasField('summary'):
      self._Purge(event, by_tags=True)
    else:
      self.most_recent_step = event.step
      self.most_recent_wall_time = event.wall_time
  def _ConvertHistogramProtoToTuple(self, histo):
    """Copy the fields of a histogram proto into a plain HistogramValue."""
    return HistogramValue(min=histo.min,
                          max=histo.max,
                          num=histo.num,
                          sum=histo.sum,
                          sum_squares=histo.sum_squares,
                          bucket_limit=list(histo.bucket_limit),
                          bucket=list(histo.bucket))
  def _ProcessHistogram(self, tag, wall_time, step, histo):
    """Processes a proto histogram by adding it to accumulated state."""
    histo = self._ConvertHistogramProtoToTuple(histo)
    histo_ev = HistogramEvent(wall_time, step, histo)
    self._histograms.AddItem(tag, histo_ev)
    self._compressed_histograms.AddItem(
        tag, histo_ev, lambda x: _CompressHistogram(x, self._compression_bps))
  def _ProcessImage(self, tag, wall_time, step, image):
    """Processes an image by adding it to accumulated state."""
    event = ImageEvent(wall_time=wall_time,
                       step=step,
                       encoded_image_string=image.encoded_image_string,
                       width=image.width,
                       height=image.height)
    self._images.AddItem(tag, event)
  def _ProcessAudio(self, tag, wall_time, step, audio):
    """Processes a audio by adding it to accumulated state."""
    event = AudioEvent(wall_time=wall_time,
                       step=step,
                       encoded_audio_string=audio.encoded_audio_string,
                       content_type=audio.content_type,
                       sample_rate=audio.sample_rate,
                       length_frames=audio.length_frames)
    self._audio.AddItem(tag, event)
  def _ProcessScalar(self, tag, wall_time, step, scalar):
    """Processes a simple value by adding it to accumulated state."""
    sv = ScalarEvent(wall_time=wall_time, step=step, value=scalar)
    self._scalars.AddItem(tag, sv)
  def _ProcessTensor(self, tag, wall_time, step, tensor):
    """Processes a tensor summary by adding it to accumulated state."""
    tv = TensorEvent(wall_time=wall_time, step=step, tensor_proto=tensor)
    self._tensors.AddItem(tag, tv)
  def _ProcessHealthPill(self, wall_time, step, node_name, output_slot,
                         elements):
    """Processes a health pill value by adding it to accumulated state.
    Args:
      wall_time: The time at which the health pill was created. Provided by the
        debugger.
      step: The step at which the health pill was created. Provided by the
        debugger.
      node_name: The name of the node for this health pill.
      output_slot: The output slot for this health pill.
      elements: An ND array of 12 floats. The elements of the health pill.
    """
    # Key by the node name for fast retrieval of health pills by node name. The
    # array is cast to a list so that it is JSON-able. The debugger data plugin
    # serves a JSON response.
    self._health_pills.AddItem(
        node_name,
        HealthPillEvent(
            wall_time=wall_time,
            step=step,
            node_name=node_name,
            output_slot=output_slot,
            value=list(elements)))
  def _Purge(self, event, by_tags):
    """Purge all events that have occurred after the given event.step.
    If by_tags is True, purge all events that occurred after the given
    event.step, but only for the tags that the event has. Non-sequential
    event.steps suggest that a TensorFlow restart occurred, and we discard
    the out-of-order events to display a consistent view in TensorBoard.
    Discarding by tags is the safer method, when we are unsure whether a restart
    has occurred, given that threading in supervisor can cause events of
    different tags to arrive with unsynchronized step values.
    If by_tags is False, then purge all events with event.step greater than the
    given event.step. This can be used when we are certain that a TensorFlow
    restart has occurred and these events can be discarded.
    Args:
      event: The event to use as reference for the purge. All events with
        the same tags, but with a greater event.step will be purged.
      by_tags: Bool to dictate whether to discard all out-of-order events or
        only those that are associated with the given reference event.
    """
    ## Keep data in reservoirs that has a step less than event.step
    _NotExpired = lambda x: x.step < event.step
    if by_tags:
      def _ExpiredPerTag(value):
        return [getattr(self, x).FilterItems(_NotExpired, value.tag)
                for x in self.accumulated_attrs]
      expired_per_tags = [_ExpiredPerTag(value)
                          for value in event.summary.value]
      # Sum the per-tag counts into one expiration count per reservoir type.
      expired_per_type = [sum(x) for x in zip(*expired_per_tags)]
    else:
      expired_per_type = [getattr(self, x).FilterItems(_NotExpired)
                          for x in self.accumulated_attrs]
    if sum(expired_per_type) > 0:
      purge_msg = _GetPurgeMessage(self.most_recent_step,
                                   self.most_recent_wall_time, event.step,
                                   event.wall_time, *expired_per_type)
      logging.warn(purge_msg)
def _GetPurgeMessage(most_recent_step, most_recent_wall_time, event_step,
event_wall_time, num_expired_scalars, num_expired_histos,
num_expired_comp_histos, num_expired_images,
num_expired_audio):
"""Return the string message associated with TensorBoard purges."""
return ('Detected out of order event.step likely caused by '
'a TensorFlow restart. Purging expired events from Tensorboard'
' display between the previous step: {} (timestamp: {}) and '
'current step: {} (timestamp: {}). Removing {} scalars, {} '
'histograms, {} compressed histograms, {} images, '
'and {} audio.').format(most_recent_step, most_recent_wall_time,
event_step, event_wall_time,
num_expired_scalars, num_expired_histos,
num_expired_comp_histos, num_expired_images,
num_expired_audio)
def _GeneratorFromPath(path):
  """Create an event generator for file or directory at given path string."""
  if not path:
    raise ValueError('path must be a valid string')
  # A single events file gets a plain loader; anything else is treated as a
  # directory and watched for events files as they appear.
  if not IsTensorFlowEventsFile(path):
    return directory_watcher.DirectoryWatcher(
        path, event_file_loader.EventFileLoader, IsTensorFlowEventsFile)
  return event_file_loader.EventFileLoader(path)
def _ParseFileVersion(file_version):
"""Convert the string file_version in event.proto into a float.
Args:
file_version: String file_version from event.proto
Returns:
Version number as a float.
"""
tokens = file_version.split('brain.Event:')
try:
return float(tokens[-1])
except ValueError:
## This should never happen according to the definition of file_version
## specified in event.proto.
logging.warn(('Invalid event.proto file_version. Defaulting to use of '
'out-of-order event.step logic for purging expired events.'))
return -1
def _CompressHistogram(histo_ev, bps):
  """Creates fixed size histogram by adding compression to accumulated state.
  This routine transforms a histogram at a particular step by linearly
  interpolating its variable number of buckets to represent their cumulative
  weight at a constant number of compression points. This significantly reduces
  the size of the histogram and makes it suitable for a two-dimensional area
  plot where the output of this routine constitutes the ranges for a single x
  coordinate.
  Args:
    histo_ev: A HistogramEvent namedtuple.
    bps: Compression points represented in basis points, 1/100ths of a percent.
  Returns:
    CompressedHistogramEvent namedtuple.
  """
  # See also: Histogram::Percentile() in core/lib/histogram/histogram.cc
  histo = histo_ev.histogram_value
  if not histo.num:
    # Empty histogram: every compression point maps to 0.0.
    return CompressedHistogramEvent(
        histo_ev.wall_time,
        histo_ev.step,
        [CompressedHistogramValue(b, 0.0) for b in bps])
  bucket = np.array(histo.bucket)
  # Cumulative bucket mass rescaled so the total equals bps[-1] (the last
  # basis point); weights[i] is the cumulative mass through bucket i.
  weights = (bucket * bps[-1] / (bucket.sum() or 1.0)).cumsum()
  values = []
  j = 0
  while j < len(bps):
    # Locate the first bucket whose cumulative weight exceeds bps[j].
    i = np.searchsorted(weights, bps[j], side='right')
    while i < len(weights):
      cumsum = weights[i]
      cumsum_prev = weights[i - 1] if i > 0 else 0.0
      if cumsum == cumsum_prev:  # prevent remap divide by zero
        i += 1
        continue
      if not i or not cumsum_prev:
        lhs = histo.min
      else:
        lhs = max(histo.bucket_limit[i - 1], histo.min)
      rhs = min(histo.bucket_limit[i], histo.max)
      # Linearly interpolate the value at basis point bps[j] within bucket i.
      weight = _Remap(bps[j], cumsum_prev, cumsum, lhs, rhs)
      values.append(CompressedHistogramValue(bps[j], weight))
      j += 1
      break
    else:
      # Ran off the end of the buckets; fall through to the saturation loop.
      break
  # Any remaining basis points lie beyond the last bucket: clamp to histo.max.
  while j < len(bps):
    values.append(CompressedHistogramValue(bps[j], histo.max))
    j += 1
  return CompressedHistogramEvent(histo_ev.wall_time, histo_ev.step, values)
def _Remap(x, x0, x1, y0, y1):
"""Linearly map from [x0, x1] unto [y0, y1]."""
return y0 + (x - x0) * float(y1 - y0) / (x1 - x0)
| apache-2.0 |
kizniche/Mycodo | mycodo/inputs/rpi_signal_revolutions.py | 1 | 4596 | # coding=utf-8
import time
import copy
from mycodo.inputs.base_input import AbstractInput
# Measurements
# Channel 0 is the single RPM value this input produces.
measurements_dict = {
    0: {
        'measurement': 'revolutions',
        'unit': 'rpm'
    }
}
# Input information
# Consumed by Mycodo's input framework to register this input and render
# its configuration options in the UI.
INPUT_INFORMATION = {
    'input_name_unique': 'SIGNAL_RPM',
    'input_manufacturer': 'Raspberry Pi',
    'input_name': 'Signal (Revolutions)',
    'input_library': 'pigpio',
    'measurements_name': 'RPM',
    'measurements_dict': measurements_dict,
    'options_enabled': [
        'gpio_location',
        'rpm_pulses_per_rev',
        'weighting',
        'sample_time',
        'period',
        'pre_output'
    ],
    'options_disabled': ['interface'],
    'dependencies_module': [
        ('internal', 'file-exists /opt/mycodo/pigpio_installed', 'pigpio')
    ],
    'interfaces': ['GPIO'],
    # Defaults for the exponential smoothing weight, the pulse-counting
    # window in seconds, and pulses emitted per revolution.
    'weighting': 0.0,
    'sample_time': 2.0,
    'rpm_pulses_per_rev': 1.0
}
class InputModule(AbstractInput):
    """ A sensor support class that monitors rpm """
    def __init__(self, input_dev, testing=False):
        super(InputModule, self).__init__(input_dev, testing=testing, name=__name__)
        # Populated by initialize_input(); left as None while testing so no
        # hardware/daemon access happens during tests.
        self.pigpio = None
        self.gpio = None
        self.weighting = None
        self.rpm_pulses_per_rev = None
        self.sample_time = None
        if not testing:
            self.initialize_input()
    def initialize_input(self):
        # Imported lazily so this module can be loaded on systems without
        # the pigpio package installed.
        import pigpio
        self.pigpio = pigpio
        self.gpio = int(self.input_dev.gpio_location)
        self.weighting = self.input_dev.weighting
        self.rpm_pulses_per_rev = self.input_dev.rpm_pulses_per_rev
        self.sample_time = self.input_dev.sample_time
    def get_measurement(self):
        """ Gets the revolutions """
        # A fresh connection to the pigpio daemon is made for every
        # measurement and closed again with pi.stop() below.
        pi = self.pigpio.pi()
        if not pi.connected:  # Check if pigpiod is running
            self.logger.error("Could not connect to pigpiod. Ensure it is running and try again.")
            return None
        self.return_dict = copy.deepcopy(measurements_dict)
        read_revolutions = ReadRPM(pi, self.gpio, self.pigpio, self.rpm_pulses_per_rev, self.weighting)
        # Block while ReadRPM's edge callback accumulates pulses.
        time.sleep(self.sample_time)
        rpm = read_revolutions.RPM()
        read_revolutions.cancel()
        pi.stop()
        self.value_set(0, rpm)
        return self.return_dict
class ReadRPM:
    """
    A class to read pulses and calculate the RPM
    """
    def __init__(self, pi, gpio, pigpio, pulses_per_rev=1.0, weighting=0.0):
        """
        Instantiate with the Pi and gpio of the RPM signal
        to monitor.
        Optionally the number of pulses for a complete revolution
        may be specified. It defaults to 1.
        Optionally a weighting may be specified. This is a number
        between 0 and 1 and indicates how much the old reading
        affects the new reading. It defaults to 0 which means
        the old reading has no effect. This may be used to
        smooth the data.
        """
        self.pigpio = pigpio
        self.pi = pi
        self.gpio = gpio
        self.pulses_per_rev = pulses_per_rev
        self._watchdog = 200  # Milliseconds.
        # Clamp the smoothing weight to [0, 0.99].
        if weighting < 0.0:
            weighting = 0.0
        elif weighting > 0.99:
            weighting = 0.99
        self._new = 1.0 - weighting  # Weighting for new reading.
        self._old = weighting  # Weighting for old reading.
        # Tick of the previous rising edge and the (smoothed) period between
        # edges; both None until the first edges arrive.
        self._high_tick = None
        self._period = None
        pi.set_mode(self.gpio, self.pigpio.INPUT)
        self._cb = pi.callback(self.gpio, self.pigpio.RISING_EDGE, self._cbf)
        # The watchdog produces a level-2 callback when no edge is seen for
        # self._watchdog ms, so a stalled signal decays toward 0 RPM.
        pi.set_watchdog(self.gpio, self._watchdog)
    def _cbf(self, gpio, level, tick):
        # pigpio callback: level 1 is a rising edge, level 2 a watchdog
        # timeout. tick is in microseconds.
        if level == 1:  # Rising edge.
            if self._high_tick is not None:
                t = self.pigpio.tickDiff(self._high_tick, tick)
                if self._period is not None:
                    # Exponentially weighted moving average of the period.
                    self._period = (self._old * self._period) + (self._new * t)
                else:
                    self._period = t
            self._high_tick = tick
        elif level == 2:  # Watchdog timeout.
            if self._period is not None:
                if self._period < 2000000000:
                    # No pulse seen: grow the period (capped) so RPM falls.
                    self._period += self._watchdog * 1000
    def RPM(self):
        """
        Returns the RPM.
        """
        rpm = 0
        if self._period is not None:
            # _period is in microseconds; 60000000 us per minute.
            rpm = 60000000.0 / (self._period * self.pulses_per_rev)
        return rpm
    def cancel(self):
        """
        Cancels the reader and releases resources.
        """
        self.pi.set_watchdog(self.gpio, 0)  # cancel watchdog
        self._cb.cancel()
| gpl-3.0 |
ekaputra07/gempa-monitor | lib/oauth2client/multistore_file.py | 144 | 13935 | # Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-credential file store with lock support.
This module implements a JSON credential store where multiple
credentials can be stored in one file. That file supports locking
both in a single process and across processes.
The credential themselves are keyed off of:
* client_id
* user_agent
* scope
The format of the stored data is like so:
{
'file_version': 1,
'data': [
{
'key': {
'clientId': '<client id>',
'userAgent': '<user agent>',
'scope': '<scope>'
},
'credential': {
# JSON serialized Credentials.
}
}
]
}
"""
__author__ = 'jbeda@google.com (Joe Beda)'
import base64
import errno
import logging
import os
import threading
from anyjson import simplejson
from oauth2client.client import Storage as BaseStorage
from oauth2client.client import Credentials
from oauth2client import util
from locked_file import LockedFile
logger = logging.getLogger(__name__)
# A dict from 'filename'->_MultiStore instances
_multistores = {}
# Guards access to _multistores within this process.
_multistores_lock = threading.Lock()
class Error(Exception):
  """Base error for this module."""
class NewerCredentialStoreError(Error):
  """The credential store is a newer version that supported."""
@util.positional(4)
def get_credential_storage(filename, client_id, user_agent, scope,
                           warn_on_readonly=True):
  """Get a Storage instance for a credential.

  Args:
    filename: The JSON file storing a set of credentials
    client_id: The client_id for the credential
    user_agent: The user agent for the credential
    scope: string or iterable of strings, Scope(s) being requested
    warn_on_readonly: if True, log a warning if the store is readonly

  Returns:
    An object derived from client.Storage for getting/setting the
    credential.
  """
  # Rebuild the legacy-style composite key that pre-dates custom keys.
  legacy_key = {
      'clientId': client_id,
      'userAgent': user_agent,
      'scope': util.scopes_to_string(scope),
  }
  return get_credential_storage_custom_key(
      filename, legacy_key, warn_on_readonly=warn_on_readonly)
@util.positional(2)
def get_credential_storage_custom_string_key(
    filename, key_string, warn_on_readonly=True):
  """Get a Storage instance for a credential using a single string as a key.

  Allows you to provide a string as a custom key that will be used for
  credential storage and retrieval.

  Args:
    filename: The JSON file storing a set of credentials
    key_string: A string to use as the key for storing this credential.
    warn_on_readonly: if True, log a warning if the store is readonly

  Returns:
    An object derived from client.Storage for getting/setting the
    credential.
  """
  # Wrap the plain string in a single-entry dict so the dict-keyed
  # implementation can be reused unchanged.
  return get_credential_storage_custom_key(
      filename, {'key': key_string}, warn_on_readonly=warn_on_readonly)
@util.positional(2)
def get_credential_storage_custom_key(
    filename, key_dict, warn_on_readonly=True):
  """Get a Storage instance for a credential using a dictionary as a key.

  Allows you to provide a dictionary as a custom key that will be used for
  credential storage and retrieval. There is no ordering of the keys in the
  dictionary: logically equivalent dictionaries produce equivalent storage
  keys.

  Args:
    filename: The JSON file storing a set of credentials
    key_dict: A dictionary to use as the key for storing this credential.
    warn_on_readonly: if True, log a warning if the store is readonly

  Returns:
    An object derived from client.Storage for getting/setting the
    credential.
  """
  store = _get_multistore(filename, warn_on_readonly=warn_on_readonly)
  # Normalize the dict into an order-independent hashable tuple key.
  return store._get_storage(util.dict_to_tuple_key(key_dict))
@util.positional(1)
def get_all_credential_keys(filename, warn_on_readonly=True):
  """Gets all the registered credential keys in the given Multistore.

  Args:
    filename: The JSON file storing a set of credentials
    warn_on_readonly: if True, log a warning if the store is readonly

  Returns:
    A list of the credential keys present in the file, as dictionaries
    suitable for passing to get_credential_storage_custom_key.
  """
  store = _get_multistore(filename, warn_on_readonly=warn_on_readonly)
  store._lock()
  try:
    return store._get_all_credential_keys()
  finally:
    # Always release, even if reading the keys raised.
    store._unlock()
@util.positional(1)
def _get_multistore(filename, warn_on_readonly=True):
  """A helper method to initialize the multistore with proper locking.

  Returns one _MultiStore per distinct (expanded) filename, creating it
  on first use.

  Args:
    filename: The JSON file storing a set of credentials
    warn_on_readonly: if True, log a warning if the store is readonly

  Returns:
    A multistore object
  """
  filename = os.path.expanduser(filename)
  # Guard the process-wide registry while looking up / creating the store.
  with _multistores_lock:
    multistore = _multistores.setdefault(
        filename, _MultiStore(filename, warn_on_readonly=warn_on_readonly))
  return multistore
class _MultiStore(object):
  """A file backed store for multiple credentials."""

  @util.positional(2)
  def __init__(self, filename, warn_on_readonly=True):
    """Initialize the class.

    This will create the file if necessary.
    """
    # 'r+b' is the primary (read/write) mode, 'rb' the read-only
    # fallback — see locked_file.LockedFile for the exact semantics.
    self._file = LockedFile(filename, 'r+b', 'rb')
    self._thread_lock = threading.Lock()
    self._read_only = False
    self._warn_on_readonly = warn_on_readonly

    self._create_file_if_needed()

    # Cache of deserialized store. This is only valid after the
    # _MultiStore is locked or _refresh_data_cache is called. This is
    # of the form of:
    #
    # ((key, value), (key, value)...) -> OAuth2Credential
    #
    # If this is None, then the store hasn't been read yet.
    self._data = None

  class _Storage(BaseStorage):
    """A Storage object that knows how to read/write a single credential."""

    def __init__(self, multistore, key):
      self._multistore = multistore
      self._key = key

    def acquire_lock(self):
      """Acquires any lock necessary to access this Storage.

      This lock is not reentrant.
      """
      self._multistore._lock()

    def release_lock(self):
      """Release the Storage lock.

      Trying to release a lock that isn't held will result in a
      RuntimeError.
      """
      self._multistore._unlock()

    def locked_get(self):
      """Retrieve credential.

      The Storage lock must be held when this is called.

      Returns:
        oauth2client.client.Credentials
      """
      credential = self._multistore._get_credential(self._key)
      if credential:
        # Re-attach this store so refreshed tokens get persisted back.
        credential.set_store(self)
      return credential

    def locked_put(self, credentials):
      """Write a credential.

      The Storage lock must be held when this is called.

      Args:
        credentials: Credentials, the credentials to store.
      """
      self._multistore._update_credential(self._key, credentials)

    def locked_delete(self):
      """Delete a credential.

      The Storage lock must be held when this is called.

      Args:
        credentials: Credentials, the credentials to store.
      """
      self._multistore._delete_credential(self._key)

  def _create_file_if_needed(self):
    """Create an empty file if necessary.

    This method will not initialize the file. Instead it implements a
    simple version of "touch" to ensure the file has been created.
    """
    if not os.path.exists(self._file.filename()):
      # umask 0177 => the credentials file is created owner
      # read/write only (0600), keeping tokens private.
      old_umask = os.umask(0177)
      try:
        open(self._file.filename(), 'a+b').close()
      finally:
        os.umask(old_umask)

  def _lock(self):
    """Lock the entire multistore."""
    # Acquire the in-process thread lock first, then the cross-process
    # file lock; _unlock releases them in the reverse order.
    self._thread_lock.acquire()
    self._file.open_and_lock()
    if not self._file.is_locked():
      # Could not get the file lock: fall back to read-only mode.
      self._read_only = True
      if self._warn_on_readonly:
        logger.warn('The credentials file (%s) is not writable. Opening in '
                    'read-only mode. Any refreshed credentials will only be '
                    'valid for this run.' % self._file.filename())
    if os.path.getsize(self._file.filename()) == 0:
      logger.debug('Initializing empty multistore file')
      # The multistore is empty so write out an empty file.
      self._data = {}
      self._write()
    elif not self._read_only or self._data is None:
      # Only refresh the data if we are read/write or we haven't
      # cached the data yet. If we are readonly, we assume is isn't
      # changing out from under us and that we only have to read it
      # once. This prevents us from whacking any new access keys that
      # we have cached in memory but were unable to write out.
      self._refresh_data_cache()

  def _unlock(self):
    """Release the lock on the multistore."""
    self._file.unlock_and_close()
    self._thread_lock.release()

  def _locked_json_read(self):
    """Get the raw content of the multistore file.

    The multistore must be locked when this is called.

    Returns:
      The contents of the multistore decoded as JSON.
    """
    assert self._thread_lock.locked()
    self._file.file_handle().seek(0)
    return simplejson.load(self._file.file_handle())

  def _locked_json_write(self, data):
    """Write a JSON serializable data structure to the multistore.

    The multistore must be locked when this is called.

    Args:
      data: The data to be serialized and written.
    """
    assert self._thread_lock.locked()
    if self._read_only:
      # Best effort: silently skip writes when we only hold read access.
      return
    self._file.file_handle().seek(0)
    simplejson.dump(data, self._file.file_handle(), sort_keys=True, indent=2)
    # Drop any stale trailing bytes from a previously longer file.
    self._file.file_handle().truncate()

  def _refresh_data_cache(self):
    """Refresh the contents of the multistore.

    The multistore must be locked when this is called.

    Raises:
      NewerCredentialStoreError: Raised when a newer client has written the
        store.
    """
    self._data = {}
    try:
      raw_data = self._locked_json_read()
    except Exception:
      logger.warn('Credential data store could not be loaded. '
                  'Will ignore and overwrite.')
      return

    version = 0
    try:
      version = raw_data['file_version']
    except Exception:
      logger.warn('Missing version for credential data store. It may be '
                  'corrupt or an old version. Overwriting.')
    if version > 1:
      raise NewerCredentialStoreError(
          'Credential file has file_version of %d. '
          'Only file_version of 1 is supported.' % version)

    credentials = []
    try:
      credentials = raw_data['data']
    except (TypeError, KeyError):
      pass

    for cred_entry in credentials:
      try:
        (key, credential) = self._decode_credential_from_json(cred_entry)
        self._data[key] = credential
      except:
        # If something goes wrong loading a credential, just ignore it
        logger.info('Error decoding credential, skipping', exc_info=True)

  def _decode_credential_from_json(self, cred_entry):
    """Load a credential from our JSON serialization.

    Args:
      cred_entry: A dict entry from the data member of our format

    Returns:
      (key, cred) where the key is the key tuple and the cred is the
        OAuth2Credential object.
    """
    raw_key = cred_entry['key']
    key = util.dict_to_tuple_key(raw_key)
    credential = None
    # Round-trip through a JSON string because new_from_json expects text.
    credential = Credentials.new_from_json(simplejson.dumps(cred_entry['credential']))
    return (key, credential)

  def _write(self):
    """Write the cached data back out.

    The multistore must be locked.
    """
    raw_data = {'file_version': 1}
    raw_creds = []
    raw_data['data'] = raw_creds
    for (cred_key, cred) in self._data.items():
      raw_key = dict(cred_key)
      raw_cred = simplejson.loads(cred.to_json())
      raw_creds.append({'key': raw_key, 'credential': raw_cred})
    self._locked_json_write(raw_data)

  def _get_all_credential_keys(self):
    """Gets all the registered credential keys in the multistore.

    Returns:
      A list of dictionaries corresponding to all the keys currently registered
    """
    return [dict(key) for key in self._data.keys()]

  def _get_credential(self, key):
    """Get a credential from the multistore.

    The multistore must be locked.

    Args:
      key: The key used to retrieve the credential

    Returns:
      The credential specified or None if not present
    """
    return self._data.get(key, None)

  def _update_credential(self, key, cred):
    """Update a credential and write the multistore.

    This must be called when the multistore is locked.

    Args:
      key: The key used to retrieve the credential
      cred: The OAuth2Credential to update/set
    """
    self._data[key] = cred
    self._write()

  def _delete_credential(self, key):
    """Delete a credential and write the multistore.

    This must be called when the multistore is locked.

    Args:
      key: The key used to retrieve the credential
    """
    try:
      del self._data[key]
    except KeyError:
      # Deleting a missing key is a no-op (but we still rewrite the file).
      pass
    self._write()

  def _get_storage(self, key):
    """Get a Storage object to get/set a credential.

    This Storage is a 'view' into the multistore.

    Args:
      key: The key used to retrieve the credential

    Returns:
      A Storage object that can be used to get/set this cred
    """
    return self._Storage(self, key)
| mit |
sinraf96/electrum | src/test/bctest.py | 57 | 1536 | # Copyright 2014 BitPay, Inc.
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
import subprocess
import os
import json
import sys
def bctest(testDir, testObj, exeext):
    """Run one JSON-described test case and exit the process on failure.

    Args:
        testDir: directory containing the test's input/output fixture files.
        testObj: dict with keys 'exec', 'args' and optionally 'input',
            'output_cmp' and 'return_code'.
        exeext: platform executable suffix appended to testObj['exec']
            (e.g. '.exe' on Windows, '' elsewhere).

    Exits with status 1 on execution failure, output mismatch, or an
    unexpected return code; returns None on success.
    """
    execprog = testObj['exec'] + exeext
    execargs = testObj['args']
    execrun = [execprog] + execargs

    stdinCfg = None
    inputData = None
    if "input" in testObj:
        filename = testDir + "/" + testObj['input']
        # Use a context manager so the fixture file handle is not leaked.
        with open(filename) as f:
            inputData = f.read()
        stdinCfg = subprocess.PIPE

    outputFn = None
    outputData = None
    if "output_cmp" in testObj:
        outputFn = testObj['output_cmp']
        with open(testDir + "/" + outputFn) as f:
            outputData = f.read()

    proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, universal_newlines=True)
    try:
        # communicate() also waits for the process, setting returncode.
        outs = proc.communicate(input=inputData)
    except OSError:
        print("OSError, Failed to execute " + execprog)
        sys.exit(1)

    # Compare captured stdout against the expected fixture, if any.
    if outputData and (outs[0] != outputData):
        print("Output data mismatch for " + outputFn)
        sys.exit(1)

    wantRC = 0
    if "return_code" in testObj:
        wantRC = testObj['return_code']
    if proc.returncode != wantRC:
        print("Return code mismatch for " + outputFn)
        sys.exit(1)
def bctester(testDir, input_basename, buildenv):
    """Run every test case listed in a JSON manifest, then exit 0.

    Args:
        testDir: directory containing the manifest and fixture files.
        input_basename: filename of the JSON manifest inside testDir.
        buildenv: object exposing an ``exeext`` attribute (executable suffix).

    Individual failures terminate the process from within bctest();
    reaching the end means all cases passed, so we exit with status 0.
    """
    input_filename = testDir + "/" + input_basename
    # Context manager avoids leaking the manifest file handle.
    with open(input_filename) as f:
        input_data = json.load(f)

    for testObj in input_data:
        bctest(testDir, testObj, buildenv.exeext)

    sys.exit(0)
| mit |
lokeshjindal15/gem5_transform | src/dev/Terminal.py | 66 | 1972 | # Copyright (c) 2005-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from m5.SimObject import SimObject
from m5.params import *
from m5.proxy import *
class Terminal(SimObject):
    """Configuration object for gem5's terminal device (dev/terminal.hh).

    The parameters below are SimObject Params consumed by the C++
    implementation; they are declarations, not runtime assignments.
    """
    type = 'Terminal'
    cxx_header = "dev/terminal.hh"
    # Interrupt controller this terminal raises interrupts through;
    # resolved from the enclosing system via the Parent proxy.
    intr_control = Param.IntrControl(Parent.any, "interrupt controller")
    # TCP port the terminal listens on for client connections.
    port = Param.TcpPort(3456, "listen port")
    number = Param.Int(0, "terminal number")
    output = Param.Bool(True, "Enable output dump to file")
| bsd-3-clause |
aronbierbaum/txsuds | suds/xsd/__init__.py | 205 | 3007 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{schema} module provides a intelligent representation of
an XSD schema. The I{raw} model is the XML tree and the I{model}
is the denormalized, objectified and intelligent view of the schema.
Most of the I{value-add} provided by the model is centered around
tranparent referenced type resolution and targeted denormalization.
"""
from logging import getLogger
from suds import *
from suds.sax import Namespace, splitPrefix
log = getLogger(__name__)
def qualify(ref, resolvers, defns=Namespace.default):
    """
    Get a reference that is I{qualified} by namespace.
    @param ref: A referenced schema type name, optionally prefixed.
    @type ref: str
    @param resolvers: A list of objects to be used to resolve types.
    @type resolvers: [L{sax.element.Element},]
    @param defns: A default namespace I{tuple: (prefix,uri)} used when ref not prefixed.
    @return: A qualified reference.
    @rtype: (name, namespace-uri)
    """
    prefix, name = splitPrefix(ref)
    if prefix is None:
        # Unprefixed references are qualified by the default namespace.
        return (name, defns[1])
    if not isinstance(resolvers, (list, tuple)):
        resolvers = (resolvers,)
    # Return the first resolver that maps the prefix to a real URI.
    for resolver in resolvers:
        resolved = resolver.resolvePrefix(prefix)
        if resolved[1] is not None:
            return (name, resolved[1])
    raise Exception('prefix (%s) not resolved' % prefix)
def isqref(object):
    """
    Get whether the object is a I{qualified reference}: a 2-tuple of
    strings (name, namespace-uri).
    @param object: An object to be tested.
    @type object: I{any}
    @rtype: boolean
    @see: L{qualify}
    """
    # basestring only exists on Python 2; fall back to str so the check
    # does not raise NameError when this module is used on Python 3.
    try:
        string_types = basestring
    except NameError:
        string_types = str
    return (
        isinstance(object, tuple) and
        len(object) == 2 and
        isinstance(object[0], string_types) and
        isinstance(object[1], string_types))
class Filter:
    """
    A simple membership filter.
    In inclusive mode, C{x in filter} holds only for the listed items;
    in exclusive mode (the default), it holds for everything else.
    """

    def __init__(self, inclusive=False, *items):
        self.inclusive = inclusive
        self.items = items

    def __contains__(self, x):
        if self.inclusive:
            return x in self.items
        return x not in self.items
| lgpl-3.0 |
Dark-Hacker/horizon | openstack_dashboard/dashboards/project/access_and_security/floating_ips/views.py | 65 | 2958 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing floating IPs.
"""
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from neutronclient.common import exceptions as neutron_exc
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.usage import quotas
from openstack_dashboard.dashboards.project.access_and_security.\
floating_ips import forms as project_forms
from openstack_dashboard.dashboards.project.access_and_security.\
floating_ips import workflows as project_workflows
class AssociateView(workflows.WorkflowView):
    """Launches the workflow that associates a floating IP with a target."""
    # The workflow itself is defined in this panel's workflows module
    # (imported above as project_workflows).
    workflow_class = project_workflows.IPAssociationWorkflow
class AllocateView(forms.ModalFormView):
    """Modal form view for allocating a floating IP from a pool."""
    form_class = project_forms.FloatingIpAllocate
    form_id = "associate_floating_ip_form"
    modal_header = _("Allocate Floating IP")
    template_name = 'project/access_and_security/floating_ips/allocate.html'
    submit_label = _("Allocate IP")
    submit_url = reverse_lazy(
        "horizon:project:access_and_security:floating_ips:allocate")
    success_url = reverse_lazy('horizon:project:access_and_security:index')

    def get_object_display(self, obj):
        # The allocated floating IP address is the user-visible identifier.
        return obj.ip

    def get_context_data(self, **kwargs):
        """Add the tenant's quota usage to the template context."""
        context = super(AllocateView, self).get_context_data(**kwargs)
        try:
            context['usages'] = quotas.tenant_quota_usages(self.request)
        except Exception:
            # Quota retrieval failure is reported but does not block the form.
            exceptions.handle(self.request)
        return context

    def get_initial(self):
        """Build the pool choice list for the allocation form."""
        try:
            pools = api.network.floating_ip_pools_list(self.request)
        except neutron_exc.ConnectionFailed:
            pools = []
            exceptions.handle(self.request)
        except Exception:
            pools = []
            exceptions.handle(self.request,
                              _("Unable to retrieve floating IP pools."))
        pool_list = [(pool.id, pool.name) for pool in pools]
        if not pool_list:
            # Placeholder entry so the form still renders without pools.
            pool_list = [(None, _("No floating IP pools available"))]
        return {'pool_list': pool_list}
| apache-2.0 |
jideobs/twilioAngular | venv/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.py | 224 | 39043 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2014 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import base64
import codecs
import datetime
import distutils.util
from email import message_from_file
import hashlib
import imp
import json
import logging
import os
import posixpath
import re
import shutil
import sys
import tempfile
import zipfile
from . import __version__, DistlibException
from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
from .database import InstalledDistribution
from .metadata import Metadata, METADATA_FILENAME
from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache,
cached_property, get_cache_base, read_exports, tempdir)
from .version import NormalizedVersion, UnsupportedVersionError
logger = logging.getLogger(__name__)
cache = None # created when needed
if hasattr(sys, 'pypy_version_info'):
IMP_PREFIX = 'pp'
elif sys.platform.startswith('java'):
IMP_PREFIX = 'jy'
elif sys.platform == 'cli':
IMP_PREFIX = 'ip'
else:
IMP_PREFIX = 'cp'
VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
if not VER_SUFFIX: # pragma: no cover
VER_SUFFIX = '%s%s' % sys.version_info[:2]
PYVER = 'py' + VER_SUFFIX
IMPVER = IMP_PREFIX + VER_SUFFIX
ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_')
ABI = sysconfig.get_config_var('SOABI')
if ABI and ABI.startswith('cpython-'):
ABI = ABI.replace('cpython-', 'cp')
else:
def _derive_abi():
parts = ['cp', VER_SUFFIX]
if sysconfig.get_config_var('Py_DEBUG'):
parts.append('d')
if sysconfig.get_config_var('WITH_PYMALLOC'):
parts.append('m')
if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4:
parts.append('u')
return ''.join(parts)
ABI = _derive_abi()
del _derive_abi
FILENAME_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?
-(?P<py>\w+\d+(\.\w+\d+)*)
-(?P<bi>\w+)
-(?P<ar>\w+(\.\w+)*)
\.whl$
''', re.IGNORECASE | re.VERBOSE)
NAME_VERSION_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?$
''', re.IGNORECASE | re.VERBOSE)
SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')
SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$')
SHEBANG_PYTHON = b'#!python'
SHEBANG_PYTHONW = b'#!pythonw'
if os.sep == '/':
to_posix = lambda o: o
else:
to_posix = lambda o: o.replace(os.sep, '/')
class Mounter(object):
    """Import hook that loads C extension modules registered from wheels.

    Maintains a mapping of module fullnames to extension file paths;
    find_module answers only for registered names and load_module loads
    the extension dynamically.
    """

    def __init__(self):
        # wheel pathname -> iterable of (fullname, extension path) pairs,
        # kept so a wheel's extensions can be deregistered later.
        self.impure_wheels = {}
        # flat map: module fullname -> extension file path
        self.libs = {}

    def add(self, pathname, extensions):
        self.impure_wheels[pathname] = extensions
        self.libs.update(extensions)

    def remove(self, pathname):
        for name, _path in self.impure_wheels.pop(pathname):
            if name in self.libs:
                del self.libs[name]

    def find_module(self, fullname, path=None):
        # Only claim modules we have an extension registered for.
        return self if fullname in self.libs else None

    def load_module(self, fullname):
        try:
            # Honour an already-imported module, per the import protocol.
            return sys.modules[fullname]
        except KeyError:
            pass
        if fullname not in self.libs:
            raise ImportError('unable to find extension for %s' % fullname)
        module = imp.load_dynamic(fullname, self.libs[fullname])
        module.__loader__ = self
        parts = fullname.rsplit('.', 1)
        if len(parts) > 1:
            module.__package__ = parts[0]
        return module

_hook = Mounter()
class Wheel(object):
"""
Class to build and install from Wheel files (PEP 427).
"""
wheel_version = (1, 1)
hash_kind = 'sha256'
def __init__(self, filename=None, sign=False, verify=False):
"""
Initialise an instance using a (valid) filename.
"""
self.sign = sign
self.should_verify = verify
self.buildver = ''
self.pyver = [PYVER]
self.abi = ['none']
self.arch = ['any']
self.dirname = os.getcwd()
if filename is None:
self.name = 'dummy'
self.version = '0.1'
self._filename = self.filename
else:
m = NAME_VERSION_RE.match(filename)
if m:
info = m.groupdict('')
self.name = info['nm']
# Reinstate the local version separator
self.version = info['vn'].replace('_', '-')
self.buildver = info['bn']
self._filename = self.filename
else:
dirname, filename = os.path.split(filename)
m = FILENAME_RE.match(filename)
if not m:
raise DistlibException('Invalid name or '
'filename: %r' % filename)
if dirname:
self.dirname = os.path.abspath(dirname)
self._filename = filename
info = m.groupdict('')
self.name = info['nm']
self.version = info['vn']
self.buildver = info['bn']
self.pyver = info['py'].split('.')
self.abi = info['bi'].split('.')
self.arch = info['ar'].split('.')
@property
def filename(self):
"""
Build and return a filename from the various components.
"""
if self.buildver:
buildver = '-' + self.buildver
else:
buildver = ''
pyver = '.'.join(self.pyver)
abi = '.'.join(self.abi)
arch = '.'.join(self.arch)
# replace - with _ as a local version separator
version = self.version.replace('-', '_')
return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver,
pyver, abi, arch)
@property
def exists(self):
path = os.path.join(self.dirname, self.filename)
return os.path.isfile(path)
@property
def tags(self):
for pyver in self.pyver:
for abi in self.abi:
for arch in self.arch:
yield pyver, abi, arch
@cached_property
def metadata(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
wheel_metadata = self.get_wheel_metadata(zf)
wv = wheel_metadata['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
if file_version < (1, 1):
fn = 'METADATA'
else:
fn = METADATA_FILENAME
try:
metadata_filename = posixpath.join(info_dir, fn)
with zf.open(metadata_filename) as bf:
wf = wrapper(bf)
result = Metadata(fileobj=wf)
except KeyError:
raise ValueError('Invalid wheel, because %s is '
'missing' % fn)
return result
def get_wheel_metadata(self, zf):
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
metadata_filename = posixpath.join(info_dir, 'WHEEL')
with zf.open(metadata_filename) as bf:
wf = codecs.getreader('utf-8')(bf)
message = message_from_file(wf)
return dict(message)
@cached_property
def info(self):
pathname = os.path.join(self.dirname, self.filename)
with ZipFile(pathname, 'r') as zf:
result = self.get_wheel_metadata(zf)
return result
    def process_shebang(self, data):
        """Normalise the shebang of a script read from a wheel.

        An existing shebang is replaced with the '#!python' (or
        '#!pythonw' if the original mentioned pythonw) marker, keeping
        any interpreter arguments; if there is no shebang, the marker
        is prepended. *data* is bytes; returns the adjusted bytes.
        """
        m = SHEBANG_RE.match(data)
        if m:
            end = m.end()
            shebang, data_after_shebang = data[:end], data[end:]
            # Preserve any arguments after the interpreter
            if b'pythonw' in shebang.lower():
                shebang_python = SHEBANG_PYTHONW
            else:
                shebang_python = SHEBANG_PYTHON
            m = SHEBANG_DETAIL_RE.match(shebang)
            if m:
                args = b' ' + m.groups()[-1]
            else:
                args = b''
            shebang = shebang_python + args
            data = shebang + data_after_shebang
        else:
            # No shebang: detect the file's line-ending convention so the
            # inserted marker line matches the rest of the script.
            cr = data.find(b'\r')
            lf = data.find(b'\n')
            if cr < 0 or cr > lf:
                term = b'\n'
            else:
                if data[cr:cr + 2] == b'\r\n':
                    term = b'\r\n'
                else:
                    term = b'\r'
            data = SHEBANG_PYTHON + term + data
        return data
    def get_hash(self, data, hash_kind=None):
        """Return (hash_kind, digest) for *data*.

        The digest is the unpadded urlsafe-base64 encoding of the raw
        hash, the form used when writing RECORD entries (see
        write_records). *hash_kind* defaults to self.hash_kind and must
        name a hashlib constructor.

        Raises:
            DistlibException: if hashlib has no such algorithm.
        """
        if hash_kind is None:
            hash_kind = self.hash_kind
        try:
            hasher = getattr(hashlib, hash_kind)
        except AttributeError:
            raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
        result = hasher(data).digest()
        # Strip base64 '=' padding, per the wheel RECORD convention.
        result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')
        return hash_kind, result
def write_record(self, records, record_path, base):
with CSVWriter(record_path) as writer:
for row in records:
writer.writerow(row)
p = to_posix(os.path.relpath(record_path, base))
writer.writerow((p, '', ''))
def write_records(self, info, libdir, archive_paths):
records = []
distinfo, info_dir = info
hasher = getattr(hashlib, self.hash_kind)
for ap, p in archive_paths:
with open(p, 'rb') as f:
data = f.read()
digest = '%s=%s' % self.get_hash(data)
size = os.path.getsize(p)
records.append((ap, digest, size))
p = os.path.join(distinfo, 'RECORD')
self.write_record(records, p, libdir)
ap = to_posix(os.path.join(info_dir, 'RECORD'))
archive_paths.append((ap, p))
def build_zip(self, pathname, archive_paths):
with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
for ap, p in archive_paths:
logger.debug('Wrote %s to %s in wheel', p, ap)
zf.write(p, ap)
def build(self, paths, tags=None, wheel_version=None):
"""
Build a wheel from files in specified paths, and use any specified tags
when determining the name of the wheel.
"""
if tags is None:
tags = {}
libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
if libkey == 'platlib':
is_pure = 'false'
default_pyver = [IMPVER]
default_abi = [ABI]
default_arch = [ARCH]
else:
is_pure = 'true'
default_pyver = [PYVER]
default_abi = ['none']
default_arch = ['any']
self.pyver = tags.get('pyver', default_pyver)
self.abi = tags.get('abi', default_abi)
self.arch = tags.get('arch', default_arch)
libdir = paths[libkey]
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
archive_paths = []
# First, stuff which is not in site-packages
for key in ('data', 'headers', 'scripts'):
if key not in paths:
continue
path = paths[key]
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for fn in files:
p = fsdecode(os.path.join(root, fn))
rp = os.path.relpath(p, path)
ap = to_posix(os.path.join(data_dir, key, rp))
archive_paths.append((ap, p))
if key == 'scripts' and not p.endswith('.exe'):
with open(p, 'rb') as f:
data = f.read()
data = self.process_shebang(data)
with open(p, 'wb') as f:
f.write(data)
# Now, stuff which is in site-packages, other than the
# distinfo stuff.
path = libdir
distinfo = None
for root, dirs, files in os.walk(path):
if root == path:
# At the top level only, save distinfo for later
# and skip it for now
for i, dn in enumerate(dirs):
dn = fsdecode(dn)
if dn.endswith('.dist-info'):
distinfo = os.path.join(root, dn)
del dirs[i]
break
assert distinfo, '.dist-info directory expected, not found'
for fn in files:
# comment out next suite to leave .pyc files in
if fsdecode(fn).endswith(('.pyc', '.pyo')):
continue
p = os.path.join(root, fn)
rp = to_posix(os.path.relpath(p, path))
archive_paths.append((rp, p))
# Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
files = os.listdir(distinfo)
for fn in files:
if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'):
p = fsdecode(os.path.join(distinfo, fn))
ap = to_posix(os.path.join(info_dir, fn))
archive_paths.append((ap, p))
wheel_metadata = [
'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
'Generator: distlib %s' % __version__,
'Root-Is-Purelib: %s' % is_pure,
]
for pyver, abi, arch in self.tags:
wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
p = os.path.join(distinfo, 'WHEEL')
with open(p, 'w') as f:
f.write('\n'.join(wheel_metadata))
ap = to_posix(os.path.join(info_dir, 'WHEEL'))
archive_paths.append((ap, p))
# Now, at last, RECORD.
# Paths in here are archive paths - nothing else makes sense.
self.write_records((distinfo, info_dir), libdir, archive_paths)
# Now, ready to build the zip file
pathname = os.path.join(self.dirname, self.filename)
self.build_zip(pathname, archive_paths)
return pathname
    def install(self, paths, maker, **kwargs):
        """
        Install a wheel to the specified paths. If kwarg ``warner`` is
        specified, it should be a callable, which will be called with two
        tuples indicating the wheel version of this software and the wheel
        version in the file, if there is a discrepancy in the versions.
        This can be used to issue any warnings to raise any exceptions.
        If kwarg ``lib_only`` is True, only the purelib/platlib files are
        installed, and the headers, scripts, data and dist-info metadata are
        not written.

        The return value is a :class:`InstalledDistribution` instance unless
        ``options.lib_only`` is True, in which case the return value is ``None``.
        """
        dry_run = maker.dry_run
        warner = kwargs.get('warner')
        lib_only = kwargs.get('lib_only', False)

        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        data_dir = '%s.data' % name_ver
        info_dir = '%s.dist-info' % name_ver

        metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
        wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
        record_name = posixpath.join(info_dir, 'RECORD')

        wrapper = codecs.getreader('utf-8')

        with ZipFile(pathname, 'r') as zf:
            # Read WHEEL metadata to learn the wheel format version and
            # whether the contents go to purelib or platlib.
            with zf.open(wheel_metadata_name) as bwf:
                wf = wrapper(bwf)
                message = message_from_file(wf)
            wv = message['Wheel-Version'].split('.', 1)
            file_version = tuple([int(i) for i in wv])
            if (file_version != self.wheel_version) and warner:
                warner(self.wheel_version, file_version)

            if message['Root-Is-Purelib'] == 'true':
                libdir = paths['purelib']
            else:
                libdir = paths['platlib']

            # RECORD maps each archive path to (path, digest, size).
            records = {}
            with zf.open(record_name) as bf:
                with CSVReader(stream=bf) as reader:
                    for row in reader:
                        p = row[0]
                        records[p] = row

            data_pfx = posixpath.join(data_dir, '')
            info_pfx = posixpath.join(info_dir, '')
            script_pfx = posixpath.join(data_dir, 'scripts', '')

            # make a new instance rather than a copy of maker's,
            # as we mutate it
            fileop = FileOperator(dry_run=dry_run)
            fileop.record = True    # so we can rollback if needed

            bc = not sys.dont_write_bytecode    # Double negatives. Lovely!

            outfiles = []       # for RECORD writing

            # for script copying/shebang processing
            workdir = tempfile.mkdtemp()
            # set target dir later
            # we default add_launchers to False, as the
            # Python Launcher should be used instead
            maker.source_dir = workdir
            maker.target_dir = None
            try:
                for zinfo in zf.infolist():
                    arcname = zinfo.filename
                    if isinstance(arcname, text_type):
                        u_arcname = arcname
                    else:
                        u_arcname = arcname.decode('utf-8')
                    # The signature file won't be in RECORD,
                    # and we don't currently don't do anything with it
                    if u_arcname.endswith('/RECORD.jws'):
                        continue
                    # Validate size and digest against RECORD before writing.
                    row = records[u_arcname]
                    if row[2] and str(zinfo.file_size) != row[2]:
                        raise DistlibException('size mismatch for '
                                               '%s' % u_arcname)
                    if row[1]:
                        kind, value = row[1].split('=', 1)
                        with zf.open(arcname) as bf:
                            data = bf.read()
                        _, digest = self.get_hash(data, kind)
                        if digest != value:
                            raise DistlibException('digest mismatch for '
                                                   '%s' % arcname)

                    if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
                        logger.debug('lib_only: skipping %s', u_arcname)
                        continue
                    is_script = (u_arcname.startswith(script_pfx)
                                 and not u_arcname.endswith('.exe'))

                    if u_arcname.startswith(data_pfx):
                        # data-dir entries map to the named scheme path
                        # (scripts, headers, data, ...).
                        _, where, rp = u_arcname.split('/', 2)
                        outfile = os.path.join(paths[where], convert_path(rp))
                    else:
                        # meant for site-packages.
                        if u_arcname in (wheel_metadata_name, record_name):
                            continue
                        outfile = os.path.join(libdir, convert_path(u_arcname))

                    if not is_script:
                        with zf.open(arcname) as bf:
                            fileop.copy_stream(bf, outfile)
                        outfiles.append(outfile)
                        # Double check the digest of the written file
                        if not dry_run and row[1]:
                            with open(outfile, 'rb') as bf:
                                data = bf.read()
                                _, newdigest = self.get_hash(data, kind)
                                if newdigest != digest:
                                    raise DistlibException('digest mismatch '
                                                           'on write for '
                                                           '%s' % outfile)
                        if bc and outfile.endswith('.py'):
                            try:
                                pyc = fileop.byte_compile(outfile)
                                outfiles.append(pyc)
                            except Exception:
                                # Don't give up if byte-compilation fails,
                                # but log it and perhaps warn the user
                                logger.warning('Byte-compilation failed',
                                               exc_info=True)
                    else:
                        # Scripts go through the maker for shebang rewriting
                        # and launcher generation.
                        fn = os.path.basename(convert_path(arcname))
                        workname = os.path.join(workdir, fn)
                        with zf.open(arcname) as bf:
                            fileop.copy_stream(bf, workname)

                        dn, fn = os.path.split(outfile)
                        maker.target_dir = dn
                        filenames = maker.make(fn)
                        fileop.set_executable_mode(filenames)
                        outfiles.extend(filenames)

                if lib_only:
                    logger.debug('lib_only: returning None')
                    dist = None
                else:
                    # Generate scripts

                    # Try to get pydist.json so we can see if there are
                    # any commands to generate. If this fails (e.g. because
                    # of a legacy wheel), log a warning but don't give up.
                    commands = None
                    file_version = self.info['Wheel-Version']
                    if file_version == '1.0':
                        # Use legacy info
                        ep = posixpath.join(info_dir, 'entry_points.txt')
                        try:
                            with zf.open(ep) as bwf:
                                epdata = read_exports(bwf)
                            commands = {}
                            for key in ('console', 'gui'):
                                k = '%s_scripts' % key
                                if k in epdata:
                                    commands['wrap_%s' % key] = d = {}
                                    for v in epdata[k].values():
                                        s = '%s:%s' % (v.prefix, v.suffix)
                                        if v.flags:
                                            s += ' %s' % v.flags
                                        d[v.name] = s
                        except Exception:
                            logger.warning('Unable to read legacy script '
                                           'metadata, so cannot generate '
                                           'scripts')
                    else:
                        try:
                            with zf.open(metadata_name) as bwf:
                                wf = wrapper(bwf)
                                commands = json.load(wf).get('extensions')
                                if commands:
                                    commands = commands.get('python.commands')
                        except Exception:
                            logger.warning('Unable to read JSON metadata, so '
                                           'cannot generate scripts')
                    if commands:
                        console_scripts = commands.get('wrap_console', {})
                        gui_scripts = commands.get('wrap_gui', {})
                        if console_scripts or gui_scripts:
                            script_dir = paths.get('scripts', '')
                            if not os.path.isdir(script_dir):
                                raise ValueError('Valid script path not '
                                                 'specified')
                            maker.target_dir = script_dir
                            for k, v in console_scripts.items():
                                script = '%s = %s' % (k, v)
                                filenames = maker.make(script)
                                fileop.set_executable_mode(filenames)

                            if gui_scripts:
                                options = {'gui': True }
                                for k, v in gui_scripts.items():
                                    script = '%s = %s' % (k, v)
                                    filenames = maker.make(script, options)
                                    fileop.set_executable_mode(filenames)

                    p = os.path.join(libdir, info_dir)
                    dist = InstalledDistribution(p)

                    # Write SHARED
                    paths = dict(paths)     # don't change passed in dict
                    del paths['purelib']
                    del paths['platlib']
                    paths['lib'] = libdir
                    p = dist.write_shared_locations(paths, dry_run)
                    if p:
                        outfiles.append(p)

                    # Write RECORD
                    dist.write_installed_files(outfiles, paths['prefix'],
                                               dry_run)
                return dist
            except Exception:  # pragma: no cover
                # Any failure rolls back everything fileop recorded.
                logger.exception('installation failed.')
                fileop.rollback()
                raise
            finally:
                shutil.rmtree(workdir)
def _get_dylib_cache(self):
global cache
if cache is None:
# Use native string to avoid issues on 2.x: see Python #20140.
base = os.path.join(get_cache_base(), str('dylib-cache'),
sys.version[:3])
cache = Cache(base)
return cache
    def _get_extensions(self):
        """
        Extract the C extension modules listed in this wheel's EXTENSIONS
        file into a per-wheel cache directory, re-extracting only when the
        archive entry is newer than the cached copy. Returns a list of
        (module name, cached path) pairs; empty if there is no EXTENSIONS
        entry in the archive.
        """
        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        info_dir = '%s.dist-info' % name_ver
        arcname = posixpath.join(info_dir, 'EXTENSIONS')
        wrapper = codecs.getreader('utf-8')
        result = []
        with ZipFile(pathname, 'r') as zf:
            try:
                with zf.open(arcname) as bf:
                    wf = wrapper(bf)
                    extensions = json.load(wf)
                    cache = self._get_dylib_cache()
                    prefix = cache.prefix_to_dir(pathname)
                    cache_base = os.path.join(cache.base, prefix)
                    if not os.path.isdir(cache_base):
                        os.makedirs(cache_base)
                    for name, relpath in extensions.items():
                        dest = os.path.join(cache_base, convert_path(relpath))
                        if not os.path.exists(dest):
                            extract = True
                        else:
                            # Compare mtimes: only re-extract when the wheel's
                            # copy is newer than the cached file.
                            file_time = os.stat(dest).st_mtime
                            file_time = datetime.datetime.fromtimestamp(file_time)
                            info = zf.getinfo(relpath)
                            wheel_time = datetime.datetime(*info.date_time)
                            extract = wheel_time > file_time
                        if extract:
                            zf.extract(relpath, cache_base)
                        result.append((name, dest))
            except KeyError:
                # No EXTENSIONS entry in the archive - nothing to do.
                pass
        return result
def is_compatible(self):
"""
Determine if a wheel is compatible with the running system.
"""
return is_compatible(self)
def is_mountable(self):
"""
Determine if a wheel is asserted as mountable by its metadata.
"""
return True # for now - metadata details TBD
    def mount(self, append=False):
        """
        Add this wheel to sys.path so its contents become importable.

        :param append: If True, append to sys.path; otherwise insert at the
                       front.
        :raises DistlibException: if the wheel is not compatible with the
            running Python, or is marked as not mountable.
        """
        pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
        if not self.is_compatible():
            msg = 'Wheel %s not compatible with this Python.' % pathname
            raise DistlibException(msg)
        if not self.is_mountable():
            msg = 'Wheel %s is marked as not mountable.' % pathname
            raise DistlibException(msg)
        if pathname in sys.path:
            logger.debug('%s already in path', pathname)
        else:
            if append:
                sys.path.append(pathname)
            else:
                sys.path.insert(0, pathname)
            # If the wheel bundles C extensions, register them with the
            # import hook so they can be loaded from the dylib cache.
            extensions = self._get_extensions()
            if extensions:
                if _hook not in sys.meta_path:
                    sys.meta_path.append(_hook)
                _hook.add(pathname, extensions)
def unmount(self):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if pathname not in sys.path:
logger.debug('%s not in path', pathname)
else:
sys.path.remove(pathname)
if pathname in _hook.impure_wheels:
_hook.remove(pathname)
if not _hook.impure_wheels:
if _hook in sys.meta_path:
sys.meta_path.remove(_hook)
    def verify(self):
        """
        Check the wheel's contents against its RECORD: reject archive
        entries containing '..', and raise DistlibException on any size or
        digest mismatch. Returns None on success.
        """
        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        data_dir = '%s.data' % name_ver
        info_dir = '%s.dist-info' % name_ver

        metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
        wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
        record_name = posixpath.join(info_dir, 'RECORD')

        wrapper = codecs.getreader('utf-8')

        with ZipFile(pathname, 'r') as zf:
            with zf.open(wheel_metadata_name) as bwf:
                wf = wrapper(bwf)
                message = message_from_file(wf)
            wv = message['Wheel-Version'].split('.', 1)
            file_version = tuple([int(i) for i in wv])
            # TODO version verification

            # RECORD maps each archive path to (path, digest, size).
            records = {}
            with zf.open(record_name) as bf:
                with CSVReader(stream=bf) as reader:
                    for row in reader:
                        p = row[0]
                        records[p] = row

            for zinfo in zf.infolist():
                arcname = zinfo.filename
                if isinstance(arcname, text_type):
                    u_arcname = arcname
                else:
                    u_arcname = arcname.decode('utf-8')
                # Guard against path traversal in archive entries.
                if '..' in u_arcname:
                    raise DistlibException('invalid entry in '
                                           'wheel: %r' % u_arcname)
                # The signature file won't be in RECORD,
                # and we don't currently don't do anything with it
                if u_arcname.endswith('/RECORD.jws'):
                    continue
                row = records[u_arcname]
                if row[2] and str(zinfo.file_size) != row[2]:
                    raise DistlibException('size mismatch for '
                                           '%s' % u_arcname)
                if row[1]:
                    kind, value = row[1].split('=', 1)
                    with zf.open(arcname) as bf:
                        data = bf.read()
                    _, digest = self.get_hash(data, kind)
                    if digest != value:
                        raise DistlibException('digest mismatch for '
                                               '%s' % arcname)
    def update(self, modifier, dest_dir=None, **kwargs):
        """
        Update the contents of a wheel in a generic way. The modifier should
        be a callable which expects a dictionary argument: its keys are
        archive-entry paths, and its values are absolute filesystem paths
        where the contents the corresponding archive entries can be found. The
        modifier is free to change the contents of the files pointed to, add
        new entries and remove entries, before returning. This method will
        extract the entire contents of the wheel to a temporary location, call
        the modifier, and then use the passed (and possibly updated)
        dictionary to write a new wheel. If ``dest_dir`` is specified, the new
        wheel is written there -- otherwise, the original wheel is overwritten.

        The modifier should return True if it updated the wheel, else False.
        This method returns the same value the modifier returns.
        """
        def get_version(path_map, info_dir):
            # Read the distribution version from METADATA (falling back to
            # PKG-INFO); returns (version, metadata path), both possibly None.
            version = path = None
            key = '%s/%s' % (info_dir, METADATA_FILENAME)
            if key not in path_map:
                key = '%s/PKG-INFO' % info_dir
            if key in path_map:
                path = path_map[key]
                version = Metadata(path=path).version
            return version, path

        def update_version(version, path):
            # Bump the local version segment ("+N") so the rebuilt wheel is
            # distinguishable from the original; non-PEP-440 versions are
            # left untouched.
            updated = None
            try:
                v = NormalizedVersion(version)
                i = version.find('-')
                if i < 0:
                    updated = '%s+1' % version
                else:
                    parts = [int(s) for s in version[i + 1:].split('.')]
                    parts[-1] += 1
                    updated = '%s+%s' % (version[:i],
                                         '.'.join(str(i) for i in parts))
            except UnsupportedVersionError:
                logger.debug('Cannot update non-compliant (PEP-440) '
                             'version %r', version)
            if updated:
                md = Metadata(path=path)
                md.version = updated
                legacy = not path.endswith(METADATA_FILENAME)
                md.write(path=path, legacy=legacy)
                logger.debug('Version updated from %r to %r', version,
                             updated)

        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        info_dir = '%s.dist-info' % name_ver
        record_name = posixpath.join(info_dir, 'RECORD')
        with tempdir() as workdir:
            with ZipFile(pathname, 'r') as zf:
                # Extract everything except RECORD (it gets regenerated).
                path_map = {}
                for zinfo in zf.infolist():
                    arcname = zinfo.filename
                    if isinstance(arcname, text_type):
                        u_arcname = arcname
                    else:
                        u_arcname = arcname.decode('utf-8')
                    if u_arcname == record_name:
                        continue
                    if '..' in u_arcname:
                        raise DistlibException('invalid entry in '
                                               'wheel: %r' % u_arcname)
                    zf.extract(zinfo, workdir)
                    path = os.path.join(workdir, convert_path(u_arcname))
                    path_map[u_arcname] = path

            # Remember the version.
            original_version, _ = get_version(path_map, info_dir)
            # Files extracted. Call the modifier.
            modified = modifier(path_map, **kwargs)
            if modified:
                # Something changed - need to build a new wheel.
                current_version, path = get_version(path_map, info_dir)
                if current_version and (current_version == original_version):
                    # Add or update local version to signify changes.
                    update_version(current_version, path)
                # Decide where the new wheel goes.
                if dest_dir is None:
                    fd, newpath = tempfile.mkstemp(suffix='.whl',
                                                   prefix='wheel-update-',
                                                   dir=workdir)
                    os.close(fd)
                else:
                    if not os.path.isdir(dest_dir):
                        raise DistlibException('Not a directory: %r' % dest_dir)
                    newpath = os.path.join(dest_dir, self.filename)
                archive_paths = list(path_map.items())
                distinfo = os.path.join(workdir, info_dir)
                info = distinfo, info_dir
                self.write_records(info, workdir, archive_paths)
                self.build_zip(newpath, archive_paths)
                if dest_dir is None:
                    shutil.copyfile(newpath, pathname)
        return modified
def compatible_tags():
    """
    Return a set of (pyver, abi, arch) tuples compatible with this Python,
    from most to least specific (interpreter+ABI+arch down to py*-none-any).
    """
    versions = [VER_SUFFIX]
    major = VER_SUFFIX[0]
    for minor in range(sys.version_info[1] - 1, - 1, -1):
        versions.append(''.join([major, str(minor)]))

    abis = []
    for suffix, _, _ in imp.get_suffixes():
        if suffix.startswith('.abi'):
            abis.append(suffix.split('.', 2)[1])
    abis.sort()
    if ABI != 'none':
        abis.insert(0, ABI)
    abis.append('none')
    result = []

    arches = [ARCH]
    if sys.platform == 'darwin':
        # On OS X, also accept wheels built for older minor releases and
        # for "fat"/universal architectures that contain our arch.
        # BUG FIX: the pattern must be a raw string; '\w'/'\d' in a plain
        # string are invalid escape sequences on modern Pythons.
        m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH)
        if m:
            name, major, minor, arch = m.groups()
            minor = int(minor)
            matches = [arch]
            if arch in ('i386', 'ppc'):
                matches.append('fat')
            if arch in ('i386', 'ppc', 'x86_64'):
                matches.append('fat3')
            if arch in ('ppc64', 'x86_64'):
                matches.append('fat64')
            if arch in ('i386', 'x86_64'):
                matches.append('intel')
            if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
                matches.append('universal')
            while minor >= 0:
                for match in matches:
                    s = '%s_%s_%s_%s' % (name, major, minor, match)
                    if s != ARCH:   # already there
                        arches.append(s)
                minor -= 1

    # Most specific - our Python version, ABI and arch
    for abi in abis:
        for arch in arches:
            result.append((''.join((IMP_PREFIX, versions[0])), abi, arch))

    # where no ABI / arch dependency, but IMP_PREFIX dependency
    for i, version in enumerate(versions):
        result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
        if i == 0:
            result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))

    # no IMP_PREFIX, ABI or arch dependency
    for i, version in enumerate(versions):
        result.append((''.join(('py', version)), 'none', 'any'))
        if i == 0:
            result.append((''.join(('py', version[0])), 'none', 'any'))
    return set(result)


COMPATIBLE_TAGS = compatible_tags()

del compatible_tags
def is_compatible(wheel, tags=None):
    """Return True if any (pyver, abi, arch) tag in *tags* matches *wheel*."""
    if not isinstance(wheel, Wheel):
        wheel = Wheel(wheel)    # assume it's a filename
    if tags is None:
        tags = COMPATIBLE_TAGS
    return any(ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch
               for ver, abi, arch in tags)
| mit |
bootphon/crossitlearn | simple_dnn.py | 1 | 32993 | """
A deep neural network with or w/o dropout in one file.
"""
import numpy
import theano
import sys
import math
from theano import tensor as T
from theano import shared
from theano.tensor.shared_randomstreams import RandomStreams
from collections import OrderedDict
BATCH_SIZE = 100  # minibatch size used by DatasetMiniBatchIterator
STACKSIZE = 69    # presumably stacked-frame count per example - not referenced in this chunk
def relu_f(vec):
    """Rectified linear unit, elementwise max(0, vec).

    Kept as a tiny wrapper so the nonlinearity can be swapped in one place.
    """
    # (v + |v|) / 2 equals max(0, v); 0.5 and 2 are exact binary floats, so
    # multiplying by 0.5 is bit-identical to dividing by 2.
    return 0.5 * (vec + abs(vec))
def softplus_f(v):
    """Softplus nonlinearity, log(1 + exp(v)), via Theano's implementation."""
    activated = T.nnet.softplus(v)
    return activated
def dropout(rng, x, p=0.5):
    """Randomly zero entries of x with probability p (no-op for p<=0 or p>=1)."""
    if not (0. < p < 1.):
        return x
    stream = theano.tensor.shared_randomstreams.RandomStreams(
        rng.randint(2 ** 30))
    keep_mask = stream.binomial(n=1, p=1. - p, size=x.shape,
                                dtype=theano.config.floatX)
    return x * keep_mask
def fast_dropout(rng, x):
    """Gaussian ("fast") dropout: scale activations by N(1, 1) noise."""
    stream = RandomStreams(rng.randint(2 ** 30))
    noise = stream.normal(size=x.shape, avg=1., dtype=theano.config.floatX)
    return x * noise
def build_shared_zeros(shape, name):
    """Create a borrowable Theano shared variable of zeros with this shape."""
    zeros = numpy.zeros(shape, dtype=theano.config.floatX)
    return shared(value=zeros, name=name, borrow=True)
class Linear(object):
    """Affine layer computing W.X + b (no nonlinearity)."""

    def __init__(self, rng, input, n_in, n_out, W=None, b=None, fdrop=False):
        if W is None:
            # Glorot-style uniform initialization in +/- sqrt(6/(n_in+n_out)).
            bound = numpy.sqrt(6. / (n_in + n_out))
            init = numpy.asarray(rng.uniform(low=-bound, high=bound,
                                             size=(n_in, n_out)),
                                 dtype=theano.config.floatX)
            init *= 4  # This works for sigmoid activated networks!
            W = theano.shared(value=init, name='W', borrow=True)
        if b is None:
            b = build_shared_zeros((n_out,), 'b')
        self.input = input
        self.W = W
        self.b = b
        self.params = [self.W, self.b]
        self.output = T.dot(self.input, self.W) + self.b
        if fdrop:
            self.output = fast_dropout(rng, self.output)

    def __repr__(self):
        return "Linear"
class SigmoidLayer(Linear):
    """Linear layer followed by a logistic sigmoid activation."""

    def __init__(self, rng, input, n_in, n_out, W=None, b=None, fdrop=False):
        super(SigmoidLayer, self).__init__(rng, input, n_in, n_out, W, b)
        pre = self.output
        if fdrop:
            pre = fast_dropout(rng, pre)
        self.pre_activation = pre
        self.output = T.nnet.sigmoid(pre)
class ReLU(Linear):
    """Linear layer with rectified-linear activation max(0, W.X + b)."""

    def __init__(self, rng, input, n_in, n_out, W=None, b=None, fdrop=False):
        if b is None:
            b = build_shared_zeros((n_out,), 'b')
        super(ReLU, self).__init__(rng, input, n_in, n_out, W, b)
        pre = self.output
        if fdrop:
            pre = fast_dropout(rng, pre)
        self.pre_activation = pre
        self.output = relu_f(pre)
class SoftPlus(Linear):
    """Linear layer followed by a softplus activation, log(1 + exp(.))."""

    def __init__(self, rng, input, n_in, n_out, W=None, b=None, fdrop=0.):
        if b is None:
            # Equivalent to build_shared_zeros((n_out,), 'b').
            b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
            b = theano.shared(value=b_values, name='b', borrow=True)
        super(SoftPlus, self).__init__(rng, input, n_in, n_out, W, b)
        self.pre_activation = self.output
        if fdrop:
            # BUG FIX: fast_dropout takes (rng, x) only; the original passed
            # a third `fdrop` argument, raising a TypeError whenever fdrop
            # was truthy.
            self.pre_activation = fast_dropout(rng, self.pre_activation)
        self.output = softplus_f(self.pre_activation)
class DatasetMiniBatchIterator(object):
    """ Basic mini-batch iterator over parallel arrays x (features) and y
    (labels). Python 2 only (uses xrange and integer division semantics). """
    def __init__(self, x, y, batch_size=BATCH_SIZE, randomize=False):
        self.x = x
        self.y = y
        self.batch_size = batch_size
        self.randomize = randomize
        from sklearn.utils import check_random_state
        # Fixed seed: iteration order is reproducible across runs.
        self.rng = check_random_state(42)

    def __iter__(self):
        n_samples = self.x.shape[0]
        if self.randomize:
            # Samples a random batch index per step; uses the module-level
            # BATCH_SIZE for the step count, not self.batch_size.
            # NOTE(review): this branch yields 3-tuples (i, x, y) while the
            # sequential branch yields 2-tuples (x, y) - confirm callers
            # handle both arities.
            for _ in xrange(n_samples / BATCH_SIZE):
                if BATCH_SIZE > 1:
                    i = int(self.rng.rand(1) * ((n_samples+BATCH_SIZE-1) / BATCH_SIZE))
                else:
                    i = int(math.floor(self.rng.rand(1) * n_samples))
                yield (i, self.x[i*self.batch_size:(i+1)*self.batch_size],
                       self.y[i*self.batch_size:(i+1)*self.batch_size])
        else:
            # Sequential pass: ceil(n_samples / batch_size) slices.
            for i in xrange((n_samples + self.batch_size - 1)
                            / self.batch_size):
                yield (self.x[i*self.batch_size:(i+1)*self.batch_size],
                       self.y[i*self.batch_size:(i+1)*self.batch_size])
class LogisticRegression:
    """Multi-class logistic regression: softmax(W.X + b) output layer.

    W is (n_in, n_out), b is (n_out,); both default to zero-initialized
    shared variables when not supplied.
    """
    def __init__(self, rng, input, n_in, n_out, W=None, b=None):
        # FIX: use identity comparison with None. `W != None` on a Theano
        # shared variable relies on fallback comparison behavior; `is not
        # None` is the unambiguous check.
        if W is not None:
            self.W = W
        else:
            self.W = build_shared_zeros((n_in, n_out), 'W')
        if b is not None:
            self.b = b
        else:
            self.b = build_shared_zeros((n_out,), 'b')

        # P(Y|X) = softmax(W.X + b)
        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)
        self.output = self.y_pred
        self.params = [self.W, self.b]

    def negative_log_likelihood(self, y):
        """Mean negative log-likelihood of the true labels y."""
        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])

    def negative_log_likelihood_sum(self, y):
        """Summed (not averaged) negative log-likelihood of labels y."""
        return -T.sum(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])

    def log_loss(self, y):
        # TODO: unfinished - currently returns None; do not use.
        log_y_hat = T.log(self.p_y_given_x)
        #ll = log_y_hat[T.arange(y.shape[0]), y] + log_y_hat[T.arange(y.shape[0]), 1-y]
        #return -T.mean(ll)

    def training_cost(self, y):
        """ Wrapper for standard name """
        return self.negative_log_likelihood_sum(y)
        #return self.log_loss(y) TODO

    def errors(self, y):
        """Symbolic mean misclassification rate on labels y."""
        if y.ndim != self.y_pred.ndim:
            raise TypeError("y should have the same shape as self.y_pred",
                            ("y", y.type, "y_pred", self.y_pred.type))
        if y.dtype.startswith('int'):
            return T.mean(T.neq(self.y_pred, y))
        else:
            print("!!! y should be of int type")
            return T.mean(T.neq(self.y_pred, numpy.asarray(y, dtype='int')))
class NeuralNet(object):
    """ Neural network (not regularized, without dropout) """
    def __init__(self, numpy_rng, theano_rng=None,
                 n_ins=40*3,
                 layers_types=[Linear, ReLU, ReLU, ReLU, LogisticRegression],
                 layers_sizes=[1024, 1024, 1024, 1024],
                 n_outs=62 * 3,
                 rho=0.95, eps=1.E-6,
                 max_norm=0.,
                 debugprint=False):
        """
        Build a feed-forward net by chaining layers_types with widths
        [n_ins] + layers_sizes -> layers_sizes + [n_outs] on symbolic
        inputs self.x (float matrix) and labels self.y (int vector).
        Also allocates per-parameter accumulators for adagrad/adadelta.

        NOTE(review): layers_types/layers_sizes are shared mutable default
        arguments; they are not mutated here, but callers should not
        modify them in place.
        """
        self.layers = []
        self.params = []
        self.n_layers = len(layers_types)
        self.layers_types = layers_types
        assert self.n_layers > 0
        self.max_norm = max_norm
        self._rho = rho  # ``momentum'' for adadelta
        self._eps = eps  # epsilon for adadelta
        self._accugrads = []  # for adadelta
        self._accudeltas = []  # for adadelta
        self._old_dxs = []  # for adadelta with Nesterov
        if theano_rng == None:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
        self.x = T.fmatrix('x')
        self.y = T.ivector('y')
        self.layers_ins = [n_ins] + layers_sizes
        self.layers_outs = layers_sizes + [n_outs]
        layer_input = self.x
        # Chain the layers, each consuming the previous layer's output.
        for layer_type, n_in, n_out in zip(layers_types,
                                           self.layers_ins, self.layers_outs):
            this_layer = layer_type(rng=numpy_rng,
                                    input=layer_input, n_in=n_in, n_out=n_out)
            assert hasattr(this_layer, 'output')
            self.params.extend(this_layer.params)
            # One zero-initialized accumulator per parameter tensor.
            self._accugrads.extend([build_shared_zeros(t.shape.eval(),
                'accugrad') for t in this_layer.params])
            self._accudeltas.extend([build_shared_zeros(t.shape.eval(),
                'accudelta') for t in this_layer.params])
            self._old_dxs.extend([build_shared_zeros(t.shape.eval(),
                'old_dxs') for t in this_layer.params])
            self.layers.append(this_layer)
            layer_input = this_layer.output
        assert hasattr(self.layers[-1], 'training_cost')
        assert hasattr(self.layers[-1], 'errors')
        # TODO standardize cost
        # mean_cost (per-example mean NLL) drives the gradients so the
        # learning rate is batch-size independent; cost is the summed NLL.
        self.mean_cost = self.layers[-1].negative_log_likelihood(self.y)
        self.cost = self.layers[-1].training_cost(self.y)
        #self.mean_cost = self.layers[-1].training_cost(self.y) # TODO
        if debugprint:
            theano.printing.debugprint(self.cost)
        self.errors = self.layers[-1].errors(self.y)

    def __repr__(self):
        # e.g. "Linear_120x1024_ReLU_1024x1024_..."
        dimensions_layers_str = map(lambda x: "x".join(map(str, x)),
                                    zip(self.layers_ins, self.layers_outs))
        return "_".join(map(lambda x: "_".join((x[0].__name__, x[1])),
                            zip(self.layers_types, dimensions_layers_str)))

    def get_SGD_trainer(self):
        """ Returns a plain SGD minibatch trainer with learning rate as param.
        """
        batch_x = T.fmatrix('batch_x')
        batch_y = T.ivector('batch_y')
        learning_rate = T.fscalar('lr')  # learning rate to use
        # compute the gradients with respect to the model parameters
        # using mean_cost so that the learning rate is not too dependent
        # on the batch size
        gparams = T.grad(self.mean_cost, self.params)
        # compute list of weights updates
        updates = OrderedDict()
        for param, gparam in zip(self.params, gparams):
            if self.max_norm:
                # Rescale columns whose L2 norm exceeds max_norm.
                W = param - gparam * learning_rate
                col_norms = W.norm(2, axis=0)
                desired_norms = T.clip(col_norms, 0, self.max_norm)
                updates[param] = W * (desired_norms / (1e-6 + col_norms))
            else:
                updates[param] = param - gparam * learning_rate

        train_fn = theano.function(inputs=[theano.Param(batch_x),
                                           theano.Param(batch_y),
                                           theano.Param(learning_rate)],
                                   outputs=self.mean_cost,
                                   updates=updates,
                                   givens={self.x: batch_x, self.y: batch_y})
        return train_fn

    def get_adagrad_trainer(self):
        """ Returns an Adagrad (Duchi et al. 2010) trainer using a learning rate.
        """
        batch_x = T.fmatrix('batch_x')
        batch_y = T.ivector('batch_y')
        learning_rate = T.fscalar('lr')  # learning rate to use
        # compute the gradients with respect to the model parameters
        gparams = T.grad(self.mean_cost, self.params)
        # compute list of weights updates
        updates = OrderedDict()
        for accugrad, param, gparam in zip(self._accugrads, self.params, gparams):
            # c.f. Algorithm 1 in the Adadelta paper (Zeiler 2012)
            agrad = accugrad + gparam * gparam
            dx = - (learning_rate / T.sqrt(agrad + self._eps)) * gparam
            if self.max_norm:
                W = param + dx
                col_norms = W.norm(2, axis=0)
                desired_norms = T.clip(col_norms, 0, self.max_norm)
                updates[param] = W * (desired_norms / (1e-6 + col_norms))
            else:
                updates[param] = param + dx
            updates[accugrad] = agrad

        train_fn = theano.function(inputs=[theano.Param(batch_x),
                                           theano.Param(batch_y),
                                           theano.Param(learning_rate)],
                                   outputs=self.mean_cost,
                                   updates=updates,
                                   givens={self.x: batch_x, self.y: batch_y})
        return train_fn

    def get_adadelta_trainer(self):
        """ Returns an Adadelta (Zeiler 2012) trainer using self._rho and
        self._eps params.
        """
        batch_x = T.fmatrix('batch_x')
        batch_y = T.ivector('batch_y')
        # compute the gradients with respect to the model parameters
        gparams = T.grad(self.mean_cost, self.params)
        # compute list of weights updates
        updates = OrderedDict()
        for accugrad, accudelta, param, gparam in zip(self._accugrads,
                self._accudeltas, self.params, gparams):
            # c.f. Algorithm 1 in the Adadelta paper (Zeiler 2012)
            agrad = self._rho * accugrad + (1 - self._rho) * gparam * gparam
            dx = - T.sqrt((accudelta + self._eps)
                          / (agrad + self._eps)) * gparam
            updates[accudelta] = (self._rho * accudelta
                                  + (1 - self._rho) * dx * dx)
            if self.max_norm:
                W = param + dx
                col_norms = W.norm(2, axis=0)
                desired_norms = T.clip(col_norms, 0, self.max_norm)
                updates[param] = W * (desired_norms / (1e-6 + col_norms))
            else:
                updates[param] = param + dx
            updates[accugrad] = agrad

        train_fn = theano.function(inputs=[theano.Param(batch_x),
                                           theano.Param(batch_y)],
                                   outputs=self.mean_cost,
                                   updates=updates,
                                   givens={self.x: batch_x, self.y: batch_y})
        return train_fn

    def score_classif(self, given_set):
        """ Returns functions to get current classification errors. """
        batch_x = T.fmatrix('batch_x')
        batch_y = T.ivector('batch_y')
        score = theano.function(inputs=[theano.Param(batch_x),
                                        theano.Param(batch_y)],
                                outputs=self.errors,
                                givens={self.x: batch_x, self.y: batch_y})

        def scoref():
            """ returned function that scans the entire set given as input """
            return [score(batch_x, batch_y) for batch_x, batch_y in given_set]

        return scoref
class RegularizedNet(NeuralNet):
    """Neural net whose training cost adds L1 and/or L2 weight penalties."""

    def __init__(self, numpy_rng, theano_rng=None,
                 n_ins=100,
                 layers_types=[ReLU, ReLU, ReLU, LogisticRegression],
                 layers_sizes=[1024, 1024, 1024],
                 n_outs=2,
                 rho=0.9, eps=1.E-6,
                 L1_reg=0.,
                 L2_reg=0.,
                 max_norm=0.,
                 debugprint=False):
        """Build the base network, then append the regularization terms."""
        super(RegularizedNet, self).__init__(numpy_rng, theano_rng, n_ins,
                layers_types, layers_sizes, n_outs, rho, eps, max_norm,
                debugprint)

        # Symbolic L1 penalty: sum of absolute values of all parameters.
        l1_term = shared(0.)
        for p in self.params:
            l1_term += T.sum(abs(p))
        if L1_reg > 0.:
            self.cost = self.cost + L1_reg * l1_term
        # Symbolic L2 penalty: sum of squared parameters.
        l2_term = shared(0.)
        for p in self.params:
            l2_term += T.sum(p ** 2)
        if L2_reg > 0.:
            self.cost = self.cost + L2_reg * l2_term
class DropoutNet(NeuralNet):
    """ Neural net with dropout (see Hinton's et al. paper) """
    def __init__(self, numpy_rng, theano_rng=None,
                 n_ins=40*3,
                 layers_types=[ReLU, ReLU, ReLU, ReLU, LogisticRegression],
                 layers_sizes=[4000, 4000, 4000, 4000],
                 dropout_rates=[0.2, 0.5, 0.5, 0.5, 0.5],
                 n_outs=62 * 3,
                 rho=0.98, eps=1.E-6,
                 max_norm=0.,
                 fast_drop=True,
                 debugprint=False):
        """
        Build the base (dropout-free) network, then a parallel stack of
        dropout layers sharing the same weights. dropout_rates[0] applies
        to the input; dropout_rates[1:] to each layer's output. Training
        costs come from the dropout stack, reported errors from the clean
        stack. With fast_drop, multiplicative N(1,1) noise is used instead
        of binary masking.
        """
        super(DropoutNet, self).__init__(numpy_rng, theano_rng, n_ins,
                layers_types, layers_sizes, n_outs, rho, eps, max_norm,
                debugprint)

        self.dropout_rates = dropout_rates
        if fast_drop:
            if dropout_rates[0]:
                dropout_layer_input = fast_dropout(numpy_rng, self.x)
            else:
                dropout_layer_input = self.x
        else:
            dropout_layer_input = dropout(numpy_rng, self.x, p=dropout_rates[0])
        self.dropout_layers = []

        for layer, layer_type, n_in, n_out, dr in zip(self.layers,
                layers_types, self.layers_ins, self.layers_outs,
                dropout_rates[1:] + [0]):  # !!! we do not dropout anything
                                           # from the last layer !!!
            if dr:
                if fast_drop:
                    this_layer = layer_type(rng=numpy_rng,
                            input=dropout_layer_input, n_in=n_in, n_out=n_out,
                            W=layer.W, b=layer.b, fdrop=True)
                else:
                    # Weights are up-scaled by 1/(1-dr) so that expected
                    # activations match the clean network at test time.
                    this_layer = layer_type(rng=numpy_rng,
                            input=dropout_layer_input, n_in=n_in, n_out=n_out,
                            W=layer.W * 1. / (1. - dr),
                            b=layer.b * 1. / (1. - dr))
                    # N.B. dropout with dr==1 does not dropanything!!
                    this_layer.output = dropout(numpy_rng, this_layer.output, dr)
            else:
                this_layer = layer_type(rng=numpy_rng,
                        input=dropout_layer_input, n_in=n_in, n_out=n_out,
                        W=layer.W, b=layer.b)
            assert hasattr(this_layer, 'output')
            self.dropout_layers.append(this_layer)
            dropout_layer_input = this_layer.output

        assert hasattr(self.layers[-1], 'training_cost')
        assert hasattr(self.layers[-1], 'errors')
        # TODO standardize cost
        # these are the dropout costs
        self.mean_cost = self.dropout_layers[-1].negative_log_likelihood(self.y)
        self.cost = self.dropout_layers[-1].training_cost(self.y)

        # these is the non-dropout errors
        self.errors = self.layers[-1].errors(self.y)

    def __repr__(self):
        return super(DropoutNet, self).__repr__() + "\n"\
               + "dropout rates: " + str(self.dropout_rates)
def add_fit_and_score(class_to_chg):
    """ Mutates a class to add the fit() and score() functions to a NeuralNet.
    """
    from types import MethodType

    def fit(self, x_train, y_train, x_dev=None, y_dev=None,
            max_epochs=20, early_stopping=True, split_ratio=0.1,  # TODO 100+ epochs
            method='adadelta', verbose=False, plot=False):
        """
        Train with minibatches using the chosen `method` ('sgd', 'adagrad',
        'adadelta', 'adadelta_rprop'), tracking dev-set error each epoch
        and restoring the best-scoring parameters at the end. When no dev
        set is given, split_ratio of the training data is held out.

        NOTE(review): `early_stopping` is accepted but never read below;
        an unrecognized `method` leaves train_fn unbound (NameError on
        first use).
        """
        import time, copy
        if x_dev == None or y_dev == None:
            from sklearn.cross_validation import train_test_split
            x_train, x_dev, y_train, y_dev = train_test_split(x_train, y_train,
                    test_size=split_ratio, random_state=42)
        if method == 'sgd':
            train_fn = self.get_SGD_trainer()
        elif method == 'adagrad':
            train_fn = self.get_adagrad_trainer()
        elif method == 'adadelta':
            train_fn = self.get_adadelta_trainer()
        elif method == 'adadelta_rprop':
            train_fn = self.get_adadelta_rprop_trainer()
        train_set_iterator = DatasetMiniBatchIterator(x_train, y_train)
        dev_set_iterator = DatasetMiniBatchIterator(x_dev, y_dev)
        train_scoref = self.score_classif(train_set_iterator)
        dev_scoref = self.score_classif(dev_set_iterator)
        best_dev_loss = numpy.inf
        epoch = 0
        # TODO early stopping (not just cross val, also stop training)
        if plot:
            verbose = True
            self._costs = []
            self._train_errors = []
            self._dev_errors = []
            self._updates = []

        while epoch < max_epochs:
            if not verbose:
                # Simple in-place progress indicator.
                sys.stdout.write("\r%0.2f%%" % (epoch * 100. / max_epochs))
                sys.stdout.flush()
            avg_costs = []
            timer = time.time()
            for x, y in train_set_iterator:
                # SGD/adagrad trainers take an explicit learning rate;
                # adadelta variants do not.
                if method == 'sgd' or 'adagrad' in method:
                    avg_cost = train_fn(x, y, lr=1.E-2)
                elif 'adadelta' in method:
                    avg_cost = train_fn(x, y)
                if type(avg_cost) == list:
                    avg_costs.append(avg_cost[0])
                else:
                    avg_costs.append(avg_cost)
            if verbose:
                mean_costs = numpy.mean(avg_costs)
                mean_train_errors = numpy.mean(train_scoref())
                print('  epoch %i took %f seconds' %
                      (epoch, time.time() - timer))
                print('  epoch %i, avg costs %f' %
                      (epoch, mean_costs))
                print('  method %s, epoch %i, training error %f' %
                      (method, epoch, mean_train_errors))
                if plot:
                    self._costs.append(mean_costs)
                    self._train_errors.append(mean_train_errors)
            dev_errors = numpy.mean(dev_scoref())
            if plot:
                self._dev_errors.append(dev_errors)
            if dev_errors < best_dev_loss:
                # New best dev score: snapshot the parameters.
                best_dev_loss = dev_errors
                best_params = copy.deepcopy(self.params)
                if verbose:
                    print('!!!  epoch %i, validation error of best model %f' %
                          (epoch, dev_errors))
            epoch += 1
        if not verbose:
            print("")
        # Restore the best parameters seen on the dev set.
        for i, param in enumerate(best_params):
            self.params[i] = param

    def score(self, x, y):
        """ error rates """
        iterator = DatasetMiniBatchIterator(x, y)
        scoref = self.score_classif(iterator)
        return numpy.mean(scoref())

    # Python 2 unbound-method binding (MethodType takes 3 args on py2).
    class_to_chg.fit = MethodType(fit, None, class_to_chg)
    class_to_chg.score = MethodType(score, None, class_to_chg)
if __name__ == "__main__":
add_fit_and_score(DropoutNet)
add_fit_and_score(RegularizedNet)
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
from scipy.ndimage import convolve
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = numpy.concatenate([X] +
[numpy.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = numpy.concatenate([Y for _ in range(5)], axis=0)
return X, Y
from sklearn import datasets, svm, naive_bayes
from sklearn import cross_validation, preprocessing
SPOKEN_WORDS = True
MNIST = False
DIGITS = False
NUDGE_DIGITS = True
FACES = False
TWENTYNEWSGROUPS = False
VERBOSE = True
SCALE = True
PLOT = True
def train_models(x_train, y_train, x_test, y_test, n_features, n_outs,
use_dropout=False, n_epochs=100, numpy_rng=None, # TODO 200+ epochs
svms=False, nb=False, deepnn=True, name=''):
if svms:
print("Linear SVM")
classifier = svm.SVC(gamma=0.001)
print(classifier)
classifier.fit(x_train, y_train)
print("score: %f" % classifier.score(x_test, y_test))
print("RBF-kernel SVM")
classifier = svm.SVC(kernel='rbf', class_weight='auto')
print(classifier)
classifier.fit(x_train, y_train)
print("score: %f" % classifier.score(x_test, y_test))
if nb:
print("Multinomial Naive Bayes")
classifier = naive_bayes.MultinomialNB()
print(classifier)
classifier.fit(x_train, y_train)
print("score: %f" % classifier.score(x_test, y_test))
if deepnn:
import warnings
warnings.filterwarnings("ignore") # TODO remove
if use_dropout:
n_epochs *= 4
pass
def new_dnn(dropout=False):
if dropout:
print("Dropout DNN")
return DropoutNet(numpy_rng=numpy_rng, n_ins=n_features,
#layers_types=[ReLU, ReLU, ReLU, ReLU, LogisticRegression],
layers_types=[SoftPlus, SoftPlus, SoftPlus, SoftPlus, LogisticRegression],
layers_sizes=[2000, 2000, 2000, 2000],
dropout_rates=[0.2, 0.5, 0.5, 0.5, 0.5],
n_outs=n_outs,
max_norm=4.,
fast_drop=False,
debugprint=0)
else:
print("Simple (regularized) DNN")
return RegularizedNet(numpy_rng=numpy_rng, n_ins=n_features,
#layers_types=[LogisticRegression],
#layers_sizes=[],
#layers_types=[ReLU, ReLU, ReLU, LogisticRegression],
#layers_types=[SoftPlus, SoftPlus, SoftPlus, LogisticRegression],
#layers_sizes=[1000, 1000, 1000],
layers_types=[ReLU, LogisticRegression],
layers_sizes=[200],
n_outs=n_outs,
#L1_reg=0.001/x_train.shape[0],
#L2_reg=0.001/x_train.shape[0],
L1_reg=0.,
L2_reg=1./x_train.shape[0],
max_norm=0.,
debugprint=0)
import matplotlib.pyplot as plt
plt.figure()
ax1 = plt.subplot(221)
ax2 = plt.subplot(222)
ax3 = plt.subplot(223)
ax4 = plt.subplot(224) # TODO updates of the weights
methods = ['adadelta']
for method in methods:
dnn = new_dnn(use_dropout)
print dnn
dnn.fit(x_train, y_train, max_epochs=n_epochs, method=method, verbose=VERBOSE, plot=PLOT)
test_error = dnn.score(x_test, y_test)
print("score: %f" % (1. - test_error))
ax1.plot(numpy.log10(dnn._costs), label=method)
#ax2.plot(numpy.log10(dnn._train_errors), label=method)
#ax3.plot(numpy.log10(dnn._dev_errors), label=method)
ax2.plot(dnn._train_errors, label=method)
ax3.plot(dnn._dev_errors, label=method)
#ax4.plot(dnn._updates, label=method) TODO
ax4.plot([test_error for _ in range(10)], label=method)
ax1.set_xlabel('epoch')
ax1.set_ylabel('cost (log10)')
ax2.set_xlabel('epoch')
ax2.set_ylabel('train error')
ax3.set_xlabel('epoch')
ax3.set_ylabel('dev error')
ax4.set_ylabel('test error')
plt.legend()
plt.savefig('training_log' + name + '.png')
if MNIST:
from sklearn.datasets import fetch_mldata
mnist = fetch_mldata('MNIST original')
X = numpy.asarray(mnist.data, dtype='float32')
if SCALE:
#X = preprocessing.scale(X)
X /= 255.
y = numpy.asarray(mnist.target, dtype='int32')
#target_names = mnist.target_names
print("Total dataset size:")
print("n samples: %d" % X.shape[0])
print("n features: %d" % X.shape[1])
print("n classes: %d" % len(set(y)))
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
X, y, test_size=0.2, random_state=42)
train_models(x_train, y_train, x_test, y_test, X.shape[1],
len(set(y)), numpy_rng=numpy.random.RandomState(123),
name='MNIST')
if DIGITS:
digits = datasets.load_digits()
data = numpy.asarray(digits.data, dtype='float32')
target = numpy.asarray(digits.target, dtype='int32')
x = data
y = target
if NUDGE_DIGITS:
x, y = nudge_dataset(x, y)
if SCALE:
x = preprocessing.scale(x)
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
x, y, test_size=0.2, random_state=42)
train_models(x_train, y_train, x_test, y_test, x.shape[1],
len(set(target)), numpy_rng=numpy.random.RandomState(123),
name='digits')
if FACES:
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(message)s')
lfw_people = datasets.fetch_lfw_people(min_faces_per_person=70,
resize=0.4)
X = numpy.asarray(lfw_people.data, dtype='float32')
if SCALE:
X = preprocessing.scale(X)
y = numpy.asarray(lfw_people.target, dtype='int32')
target_names = lfw_people.target_names
print("Total dataset size:")
print("n samples: %d" % X.shape[0])
print("n features: %d" % X.shape[1])
print("n classes: %d" % target_names.shape[0])
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
X, y, test_size=0.2, random_state=42)
train_models(x_train, y_train, x_test, y_test, X.shape[1],
len(set(y)), numpy_rng=numpy.random.RandomState(123),
name='faces')
if TWENTYNEWSGROUPS:
from sklearn.feature_extraction.text import TfidfVectorizer
newsgroups_train = datasets.fetch_20newsgroups(subset='train')
vectorizer = TfidfVectorizer(encoding='latin-1', max_features=10000)
#vectorizer = HashingVectorizer(encoding='latin-1')
x_train = vectorizer.fit_transform(newsgroups_train.data)
x_train = numpy.asarray(x_train.todense(), dtype='float32')
y_train = numpy.asarray(newsgroups_train.target, dtype='int32')
newsgroups_test = datasets.fetch_20newsgroups(subset='test')
x_test = vectorizer.transform(newsgroups_test.data)
x_test = numpy.asarray(x_test.todense(), dtype='float32')
y_test = numpy.asarray(newsgroups_test.target, dtype='int32')
train_models(x_train, y_train, x_test, y_test, x_train.shape[1],
len(set(y_train)),
numpy_rng=numpy.random.RandomState(123),
svms=False, nb=True, deepnn=True,
name='20newsgroups')
if SPOKEN_WORDS:
# words done by "say", shapes of their filterbanks
#>>> shapes
#array([[62, 40],
# [65, 40],
# [58, 40],
# ...,
# [85, 40],
# [79, 40],
# [51, 40]])
#>>> shapes.mean(axis=0)
#array([ 70.87751196, 40. ])
#>>> shapes.std(axis=0)
#array([ 12.94580736, 0. ])
#>>> shapes.min(axis=0)
#array([39, 40])
words_fbanks = numpy.load("all_words_pascal1k.npz")
n_tokens = len([k for k in words_fbanks.keys()])
lexicon = set([w.split('_')[1] for w in words_fbanks.keys()])
lexicon = [w for w in lexicon] # we need an ordered collection
n_words = len(lexicon)
all_fbanks = numpy.concatenate([v for _, v in words_fbanks.iteritems()])
print all_fbanks.shape
mean = all_fbanks.mean(axis=0)
print mean.shape
std = all_fbanks.std(axis=0)
print std.shape
# take 69 fbanks in the middle of the word and pad with 0s if needed
X = numpy.zeros((n_tokens, 40*STACKSIZE), dtype='float32')
y = numpy.zeros(n_tokens, dtype='int32')
for i, (swf, fb) in enumerate(words_fbanks.iteritems()):
spkr, word, _ = swf.split('_')
l = fb.shape[0]
m = l/2
s = max(0, m - ((STACKSIZE-1) / 2))
e = min(l-1, m + ((STACKSIZE-1) / 2))
tmp = (fb - mean) / std
tmp = tmp[s:e+1].flatten()
diff = 40*STACKSIZE - tmp.shape[0]
if not diff:
X[i] = tmp
else:
X[i][diff/2:-diff/2] = tmp
y[i] = lexicon.index(word)
# train the DNN, with the training set as test set if let in this form:
train_models(X, y, X, y, X.shape[1],
len(set(y)),
numpy_rng=numpy.random.RandomState(123),
svms=False, nb=False, deepnn=True,
name='spoken_words')
| mit |
utkarsh-goswami/erpnext | erpnext/config/selling.py | 22 | 7241 | from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Sales"),
"icon": "fa fa-star",
"items": [
{
"type": "doctype",
"name": "Quotation",
"description": _("Quotes to Leads or Customers."),
},
{
"type": "doctype",
"name": "Sales Order",
"description": _("Confirmed orders from Customers."),
},
]
},
{
"label": _("Customers"),
"items": [
{
"type": "doctype",
"name": "Customer",
"description": _("Customer database."),
},
{
"type": "doctype",
"label": _("Customer Group"),
"name": "Customer Group",
"icon": "fa fa-sitemap",
"link": "Tree/Customer Group",
"description": _("Manage Customer Group Tree."),
},
{
"type": "doctype",
"name": "Contact",
"description": _("All Contacts."),
},
{
"type": "doctype",
"name": "Address",
"description": _("All Addresses."),
},
]
},
{
"label": _("Items and Pricing"),
"items": [
{
"type": "doctype",
"name": "Item",
"description": _("All Products or Services."),
},
{
"type": "doctype",
"name": "Product Bundle",
"description": _("Bundle items at time of sale."),
},
{
"type": "doctype",
"name": "Price List",
"description": _("Price List master.")
},
{
"type": "doctype",
"name": "Item Group",
"icon": "fa fa-sitemap",
"label": _("Item Group"),
"link": "Tree/Item Group",
"description": _("Tree of Item Groups."),
},
{
"type": "doctype",
"name": "Item Price",
"description": _("Multiple Item prices."),
"route": "Report/Item Price"
},
{
"type": "doctype",
"name": "Shipping Rule",
"description": _("Rules for adding shipping costs.")
},
{
"type": "doctype",
"name": "Pricing Rule",
"description": _("Rules for applying pricing and discount.")
},
]
},
{
"label": _("Sales Partners and Territory"),
"items": [
{
"type": "doctype",
"label": _("Territory"),
"name": "Territory",
"icon": "fa fa-sitemap",
"link": "Tree/Territory",
"description": _("Manage Territory Tree."),
},
{
"type": "doctype",
"name": "Sales Partner",
"description": _("Manage Sales Partners."),
},
{
"type": "doctype",
"label": _("Sales Person"),
"name": "Sales Person",
"icon": "fa fa-sitemap",
"link": "Tree/Sales Person",
"description": _("Manage Sales Person Tree."),
},
{
"type": "report",
"is_query_report": True,
"name": "Addresses And Contacts",
"label": _("Sales Partner Addresses And Contacts"),
"doctype": "Address",
"route_options": {
"party_type": "Sales Partner"
}
},
{
"type": "report",
"is_query_report": True,
"name": "Territory Target Variance (Item Group-Wise)",
"route": "query-report/Territory Target Variance Item Group-Wise",
"doctype": "Territory"
},
{
"type": "report",
"is_query_report": True,
"name": "Sales Person Target Variance (Item Group-Wise)",
"route": "query-report/Sales Person Target Variance Item Group-Wise",
"doctype": "Sales Person",
},
]
},
{
"label": _("Setup"),
"icon": "fa fa-cog",
"items": [
{
"type": "doctype",
"name": "Selling Settings",
"description": _("Default settings for selling transactions.")
},
{
"type": "doctype",
"name": "Campaign",
"description": _("Sales campaigns."),
},
{
"type": "doctype",
"name":"Terms and Conditions",
"label": _("Terms and Conditions Template"),
"description": _("Template of terms or contract.")
},
{
"type": "doctype",
"name": "Sales Taxes and Charges Template",
"description": _("Tax template for selling transactions.")
},
{
"type": "doctype",
"name": "Industry Type",
"description": _("Track Leads by Industry Type.")
},
]
},
{
"label": _("Analytics"),
"icon": "fa fa-table",
"items": [
{
"type": "page",
"name": "sales-analytics",
"label": _("Sales Analytics"),
"icon": "fa fa-bar-chart",
},
{
"type": "page",
"name": "sales-funnel",
"label": _("Sales Funnel"),
"icon": "fa fa-bar-chart",
},
{
"type": "report",
"is_query_report": True,
"name": "Customer Acquisition and Loyalty",
"doctype": "Customer",
"icon": "fa fa-bar-chart",
},
{
"type": "report",
"is_query_report": True,
"name": "Quotation Trends",
"doctype": "Quotation"
},
{
"type": "report",
"is_query_report": True,
"name": "Sales Order Trends",
"doctype": "Sales Order"
},
]
},
{
"label": _("Other Reports"),
"icon": "fa fa-list",
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Lead Details",
"doctype": "Lead"
},
{
"type": "report",
"is_query_report": True,
"name": "Addresses And Contacts",
"label": _("Customer Addresses And Contacts"),
"doctype": "Address",
"route_options": {
"party_type": "Customer"
}
},
{
"type": "report",
"is_query_report": True,
"name": "Ordered Items To Be Delivered",
"doctype": "Sales Order"
},
{
"type": "report",
"is_query_report": True,
"name": "Sales Person-wise Transaction Summary",
"doctype": "Sales Order"
},
{
"type": "report",
"is_query_report": True,
"name": "Item-wise Sales History",
"doctype": "Item"
},
{
"type": "report",
"is_query_report": True,
"name": "BOM Search",
"doctype": "BOM"
},
{
"type": "report",
"is_query_report": True,
"name": "Inactive Customers",
"doctype": "Sales Order"
},
{
"type": "report",
"is_query_report": True,
"name": "Available Stock for Packing Items",
"doctype": "Item",
},
{
"type": "report",
"is_query_report": True,
"name": "Pending SO Items For Purchase Request",
"doctype": "Sales Order"
},
{
"type": "report",
"is_query_report": True,
"name": "Customer Credit Balance",
"doctype": "Customer"
},
]
},
{
"label": _("SMS"),
"icon": "fa fa-wrench",
"items": [
{
"type": "doctype",
"name": "SMS Center",
"description":_("Send mass SMS to your contacts"),
},
{
"type": "doctype",
"name": "SMS Log",
"description":_("Logs for maintaining sms delivery status"),
},
{
"type": "doctype",
"name": "SMS Settings",
"description": _("Setup SMS gateway settings")
},
]
},
{
"label": _("Help"),
"items": [
{
"type": "help",
"label": _("Customer and Supplier"),
"youtube_id": "anoGi_RpQ20"
},
{
"type": "help",
"label": _("Sales Order to Payment"),
"youtube_id": "7AMq4lqkN4A"
},
{
"type": "help",
"label": _("Point-of-Sale"),
"youtube_id": "4WkelWkbP_c"
},
]
},
]
| gpl-3.0 |
freieslabor/info-display | info_display/screens/event_schedule/management/commands/updateevents.py | 1 | 1290 | from django.core.management.base import BaseCommand
from django.conf import settings
from datetime import datetime
from pytz import timezone
from dateutil.tz import tzlocal
from icalendar import Calendar
import urllib.request
from ...models import Event, CalendarFeed
class Command(BaseCommand):
help = 'Updates event schedule and cleans past events.'
def handle(self, *args, **options):
# insert new events
for ical in CalendarFeed.objects.all():
with urllib.request.urlopen(ical.url) as f:
cal = Calendar.from_ical(f.read())
for event in cal.walk('vevent'):
# create datetime object and localize it
date = event['DTSTART'].dt
date_time = date.astimezone(timezone(settings.TIME_ZONE))
# create public transport schedule object and save it
schedule = Event(
id=event['UID'],
date=date_time,
title=event['SUMMARY']
)
schedule.save()
self.stdout.write('Successfully updated %s.' % ical.url)
# clear past events
Event.objects.filter(date__lt=datetime.now(tzlocal())).delete()
| mpl-2.0 |
desihub/desispec | py/desispec/pipeline/control.py | 1 | 46258 | #
# See top-level LICENSE.rst file for Copyright information
#
# -*- coding: utf-8 -*-
"""
desispec.pipeline.control
===========================
Tools for controling pipeline production.
"""
from __future__ import absolute_import, division, print_function
import os
import sys
import re
import time
from collections import OrderedDict
import numpy as np
from desiutil.log import get_logger
from .. import io
from ..parallel import (dist_uniform, dist_discrete, dist_discrete_all,
stdouterr_redirected)
from .defs import (task_states, prod_options_name,
task_state_to_int, task_int_to_state)
from . import prod as pipeprod
from . import db as pipedb
from . import run as piperun
from . import tasks as pipetasks
from . import scriptgen as scriptgen
class clr:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
def disable(self):
self.HEADER = ""
self.OKBLUE = ""
self.OKGREEN = ""
self.WARNING = ""
self.FAIL = ""
self.ENDC = ""
def create(root=None, data=None, redux=None, prod=None, force=False,
basis=None, calib=None, db_sqlite=False, db_sqlite_path=None,
db_postgres=False, db_postgres_host="nerscdb03.nersc.gov",
db_postgres_port=5432, db_postgres_name="desidev",
db_postgres_user="desidev_admin", db_postgres_authorized="desidev_ro",
nside=64 ):
"""Create (or re-create) a production.
Args:
root (str): value to use for DESI_ROOT.
data (str): value to use for DESI_SPECTRO_DATA.
redux (str): value to use for DESI_SPECTRO_REDUX.
prod (str): value to use for SPECPROD.
force (bool): if True, overwrite existing production DB.
basis (str): value to use for DESI_BASIS_TEMPLATES.
calib (str): value to use for DESI_SPECTRO_CALIB.
db_sqlite (bool): if True, use SQLite for the DB.
db_sqlite_path (str): override path to SQLite DB.
db_postgres (bool): if True, use PostgreSQL for the DB.
db_postgres_host (str): PostgreSQL hostname.
db_postgres_port (int): PostgreSQL connection port number.
db_postgres_name (str): PostgreSQL DB name.
db_postgres_user (str): PostgreSQL user name.
db_postgres_authorized (str): Additional PostgreSQL users to
authorize.
nside (int): HEALPix nside value used for spectral grouping.
"""
log = get_logger()
# Check desi root location
desiroot = None
if root is not None:
desiroot = os.path.abspath(root)
os.environ["DESI_ROOT"] = desiroot
elif "DESI_ROOT" in os.environ:
desiroot = os.environ["DESI_ROOT"]
else:
log.error("You must set DESI_ROOT in your environment or "
"set the root keyword argument")
raise RuntimeError("Invalid DESI_ROOT")
# Check raw data location
rawdir = None
if data is not None:
rawdir = os.path.abspath(data)
os.environ["DESI_SPECTRO_DATA"] = rawdir
elif "DESI_SPECTRO_DATA" in os.environ:
rawdir = os.environ["DESI_SPECTRO_DATA"]
else:
log.error("You must set DESI_SPECTRO_DATA in your environment or "
"set the data keyword argument")
raise RuntimeError("Invalid DESI_SPECTRO_DATA")
# Check production name
prodname = None
if prod is not None:
prodname = prod
os.environ["SPECPROD"] = prodname
elif "SPECPROD" in os.environ:
prodname = os.environ["SPECPROD"]
else:
log.error("You must set SPECPROD in your environment or "
"set the prod keyword argument")
raise RuntimeError("Invalid SPECPROD")
# Check spectro redux location
specdir = None
if redux is not None:
specdir = os.path.abspath(redux)
os.environ["DESI_SPECTRO_REDUX"] = specdir
elif "DESI_SPECTRO_REDUX" in os.environ:
specdir = os.environ["DESI_SPECTRO_REDUX"]
else:
log.error("You must set DESI_SPECTRO_REDUX in your environment or "
"set the redux keyword argument")
raise RuntimeError("Invalid DESI_SPECTRO_REDUX")
proddir = os.path.join(specdir, prodname)
if os.path.exists(proddir) and not force :
log.error("Production {} exists.\n"
"Either remove this directory if you want to start fresh\n"
"or use 'desi_pipe update' to update a production\n"
"or rerun with --force option.".format(proddir))
raise RuntimeError("production already exists")
# Check basis template location
if basis is not None:
basis = os.path.abspath(basis)
os.environ["DESI_BASIS_TEMPLATES"] = basis
elif "DESI_BASIS_TEMPLATES" in os.environ:
basis = os.environ["DESI_BASIS_TEMPLATES"]
else:
log.error("You must set DESI_BASIS_TEMPLATES in your environment or "
"set the basis keyword argument")
raise RuntimeError("Invalid DESI_BASIS_TEMPLATES")
# Check calibration location
if calib is not None:
calib = os.path.abspath(calib)
os.environ["DESI_SPECTRO_CALIB"] = calib
elif "DESI_SPECTRO_CALIB" in os.environ:
calib = os.environ["DESI_SPECTRO_CALIB"]
else:
log.error("You must set DESI_SPECTRO_CALIB in your environment "
" or set the calib keyword argument")
raise RuntimeError("Invalid DESI_SPECTRO_CALIB")
# Construct our DB connection string
dbpath = None
if db_postgres:
# We are creating a new postgres backend. Explicitly create the
# database, so that we can get the schema key.
db = pipedb.DataBasePostgres(host=db_postgres_host,
port=db_postgres_port, dbname=db_postgres_name,
user=db_postgres_user, schema=None,
authorize=db_postgres_authorized)
dbprops = [
"postgresql",
db_postgres_host,
"{}".format(db_postgres_port),
db_postgres_name,
db_postgres_user,
db.schema
]
dbpath = ":".join(dbprops)
os.environ["DESI_SPECTRO_DB"] = dbpath
elif db_sqlite:
# We are creating a new sqlite backend
if db_sqlite_path is not None:
# We are using a non-default path
dbpath = os.path.abspath(db_sqlite_path)
else:
# We are using sqlite with the default location
dbpath = os.path.join(proddir, "desi.db")
if not os.path.isdir(proddir):
os.makedirs(proddir)
# Create the database
db = pipedb.DataBaseSqlite(dbpath, "w")
os.environ["DESI_SPECTRO_DB"] = dbpath
elif "DESI_SPECTRO_DB" in os.environ:
# We are using an existing prod
dbpath = os.environ["DESI_SPECTRO_DB"]
else:
# Error- we have to get the DB info from somewhere
log.error("You must set DESI_SPECTRO_DB in your environment or "
"use the db_sqlite or db_postgres arguments")
raise RuntimeError("Invalid DESI_SPECTRO_DB")
pipeprod.update_prod(nightstr=None, hpxnside=nside)
# create setup shell snippet
setupfile = os.path.abspath(os.path.join(proddir, "setup.sh"))
with open(setupfile, "w") as s:
s.write("# Generated by desi_pipe\n")
s.write("export DESI_ROOT={}\n\n".format(desiroot))
s.write("export DESI_BASIS_TEMPLATES={}\n".format(basis))
s.write("export DESI_SPECTRO_CALIB={}\n\n".format(calib))
s.write("export DESI_SPECTRO_DATA={}\n\n".format(rawdir))
s.write("# Production originally created at\n")
s.write("# $DESI_SPECTRO_REDUX={}\n".format(specdir))
s.write("# $SPECPROD={}\n".format(prodname))
s.write("#\n")
s.write("# Support the ability to move the production\n")
s.write("# - get abspath to directory where this script is located\n")
s.write("# - unpack proddir=$DESI_SPECTRO_REDUX/$SPECPROD\n\n")
s.write('proddir=$(cd $(dirname "$BASH_SOURCE"); pwd)\n')
s.write("export DESI_SPECTRO_REDUX=$(dirname $proddir)\n")
s.write("export SPECPROD=$(basename $proddir)\n\n")
# s.write("export DESI_SPECTRO_REDUX={}\n".format(specdir))
# s.write("export SPECPROD={}\n".format(specprod))
s.write("export DESI_SPECTRO_DB={}\n".format(dbpath))
s.write("\n")
if "DESI_LOGLEVEL" in os.environ:
s.write("export DESI_LOGLEVEL=\"{}\"\n\n"\
.format(os.environ["DESI_LOGLEVEL"]))
else:
s.write("#export DESI_LOGLEVEL=\"DEBUG\"\n\n")
log.info("\n\nTo use this production, you should do:\n%> source {}\n\n"\
.format(setupfile))
return
def update(nightstr=None, nside=64, expid=None):
"""Update a production.
Args:
nightstr (str): Comma separated (YYYYMMDD) or regex pattern. Only
nights matching these patterns will be considered.
nside (int): HEALPix nside value used for spectral grouping.
expid (int): Only update the production for a single exposure ID.
"""
pipeprod.update_prod(nightstr=nightstr, hpxnside=nside, expid=expid)
return
def get_tasks_type(db, tasktype, states, nights, expid=None, spec=None):
"""Get tasks of one type that match certain criteria.
Args:
db (DataBase): the production DB.
tasktype (str): a valid task type.
states (list): list of task states to select.
nights (list): list of nights to select.
expid (int): exposure ID to select.
spec (int): spectrograph to select.
Returns:
(list): list of tasks meeting the criteria.
"""
ntlist = ",".join(nights)
if (expid is not None) and (len(nights) > 1):
raise RuntimeError("Only one night should be specified when "
"getting tasks for a single exposure.")
tasks = list()
with db.cursor() as cur:
if tasktype == "spectra" or tasktype == "redshift":
cmd = "select pixel from healpix_frame where night in ({})".format(ntlist)
cur.execute(cmd)
pixels = np.unique([ x for (x,) in cur.fetchall() ]).tolist()
pixlist = ",".join([ str(p) for p in pixels])
cmd = "select name,state from {} where pixel in ({})".format(tasktype, pixlist)
cur.execute(cmd)
tasks = [ x for (x, y) in cur.fetchall() if \
task_int_to_state[y] in states ]
else :
cmd = "select name, state from {} where night in ({})"\
.format(tasktype, ntlist)
if expid is not None:
cmd = "{} and expid = {}".format(cmd, expid)
if spec is not None:
cmd = "{} and spec = {}".format(cmd, spec)
cur.execute(cmd)
tasks = [ x for (x, y) in cur.fetchall() if \
task_int_to_state[y] in states ]
return tasks
def get_tasks(db, tasktypes, nights, states=None, expid=None, spec=None,
nosubmitted=False, taskfile=None):
"""Get tasks of multiple types that match certain criteria.
Args:
db (DataBase): the production DB.
tasktypes (list): list of valid task types.
states (list): list of task states to select.
nights (list): list of nights to select.
expid (int): exposure ID to select.
spec (int): spectrograph to select.
nosubmitted (bool): if True, ignore tasks that were already
submitted.
Returns:
list: all tasks of all types.
"""
all_tasks = list()
for tt in tasktypes:
tasks = get_tasks_type(db, tt, states, nights, expid=expid, spec=spec)
if nosubmitted:
if (tt != "spectra") and (tt != "redshift"):
sb = db.get_submitted(tasks)
tasks = [ x for x in tasks if not sb[x] ]
all_tasks.extend(tasks)
return all_tasks
def tasks(tasktypes, nightstr=None, states=None, expid=None, spec=None,
nosubmitted=False, db_postgres_user="desidev_ro", taskfile=None):
"""Get tasks of multiple types that match certain criteria.
Args:
tasktypes (list): list of valid task types.
nightstr (list): comma separated (YYYYMMDD) or regex pattern.
states (list): list of task states to select.
expid (int): exposure ID to select.
spec (int): spectrograph to select.
nosubmitted (bool): if True, ignore tasks that were already
submitted.
db_postgres_user (str): If using postgres, connect as this
user for read-only access"
taskfile (str): if set write to this file, else write to STDOUT.
"""
if states is None:
states = task_states
else:
for s in states:
if s not in task_states:
raise RuntimeError("Task state '{}' is not valid".format(s))
dbpath = io.get_pipe_database()
db = pipedb.load_db(dbpath, mode="r", user=db_postgres_user)
allnights = io.get_nights(strip_path=True)
nights = pipeprod.select_nights(allnights, nightstr)
ttypes = list()
for tt in pipedb.all_task_types():
if tt in tasktypes:
ttypes.append(tt)
all_tasks = get_tasks(db, ttypes, nights, states=states, expid=expid,
spec=spec, nosubmitted=nosubmitted)
pipeprod.task_write(taskfile, all_tasks)
return
def getready(db, nightstr=None):
"""Update forward dependencies in the database.
Update database for one or more nights to ensure that forward
dependencies know that they are ready to run.
Args:
db (DataBase): the production DB.
nightstr (list): comma separated (YYYYMMDD) or regex pattern.
"""
allnights = io.get_nights(strip_path=True)
nights = pipeprod.select_nights(allnights, nightstr)
for nt in nights:
db.getready(night=nt)
return
def check_tasks(tasks, db=None):
"""Check the state of pipeline tasks.
If the database handle is given, use the DB for checking. Otherwise
use the filesystem.
Args:
tasks (list): list of tasks to check.
db (DataBase): the database to use.
Returns:
OrderedDict: dictionary of the state of each task.
"""
states = pipedb.check_tasks(tasks, db=db)
tskstate = OrderedDict()
for tsk in tasks:
tskstate[tsk] = states[tsk]
return tskstate
def sync(db, nightstr=None, specdone=False):
"""Synchronize DB state based on the filesystem.
This scans the filesystem for all tasks for the specified nights,
and updates the states accordingly.
Args:
db (DataBase): the production DB.
nightstr (list): comma separated (YYYYMMDD) or regex pattern.
specdone: If true, set spectra to done if files exist.
"""
allnights = io.get_nights(strip_path=True)
nights = pipeprod.select_nights(allnights, nightstr)
for nt in nights:
db.sync(nt,specdone=specdone)
return
def cleanup(db, tasktypes, failed=False, submitted=False, expid=None):
"""Clean up stale tasks in the DB.
Args:
db (DataBase): the production DB.
tasktypes (list): list of valid task types.
failed (bool): also clear failed states.
submitted (bool): also clear submitted flag.
expid (int): only clean this exposure ID.
"""
exid = None
if expid is not None and expid >= 0:
exid = expid
db.cleanup(tasktypes=tasktypes, expid=exid, cleanfailed=failed,
cleansubmitted=submitted)
return
def dryrun(tasks, nersc=None, nersc_queue="regular", nersc_maxtime=0,
nersc_maxnodes=0, nersc_shifter=None, mpi_procs=1, mpi_run="",
procs_per_node=0, nodb=False, db_postgres_user="desidev_ro", force=False):
"""Print equivalent command line jobs.
For the specified tasks, print the equivalent stand-alone commands
that would be run on each task. A pipeline job calls the internal
desispec.scripts entrypoints directly.
Args:
tasks (list): list of tasks to run.
nersc (str): if not None, the name of the nersc machine to use
(cori-haswell | cori-knl).
nersc_queue (str): the name of the queue to use
(regular | debug | realtime).
nersc_maxtime (int): if specified, restrict the runtime to this
number of minutes.
nersc_maxnodes (int): if specified, restrict the job to use this
number of nodes.
nersc_shifter (str): the name of the shifter image to use.
mpi_run (str): if specified, and if not using NERSC, use this
command to launch MPI executables in the shell scripts. Default
is to not use MPI.
mpi_procs (int): if not using NERSC, the number of MPI processes
to use in shell scripts.
procs_per_node (int): if specified, use only this number of
processes per node. Default runs one process per core.
nodb (bool): if True, do not use the production DB.
db_postgres_user (str): If using postgres, connect as this
user for read-only access"
force (bool): if True, print commands for all tasks, not just the ones
in a ready state.
"""
tasks_by_type = pipedb.task_sort(tasks)
(db, opts) = pipeprod.load_prod("r", user=db_postgres_user)
if nodb:
db = None
ppn = None
if procs_per_node > 0:
ppn = procs_per_node
if nersc is None:
# Not running at NERSC
if ppn is None:
ppn = mpi_procs
for tt, tlist in tasks_by_type.items():
piperun.dry_run(tt, tlist, opts, mpi_procs,
ppn, db=db, launch="mpirun -n", force=force)
else:
# Running at NERSC
hostprops = scriptgen.nersc_machine(nersc,
nersc_queue)
for tt, tlist in tasks_by_type.items():
joblist = scriptgen.nersc_job_size(tt, tlist,
nersc, nersc_queue, nersc_maxtime,
nersc_maxnodes, nodeprocs=ppn, db=db)
launch="srun -n"
for (jobnodes, jobppn, jobtime, jobworkers, jobtasks) in joblist:
jobprocs = jobnodes * jobppn
piperun.dry_run(tt, jobtasks, opts, jobprocs,
jobppn, db=db, launch=launch, force=force)
return
def gen_scripts(tasks_by_type, nersc=None, nersc_queue="regular",
    nersc_maxtime=0, nersc_maxnodes=0, nersc_shifter=None, mpi_procs=1,
    mpi_run="", procs_per_node=0, nodb=False, out=None, debug=False,
    db_postgres_user="desidev_ro"):
    """Generate scripts to run tasks of one or more types.

    If multiple task type keys are contained in the dictionary, they will
    be packed into a single batch job.

    Args:
        tasks_by_type (dict): each key is the task type and the value is
            a list of tasks.
        nersc (str): if not None, the name of the nersc machine to use
            (cori-haswell | cori-knl).
        nersc_queue (str): the name of the queue to use
            (regular | debug | realtime).
        nersc_maxtime (int): if specified, restrict the runtime to this
            number of minutes.
        nersc_maxnodes (int): if specified, restrict the job to use this
            number of nodes.
        nersc_shifter (str): the name of the shifter image to use.
        mpi_procs (int): if not using NERSC, the number of MPI processes
            to use in shell scripts.
        mpi_run (str): if specified, and if not using NERSC, use this
            command to launch MPI executables in the shell scripts. Default
            is to not use MPI.
        procs_per_node (int): if specified, use only this number of
            processes per node. Default runs one process per core.
        nodb (bool): if True, do not use the production DB.
        out (str): Put task scripts and logs in this directory relative to
            the production 'scripts' directory. Default puts task directory
            in the main scripts directory.
        debug (bool): if True, enable DEBUG log level in generated scripts.
        db_postgres_user (str): If using postgres, connect as this
            user for read-only access.

    Returns:
        list: the generated script files.
    """
    ttypes = list(tasks_by_type.keys())
    if len(ttypes)==0 :
        return None
    # The job name records the first (and, if packing, the last) task type.
    jobname = ttypes[0]
    if len(ttypes) > 1:
        jobname = "{}-{}".format(ttypes[0], ttypes[-1])
    proddir = os.path.abspath(io.specprod_root())
    import datetime
    now = datetime.datetime.now()
    # Unique output directory name built from the job name and a timestamp.
    outtaskdir = "{}_{:%Y%m%d-%H%M%S-%f}".format(jobname, now)
    if out is None:
        outdir = os.path.join(proddir, io.get_pipe_rundir(),
            io.get_pipe_scriptdir(), outtaskdir)
    else:
        outdir = os.path.join(proddir, io.get_pipe_rundir(),
            io.get_pipe_scriptdir(), out, outtaskdir)
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    # Base name of the script/log files: "run" for shell, machine name for NERSC.
    mstr = "run"
    if nersc is not None:
        mstr = nersc
    outscript = os.path.join(outdir, mstr)
    outlog = os.path.join(outdir, mstr)
    (db, opts) = pipeprod.load_prod("r", user=db_postgres_user)
    if nodb:
        db = None
    ppn = None
    if procs_per_node > 0:
        ppn = procs_per_node
    # FIXME: Add openmp / multiproc function to task classes and
    # call them here.
    scripts = None
    if nersc is None:
        # Not running at NERSC
        scripts = scriptgen.batch_shell(tasks_by_type,
            outscript, outlog, mpirun=mpi_run,
            mpiprocs=mpi_procs, openmp=1, db=db)
    else:
        # Running at NERSC
        scripts = scriptgen.batch_nersc(tasks_by_type,
            outscript, outlog, jobname, nersc, nersc_queue,
            nersc_maxtime, nersc_maxnodes, nodeprocs=ppn,
            openmp=False, multiproc=False, db=db,
            shifterimg=nersc_shifter, debug=debug)
    return scripts
def script(taskfile, nersc=None, nersc_queue="regular",
    nersc_maxtime=0, nersc_maxnodes=0, nersc_shifter=None, mpi_procs=1,
    mpi_run="", procs_per_node=0, nodb=False, out=None, debug=False,
    db_postgres_user="desidev_ro"):
    """Generate pipeline scripts for a taskfile.

    Tasks are read from the file (or STDIN), grouped by task type, and
    passed on to gen_scripts() to produce the batch / shell scripts.

    Args:
        taskfile (str): read tasks from this file (if not specified,
            read from STDIN).
        nersc (str): if not None, the name of the nersc machine to use
            (cori-haswell | cori-knl).
        nersc_queue (str): the name of the queue to use
            (regular | debug | realtime).
        nersc_maxtime (int): if specified, restrict the runtime to this
            number of minutes.
        nersc_maxnodes (int): if specified, restrict the job to use this
            number of nodes.
        nersc_shifter (str): the name of the shifter image to use.
        mpi_procs (int): if not using NERSC, the number of MPI processes
            to use in shell scripts.
        mpi_run (str): if specified, and if not using NERSC, use this
            command to launch MPI executables in the shell scripts.
            Default is to not use MPI.
        procs_per_node (int): if specified, use only this number of
            processes per node.  Default runs one process per core.
        nodb (bool): if True, do not use the production DB.
        out (str): put task scripts and logs in this directory relative to
            the production 'scripts' directory.  Default puts task
            directory in the main scripts directory.
        debug (bool): if True, enable DEBUG log level in generated scripts.
        db_postgres_user (str): if using postgres, connect as this
            user for read-only access.

    Returns:
        list: the generated script files.
    """
    task_list = pipeprod.task_read(taskfile)
    if len(task_list) == 0:
        import warnings
        warnings.warn("Input task list is empty", RuntimeWarning)
        return list()
    grouped = pipedb.task_sort(task_list)
    return gen_scripts(
        grouped,
        nersc=nersc,
        nersc_queue=nersc_queue,
        nersc_maxtime=nersc_maxtime,
        nersc_maxnodes=nersc_maxnodes,
        nersc_shifter=nersc_shifter,
        mpi_procs=mpi_procs,
        mpi_run=mpi_run,
        procs_per_node=procs_per_node,
        nodb=nodb,
        out=out,
        debug=debug,
        db_postgres_user=db_postgres_user)
def run_scripts(scripts, deps=None, slurm=False):
    """Run job scripts with optional dependencies.

    Either submits the scripts to the slurm scheduler or executes them
    sequentially with subprocess.

    Args:
        scripts (list): list of pathnames of the scripts to run.
        deps (list): optional list of job IDs which are dependencies for
            these scripts.
        slurm (bool): if True use slurm to submit the jobs.

    Returns:
        list: the job IDs returned by the scheduler (empty when slurm
            is False).
    """
    import subprocess as sp
    log = get_logger()
    # Build the sbatch dependency option ("-d afterok:<id>:<id>:...").
    depstr = ""
    if deps is not None and len(deps) > 0:
        depstr = "-d afterok" + "".join([":{}".format(d) for d in deps])
    jobids = list()
    if slurm:
        # Submit each script and collect the assigned job IDs.
        for script_path in scripts:
            cmd = "sbatch {} {}".format(depstr, script_path)
            log.debug(time.asctime())
            log.info(cmd)
            sout = sp.check_output(cmd, shell=True, universal_newlines=True)
            log.info(sout)
            # The job ID is the digits in the fourth token of the
            # "Submitted batch job NNNN" message printed by sbatch.
            jobids.append(re.sub(r'[^\d]', '', sout.split()[3]))
    else:
        # No scheduler: run the scripts one at a time in the shell.
        for script_path in scripts:
            ret = sp.call(script_path, shell=True)
            if ret != 0:
                log.warning("script {} had return code = {}".format(
                    script_path, ret))
    return jobids
def run(taskfile, nosubmitted=False, depjobs=None, nersc=None,
    nersc_queue="regular", nersc_maxtime=0, nersc_maxnodes=0,
    nersc_shifter=None, mpi_procs=1, mpi_run="", procs_per_node=0, nodb=False,
    out=None, debug=False):
    """Create job scripts and run them.

    This gets tasks from the taskfile and sorts them by type.  Then it
    generates the scripts.  Finally, it runs or submits those scripts
    to the scheduler.

    Args:
        taskfile (str): read tasks from this file (if not specified,
            read from STDIN).
        nosubmitted (bool): if True, do not run jobs that have already
            been submitted.
            NOTE(review): this flag is accepted but never used in this
            function body; the filtering appears to happen in chain() —
            confirm before relying on it here.
        depjobs (list): list of job ID dependencies.
        nersc (str): if not None, the name of the nersc machine to use
            (cori-haswell | cori-knl).
        nersc_queue (str): the name of the queue to use
            (regular | debug | realtime).
        nersc_maxtime (int): if specified, restrict the runtime to this
            number of minutes.
        nersc_maxnodes (int): if specified, restrict the job to use this
            number of nodes.
        nersc_shifter (str): the name of the shifter image to use.
        mpi_procs (int): if not using NERSC, the number of MPI processes
            to use in shell scripts.
        mpi_run (str): if specified, and if not using NERSC, use this
            command to launch MPI executables in the shell scripts. Default
            is to not use MPI.
        procs_per_node (int): if specified, use only this number of
            processes per node. Default runs one process per core.
        nodb (bool): if True, do not use the production DB.
        out (str): Put task scripts and logs in this directory relative to
            the production 'scripts' directory. Default puts task directory
            in the main scripts directory.
        debug (bool): if True, enable DEBUG log level in generated scripts.

    Returns:
        list: the job IDs returned by the scheduler.
    """
    log = get_logger()
    tasks = pipeprod.task_read(taskfile)
    jobids = list()
    if len(tasks) > 0:
        tasks_by_type = pipedb.task_sort(tasks)
        tasktypes = list(tasks_by_type.keys())
        # We are packing everything into one job
        scripts = gen_scripts(
            tasks_by_type,
            nersc=nersc,
            nersc_queue=nersc_queue,
            nersc_maxtime=nersc_maxtime,
            nersc_maxnodes=nersc_maxnodes,
            nersc_shifter=nersc_shifter,
            mpi_procs=mpi_procs,
            mpi_run=mpi_run,
            procs_per_node=procs_per_node,
            nodb=nodb,
            out=out,
            debug=debug)
        log.info("wrote scripts {}".format(scripts))
        deps = None
        slurm = False
        if nersc is not None:
            slurm = True
        if depjobs is not None:
            deps = depjobs
        # Run the jobs
        if not nodb:
            # We can use the DB, mark tasks as submitted.
            # (spectra / redshift tasks have no "submitted" flag.)
            if slurm:
                dbpath = io.get_pipe_database()
                db = pipedb.load_db(dbpath, mode="w")
                for tt in tasktypes:
                    if (tt != "spectra") and (tt != "redshift"):
                        db.set_submitted_type(tt, tasks_by_type[tt])
        jobids = run_scripts(scripts, deps=deps, slurm=slurm)
    else:
        import warnings
        warnings.warn("Input task list is empty", RuntimeWarning)
    return jobids
def chain(tasktypes, nightstr=None, states=None, expid=None, spec=None,
    pack=False, nosubmitted=False, depjobs=None, nersc=None,
    nersc_queue="regular", nersc_maxtime=0, nersc_maxnodes=0,
    nersc_shifter=None, mpi_procs=1, mpi_run="", procs_per_node=0, nodb=False,
    out=None, debug=False, dryrun=False):
    """Run a chain of jobs for multiple pipeline steps.

    For the list of task types, get all ready tasks meeting the selection
    criteria.  Then either pack all tasks into one job or submit
    each task type as its own job.  Input job dependencies can be
    specified, and dependencies are tracked between jobs in the chain.

    Args:
        tasktypes (list): list of valid task types.
        nightstr (str): Comma separated (YYYYMMDD) or regex pattern. Only
            nights matching these patterns will be considered.
        states (list): list of task states to select.
        expid (int): exposure ID to select.
        spec (int): spectrograph to select.
        pack (bool): if True, pack all tasks into a single job.
        nosubmitted (bool): if True, do not run jobs that have already
            been submitted.
        depjobs (list): list of job ID dependencies.
        nersc (str): if not None, the name of the nersc machine to use
            (cori-haswell | cori-knl).
        nersc_queue (str): the name of the queue to use
            (regular | debug | realtime).
        nersc_maxtime (int): if specified, restrict the runtime to this
            number of minutes.
        nersc_maxnodes (int): if specified, restrict the job to use this
            number of nodes.
        nersc_shifter (str): the name of the shifter image to use.
        mpi_procs (int): if not using NERSC, the number of MPI processes
            to use in shell scripts.
        mpi_run (str): if specified, and if not using NERSC, use this
            command to launch MPI executables in the shell scripts. Default
            is to not use MPI.
        procs_per_node (int): if specified, use only this number of
            processes per node. Default runs one process per core.
        nodb (bool): if True, do not use the production DB.
        out (str): Put task scripts and logs in this directory relative to
            the production 'scripts' directory. Default puts task directory
            in the main scripts directory.
        debug (bool): if True, enable DEBUG log level in generated scripts.
        dryrun (bool): if True, do not submit the jobs.

    Returns:
        list: the job IDs from the final step in the chain.
    """
    log = get_logger()
    machprops = None
    if nersc is not None:
        machprops = scriptgen.nersc_machine(nersc, nersc_queue)
    if states is None:
        states = task_states
    else:
        for s in states:
            if s not in task_states:
                raise RuntimeError("Task state '{}' is not valid".format(s))
    # Order the requested types following the canonical task chain.
    ttypes = list()
    for tt in pipetasks.base.default_task_chain:
        if tt in tasktypes:
            ttypes.append(tt)
    if (machprops is not None) and (not pack):
        if len(ttypes) > machprops["submitlimit"]:
            log.error("Queue {} on machine {} limited to {} jobs."\
                .format(nersc_queue, nersc,
                machprops["submitlimit"]))
            log.error("Use a different queue or shorter chains of tasks.")
            raise RuntimeError("Too many jobs")
    slurm = False
    if nersc is not None:
        slurm = True
    dbpath = io.get_pipe_database()
    db = pipedb.load_db(dbpath, mode="w")
    allnights = io.get_nights(strip_path=True)
    nights = pipeprod.select_nights(allnights, nightstr)
    outdeps = None
    indeps = None
    if depjobs is not None:
        indeps = depjobs
    tasks_by_type = OrderedDict()
    for tt in ttypes:
        # Get the tasks. We select by state and submitted status.
        tasks = get_tasks_type(db, tt, states, nights, expid=expid, spec=spec)
        if nosubmitted:
            # spectra / redshift tasks have no "submitted" flag.
            if (tt != "spectra") and (tt != "redshift"):
                sb = db.get_submitted(tasks)
                tasks = [ x for x in tasks if not sb[x] ]
        if len(tasks) == 0:
            import warnings
            warnings.warn("Input task list for '{}' is empty".format(tt),
                RuntimeWarning)
            continue # might be tasks to do in other ttype
        tasks_by_type[tt] = tasks
    scripts = None
    tscripts = None
    if pack:
        # We are packing everything into one job
        scripts = gen_scripts(
            tasks_by_type,
            nersc=nersc,
            nersc_queue=nersc_queue,
            nersc_maxtime=nersc_maxtime,
            nersc_maxnodes=nersc_maxnodes,
            nersc_shifter=nersc_shifter,
            mpi_procs=mpi_procs,
            mpi_run=mpi_run,
            procs_per_node=procs_per_node,
            nodb=nodb,
            out=out,
            debug=debug)
        if scripts is not None and len(scripts)>0 :
            log.info("wrote scripts {}".format(scripts))
    else:
        # Generate individual scripts per task type.
        tscripts = dict()
        for tt in ttypes:
            # BUG FIX: task types whose ready-task list was empty were
            # skipped above and are absent from tasks_by_type; indexing
            # them here previously raised a KeyError.
            if tt not in tasks_by_type:
                tscripts[tt] = None
                continue
            onetype = OrderedDict()
            onetype[tt] = tasks_by_type[tt]
            tscripts[tt] = gen_scripts(
                onetype,
                nersc=nersc,
                nersc_queue=nersc_queue,
                nersc_maxtime=nersc_maxtime,
                nersc_maxnodes=nersc_maxnodes,
                nersc_shifter=nersc_shifter,
                mpi_procs=mpi_procs,
                mpi_run=mpi_run,
                procs_per_node=procs_per_node,
                nodb=nodb,
                out=out,
                debug=debug)
            if tscripts[tt] is not None :
                log.info("wrote script {}".format(tscripts[tt]))
    if dryrun :
        log.warning("dry run: do not submit the jobs")
        return None
    # Run the jobs
    if slurm:
        # Mark exposure-based tasks as submitted before launching.
        for tt in ttypes:
            if (tt != "spectra") and (tt != "redshift"):
                if tt in tasks_by_type.keys() :
                    db.set_submitted_type(tt, tasks_by_type[tt])
    outdeps = None
    if pack:
        # Submit one job
        if scripts is not None and len(scripts)>0 :
            outdeps = run_scripts(scripts, deps=indeps, slurm=slurm)
    else:
        # Loop over task types submitting jobs and tracking dependencies.
        for tt in ttypes:
            if tscripts[tt] is not None :
                outdeps = run_scripts(tscripts[tt], deps=indeps,
                    slurm=slurm)
            if outdeps is not None and len(outdeps) > 0:
                indeps = outdeps
            else:
                indeps = None
    return outdeps
def status_color(state):
    """Return the terminal color escape code for a task state string."""
    palette = {
        "done": clr.OKGREEN,
        "running": clr.WARNING,
        "failed": clr.FAIL,
        "ready": clr.OKBLUE,
    }
    # Unknown states fall back to the "no color" escape.
    return palette.get(state, clr.ENDC)
def status_task(task, ttype, state, logdir):
    """Print the state of a single task and dump its log file if present.

    Args:
        task (str): the task name.
        ttype (str): the task type.
        state (str): the current state of the task.
        logdir (str): root of the production log directory.
    """
    fields = pipetasks.base.task_classes[ttype].name_split(task)
    tasklog = None
    if "night" in fields:
        # Night-based tasks keep their logs under <logdir>/night/<YYYYMMDD>/.
        tasklogdir = os.path.join(
            logdir, io.get_pipe_nightdir(),
            "{:08d}".format(fields["night"])
        )
        tasklog = os.path.join(
            tasklogdir,
            "{}.log".format(task)
        )
    elif "pixel" in fields:
        # Healpix-based tasks keep their logs under <logdir>/healpix/<subdir>/.
        tasklogdir = os.path.join(
            logdir, "healpix",
            io.healpix_subdirectory(fields["nside"], fields["pixel"])
        )
        tasklog = os.path.join(
            tasklogdir,
            "{}.log".format(task)
        )
    col = status_color(state)
    print("Task {}".format(task))
    print(
        "State = {}{}{}".format(
            col,
            state,
            clr.ENDC
        )
    )
    # BUG FIX: guard against task types with neither "night" nor "pixel"
    # fields, for which no log location is defined.  Previously tasklog
    # stayed None and os.path.isfile(None) raised a TypeError.
    if tasklog is not None and os.path.isfile(tasklog):
        print("Dumping task log {}".format(tasklog))
        print("=========== Begin Log =============")
        print("")
        with open(tasklog, "r") as f:
            logdata = f.read()
        print(logdata)
        print("")
        print("============ End Log ==============")
        print("", flush=True)
    else:
        print("Task log {} does not exist".format(tasklog), flush=True)
    return
def status_taskname(tsklist):
    """Print one colored "name: state" line per (task, state) pair."""
    for name, state in tsklist:
        line = "  {:20s}: {}{}{}".format(name, status_color(state), state,
            clr.ENDC)
        print(line, flush=True)
def status_night_totals(tasktypes, nights, tasks, tskstates):
    """Print, for each selected night, a table of per-type state totals.

    Args:
        tasktypes (list): task types to include.
        nights (list): selected nights as "YYYYMMDD" strings.
        tasks (dict): task names for each type.
        tskstates (dict): mapping of type -> {task name: state}.
    """
    # Accumulate totals for each night and type
    sep = "------------------+---------+---------+---------+---------+---------+"
    ntlist = list()
    nighttot = OrderedDict()
    for tt in tasktypes:
        if tt == "spectra" or tt == "redshift":
            # This function only prints nightly tasks
            continue
        for tsk in tasks[tt]:
            fields = pipetasks.base.task_classes[tt].name_split(tsk)
            nt = fields["night"]
            if nt not in nighttot:
                nighttot[nt] = OrderedDict()
            if tt not in nighttot[nt]:
                # Initialize a zero counter for every known state.
                nighttot[nt][tt] = OrderedDict()
                for s in task_states:
                    nighttot[nt][tt][s] = 0
            st = tskstates[tt][tsk]
            nighttot[nt][tt][st] += 1
    # Keep only the selected nights, then print them in sorted order.
    for nt, ttstates in nighttot.items():
        ntstr = "{:08d}".format(nt)
        if ntstr in nights:
            ntlist.append(nt)
    ntlist = list(sorted(ntlist))
    for nt in ntlist:
        ttstates = nighttot[nt]
        ntstr = "{:08d}".format(nt)
        if ntstr in nights:
            header = "{:18s}|".format(ntstr)
            for s in task_states:
                col = status_color(s)
                header = "{} {}{:8s}{}|".format(
                    header, col, s, clr.ENDC
                )
            print(sep)
            print(header)
            print(sep)
            for tt, totst in ttstates.items():
                line = "  {:16s}|".format(tt)
                for s in task_states:
                    line = "{}{:9d}|".format(line, totst[s])
                print(line)
            print("", flush=True)
def status_pixel_totals(tasktypes, tasks, tskstates):
    """Print a table of state totals for the healpix-based task types."""
    sep = "------------------+---------+---------+---------+---------+---------+"
    # Per-type state counters, restricted to the pixel-based task types.
    counts = OrderedDict()
    for ttype in tasktypes:
        if ttype not in ("spectra", "redshift"):
            # This function only prints pixel tasks.
            continue
        for tname in tasks[ttype]:
            if ttype not in counts:
                counts[ttype] = OrderedDict([(s, 0) for s in task_states])
            counts[ttype][tskstates[ttype][tname]] += 1
    header = "{:18s}|".format("Pixel Tasks")
    for s in task_states:
        header = "{} {}{:8s}{}|".format(header, status_color(s), s, clr.ENDC)
    print(sep)
    print(header)
    print(sep)
    for ttype, per_state in counts.items():
        row = "  {:16s}|".format(ttype)
        for s in task_states:
            row = "{}{:9d}|".format(row, per_state[s])
        print(row)
    print("", flush=True)
def status_night_tasks(tasktypes, nights, tasks, tskstates):
    """Print every night-based task with its state, grouped by night."""
    # Collect (task, state) pairs per night for the night-based types.
    per_night = OrderedDict()
    for ttype in tasktypes:
        if ttype in ("spectra", "redshift"):
            # This function only prints nightly tasks.
            continue
        for tname in tasks[ttype]:
            night = pipetasks.base.task_classes[ttype].name_split(tname)["night"]
            per_night.setdefault(night, list()).append(
                (tname, tskstates[ttype][tname]))
    # Restrict to the selected nights and print them in sorted order.
    selected = sorted(
        nt for nt in per_night if "{:08d}".format(nt) in nights)
    for nt in selected:
        print(nt)
        status_taskname(per_night[nt])
def status_pixel_tasks(tasktypes, tasks, tskstates):
    """Print every healpix-based task with its state, grouped by type."""
    for ttype in tasktypes:
        if ttype not in ("spectra", "redshift"):
            # This function only prints pixel tasks.
            continue
        pairs = [(tname, tskstates[ttype][tname]) for tname in tasks[ttype]]
        print(ttype)
        status_taskname(pairs)
def status_summary(tasktypes, nights, tasks, tskstates):
    """Print one summary table of state totals for each task type.

    Args:
        tasktypes (list): task types to include, in display order.
        nights (list): selected nights (unused; kept for interface
            compatibility with the other status table functions).
        tasks (dict): task names for each type.
        tskstates (dict): mapping of type -> {task name: state}.
    """
    sep = "----------------+---------+---------+---------+---------+---------+"
    print(sep)
    header_state = "{:16s}|".format(" Task Type")
    for s in task_states:
        col = status_color(s)
        header_state = "{} {}{:8s}{}|".format(
            header_state, col, s, clr.ENDC
        )
    print(header_state)
    print(sep)
    for tt in tasktypes:
        line = "{:16s}|".format(tt)
        for s in task_states:
            # Count the tasks of this type currently in state s.  A plain
            # generator sum replaces the original numpy list round trip.
            tsum = sum(1 for y in tskstates[tt].values() if y == s)
            line = "{}{:9d}|".format(line, tsum)
        print(line, flush=True)
def status(task=None, tasktypes=None, nightstr=None, states=None,
    expid=None, spec=None, db_postgres_user="desidev_ro"):
    """Check the status of pipeline tasks.

    Args:
        task (str): if given, print the status of (only) this task.
        tasktypes (list): if given, restrict output to these task types.
        nightstr (str): comma separated (YYYYMMDD) or regex pattern of
            nights to consider.
        states (list): if given, restrict output to tasks in these states.
        expid (int): exposure ID to select.
        spec (int): presumably the spectrograph number to select — TODO
            confirm against get_tasks().
        db_postgres_user (str): if using postgres, connect as this user
            for read-only access.

    Returns:
        None
    """
    dbpath = io.get_pipe_database()
    db = pipedb.load_db(dbpath, mode="r", user=db_postgres_user)
    rundir = io.get_pipe_rundir()
    logdir = os.path.join(rundir, io.get_pipe_logdir())
    tasks = OrderedDict()
    # With no selection at all, print only the global summary table.
    summary = False
    if (tasktypes is None) and (nightstr is None):
        summary = True
    if task is None:
        ttypes = None
        if tasktypes is not None:
            # Keep the canonical chain ordering for the requested types.
            ttypes = list()
            for tt in pipetasks.base.default_task_chain:
                if tt in tasktypes:
                    ttypes.append(tt)
        else:
            ttypes = list(pipetasks.base.default_task_chain)
        if states is None:
            states = task_states
        else:
            for s in states:
                if s not in task_states:
                    raise RuntimeError("Task state '{}' is not valid".format(s))
        allnights = io.get_nights(strip_path=True)
        nights = pipeprod.select_nights(allnights, nightstr)
        for tt in ttypes:
            tasks[tt] = get_tasks(
                db, [tt], nights, states=states, expid=expid, spec=spec
            )
    else:
        # A single task was requested; its type is derived from the name.
        ttypes = [pipetasks.base.task_type(task)]
        tasks[ttypes[0]] = [task]
    tstates = OrderedDict()
    for typ, tsks in tasks.items():
        tstates[typ] = pipedb.check_tasks(tsks, db=db)
    if len(ttypes) == 1 and len(tasks[ttypes[0]]) == 1:
        # Print status of this specific task
        thistype = ttypes[0]
        thistask = tasks[thistype][0]
        status_task(thistask, thistype, tstates[thistype][thistask], logdir)
    else:
        if len(ttypes) > 1 and len(nights) > 1:
            # We have multiple nights and multiple task types.
            # Just print totals.
            if summary:
                status_summary(ttypes, nights, tasks, tstates)
            else:
                status_night_totals(ttypes, nights, tasks, tstates)
                status_pixel_totals(ttypes, tasks, tstates)
        elif len(ttypes) > 1:
            # Multiple task types for one night. Print the totals for each
            # task type.
            thisnight = nights[0]
            status_night_totals(ttypes, nights, tasks, tstates)
        elif len(nights) > 1:
            # We have just one task type, print the state totals for each night
            # OR the full task list for redshift or spectra tasks.
            thistype = ttypes[0]
            print("Task type {}".format(thistype))
            if thistype == "spectra" or thistype == "redshift":
                status_pixel_tasks(ttypes, tasks, tstates)
            else:
                status_night_totals(ttypes, nights, tasks, tstates)
        else:
            # We have one type and one night, print the full state of every
            # task.
            thistype = ttypes[0]
            thisnight = nights[0]
            print("Task type {}".format(thistype))
            status_night_tasks(ttypes, nights, tasks, tstates)
            status_pixel_tasks(ttypes, tasks, tstates)
    return
| bsd-3-clause |
sogelink/ansible | lib/ansible/module_utils/facts/virtual/freebsd.py | 135 | 1525 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
class FreeBSDVirtual(Virtual):
    """
    This is a FreeBSD-specific subclass of Virtual. It defines
    - virtualization_type
    - virtualization_role
    """
    platform = 'FreeBSD'

    def get_virtual_facts(self):
        # Default to empty strings when no hypervisor is detected.
        facts = {'virtualization_type': '',
                 'virtualization_role': ''}
        # The xenstore device node is only present inside a Xen guest.
        if os.path.exists('/dev/xen/xenstore'):
            facts['virtualization_type'] = 'xen'
            facts['virtualization_role'] = 'guest'
        return facts
class FreeBSDVirtualCollector(VirtualCollector):
    # Collector wiring: exposes FreeBSDVirtual facts on the FreeBSD platform.
    _fact_class = FreeBSDVirtual
    _platform = 'FreeBSD'
| gpl-3.0 |
Javiercerna/MissionPlanner | Lib/encodings/zlib_codec.py | 88 | 3117 | """ Python 'zlib_codec' Codec - zlib compression encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg (mal@lemburg.com).
"""
import codecs
import zlib # this codec needs the optional zlib module !
### Codec APIs
def zlib_encode(input, errors='strict'):
    """Compress *input* with zlib.

    Returns a ``(output object, length consumed)`` tuple.  Only the
    'strict' error handling mode is supported by this codec.
    """
    assert errors == 'strict'
    return (zlib.compress(input), len(input))
def zlib_decode(input, errors='strict'):
    """Decompress *input* with zlib.

    Returns a ``(output object, length consumed)`` tuple.  *input* must
    support the buffer interface (bytes, bytearray, memory mapped files,
    ...).  Only the 'strict' error handling mode is supported by this
    codec.
    """
    assert errors == 'strict'
    return (zlib.decompress(input), len(input))
class Codec(codecs.Codec):
    """Stateless encoder/decoder interface for the zlib codec."""
    def encode(self, input, errors='strict'):
        # Delegate to the module-level helper.
        return zlib_encode(input, errors)
    def decode(self, input, errors='strict'):
        # Delegate to the module-level helper.
        return zlib_decode(input, errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder feeding chunks through a zlib compressobj."""

    def __init__(self, errors='strict'):
        # Only 'strict' error handling is supported by this codec.
        assert errors == 'strict'
        self.errors = errors
        self.compressobj = zlib.compressobj()

    def encode(self, input, final=False):
        """Compress one chunk; flush the stream when *final* is true."""
        chunk = self.compressobj.compress(input)
        if not final:
            return chunk
        return chunk + self.compressobj.flush()

    def reset(self):
        """Discard buffered state and start a fresh compression stream."""
        self.compressobj = zlib.compressobj()
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder feeding chunks through a zlib decompressobj."""

    def __init__(self, errors='strict'):
        # Only 'strict' error handling is supported by this codec.
        assert errors == 'strict'
        self.errors = errors
        self.decompressobj = zlib.decompressobj()

    def decode(self, input, final=False):
        """Decompress one chunk; flush the stream when *final* is true."""
        chunk = self.decompressobj.decompress(input)
        if not final:
            return chunk
        return chunk + self.decompressobj.flush()

    def reset(self):
        """Discard buffered state and start a fresh decompression stream."""
        self.decompressobj = zlib.decompressobj()
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer combining the zlib Codec with codecs.StreamWriter."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader combining the zlib Codec with codecs.StreamReader."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry used to register this codec."""
    entry = {
        'name': 'zlib',
        'encode': zlib_encode,
        'decode': zlib_decode,
        'incrementalencoder': IncrementalEncoder,
        'incrementaldecoder': IncrementalDecoder,
        'streamreader': StreamReader,
        'streamwriter': StreamWriter,
    }
    return codecs.CodecInfo(**entry)
rwstauner/python-photo-shrinker | shrinkphotos.py | 1 | 2783 | #!/usr/bin/env python
"shrink photos so that it doesn't take 8 days to transfer them"
# Copyright (c) 2012 Randy Stauner
# Licensed under the MIT License: http://opensource.org/licenses/MIT
from PIL import Image
import os
from os.path import join, isfile, isdir, dirname, basename
import re
import sys
# configuration
DIR_SUFFIX = '-shrunk'    # appended to the source directory name for output
FILE_SUFFIX = '-shrunk'   # inserted into each output file name
# PIC_RE should capture the file extension;
# Later FILE_SUFFIX will be inserted before it
PIC_RE = re.compile(r'(\.jpe?g)$', re.I)
QUALITY = 85              # JPEG quality used when re-saving shrunk images
RESOLUTION = (1600, 1200) # maximum (width, height) of shrunk images
def shrinkphotos(top, src, dest):
"shrink each file in top/src/* and save in top/dest/*"
os.chdir(top)
src_full, dest_full = [join(top, x) for x in [src, dest]]
print "shrinking images found in\n %s\nand saving to\n %s" % \
(src_full, dest_full)
# Just a warning; Allow the script to be re-run.
if isdir(dest_full):
print "destination %s already exists" % (dest_full)
if raw_input("\ncontinue? (y/n): ").lower() == 'y':
recurse(src_full, dest_full)
def recurse(src, dest):
"down, down, down"
# I suppose this could be allowed as long as there is a FILE_SUFFIX
if src == dest:
raise "source and destination directories should not be the same!"
# os.walk descends by itself and then it's hard to replace src with dest
# so we recurse manually
files = os.listdir(src)
files.sort()
print " - %s: %d files" % (src, len(files))
for name in files:
if isfile(join(src, name)):
# If the file name matches the re (has the right extension)
if PIC_RE.search(name):
# src/file.jpg => dest/file-shrunk.jpg
dest_file = join(dest, PIC_RE.sub(r'%s\1' % FILE_SUFFIX, name))
# skip if already exists
if not isfile(dest_file):
# Ensure destination directory exists
if not isdir(dest):
os.makedirs(dest)
thumbnail(join(src, name), dest_file)
# descend to the next directory
elif isdir(join(src, name)):
recurse(join(src, name), join(dest, name))
def thumbnail(src, dest):
"shrink src and save to dest"
img = Image.open(src)
# Ensure image is not larger than RESOLUTION.
img.thumbnail(RESOLUTION)
# Set compression level on the new image.
img.save(dest, quality=QUALITY)
def shrinkarg(arg):
"use command line arg as source dir"
if arg == '' or arg == '.':
arg = os.getcwd()
# Strip trailing slash, etc
arg = os.path.normpath(arg)
top, src = dirname(arg), basename(arg)
dest = "%s%s" % (src, DIR_SUFFIX)
shrinkphotos(top, src, dest)
if __name__ == "__main__":
# If a directory is specified on command line
if len(sys.argv) > 1:
shrinkarg(sys.argv[1])
# Else use the directory the script is in
else:
shrinkarg(dirname(sys.argv[0]))
| mit |
scalable-networks/gnuradio-3.7.0.1 | gr-blocks/python/blocks/qa_max.py | 11 | 1954 | #!/usr/bin/env python
#
# Copyright 2007,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, blocks
import math
class test_max(gr_unittest.TestCase):
    """QA tests for the max_ff block."""

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def _run_max(self, src_data):
        # Shared flowgraph for both tests: source -> stream_to_vector ->
        # max_ff -> sink, returning the sink contents.
        src = blocks.vector_source_f(src_data)
        s2v = blocks.stream_to_vector(gr.sizeof_float, len(src_data))
        op = blocks.max_ff(len(src_data))
        dst = blocks.vector_sink_f()
        self.tb.connect(src, s2v, op, dst)
        self.tb.run()
        return dst.data()

    def test_001(self):
        # Mixed positive and negative values.
        src_data = (0, 0.2, -0.3, 0, 12, 0)
        expected_result = (float(max(src_data)),)
        self.assertEqual(expected_result, self._run_max(src_data))

    def test_002(self):
        # All-negative input: the maximum must still be found correctly.
        src_data = (-100, -99, -98, -97, -96, -1)
        expected_result = (float(max(src_data)),)
        self.assertEqual(expected_result, self._run_max(src_data))
# Run the QA suite under the GNU Radio test harness, writing XML results.
if __name__ == '__main__':
    gr_unittest.run(test_max, "test_max.xml")
| gpl-3.0 |
gistic/PublicSpatialImpala | thirdparty/thrift-0.9.0/test/py.twisted/test_suite.py | 42 | 5364 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys, glob, time
# Make the generated twisted bindings and the in-tree thrift library
# importable without installing them.
sys.path.insert(0, './gen-py.twisted')
sys.path.insert(0, glob.glob('../../lib/py/build/lib.*')[0])
from ThriftTest import ThriftTest
from ThriftTest.ttypes import *
from thrift.transport import TTwisted
from thrift.protocol import TBinaryProtocol
from twisted.trial import unittest
from twisted.internet import defer, reactor
from twisted.internet.protocol import ClientCreator
from zope.interface import implements
import random
class TestHandler:
    """Server-side handler implementing the ThriftTest service.

    Most methods simply echo their argument back to the caller.
    """
    implements(ThriftTest.Iface)

    def __init__(self):
        # Queue used by the test case to observe oneway calls firing.
        self.onewaysQueue = defer.DeferredQueue()

    def testVoid(self):
        pass

    def testString(self, s):
        return s

    def testByte(self, b):
        return b

    def testI16(self, i16):
        return i16

    def testI32(self, i32):
        return i32

    def testI64(self, i64):
        return i64

    def testDouble(self, dub):
        return dub

    def testStruct(self, thing):
        return thing

    def testException(self, s):
        if s == 'Xception':
            x = Xception()
            x.errorCode = 1001
            x.message = s
            raise x
        elif s == "throw_undeclared":
            raise ValueError("foo")

    def testOneway(self, seconds):
        def fireOneway(t):
            self.onewaysQueue.put((t, time.time(), seconds))
        reactor.callLater(seconds, fireOneway, time.time())
        # BUG FIX: the original ended with "return d", but no "d" was ever
        # defined, so every oneway call raised a NameError.  Oneway methods
        # have no response, so return nothing.

    def testNest(self, thing):
        return thing

    def testMap(self, thing):
        return thing

    def testSet(self, thing):
        return thing

    def testList(self, thing):
        return thing

    def testEnum(self, thing):
        return thing

    def testTypedef(self, thing):
        return thing
class ThriftTestCase(unittest.TestCase):
    """End-to-end tests of the Twisted Thrift transport/binary protocol."""

    @defer.inlineCallbacks
    def setUp(self):
        # Serve the ThriftTest processor on an ephemeral localhost port and
        # connect a client protocol to it.
        self.handler = TestHandler()
        self.processor = ThriftTest.Processor(self.handler)
        self.pfactory = TBinaryProtocol.TBinaryProtocolFactory()
        self.server = reactor.listenTCP(0,
            TTwisted.ThriftServerFactory(self.processor,
            self.pfactory), interface="127.0.0.1")
        self.portNo = self.server.getHost().port
        self.txclient = yield ClientCreator(reactor,
            TTwisted.ThriftClientProtocol,
            ThriftTest.Client,
            self.pfactory).connectTCP("127.0.0.1", self.portNo)
        self.client = self.txclient.client

    @defer.inlineCallbacks
    def tearDown(self):
        yield self.server.stopListening()
        self.txclient.transport.loseConnection()

    @defer.inlineCallbacks
    def testVoid(self):
        self.assertEquals((yield self.client.testVoid()), None)

    @defer.inlineCallbacks
    def testString(self):
        self.assertEquals((yield self.client.testString('Python')), 'Python')

    @defer.inlineCallbacks
    def testByte(self):
        self.assertEquals((yield self.client.testByte(63)), 63)

    @defer.inlineCallbacks
    def testI32(self):
        self.assertEquals((yield self.client.testI32(-1)), -1)
        self.assertEquals((yield self.client.testI32(0)), 0)

    @defer.inlineCallbacks
    def testI64(self):
        self.assertEquals((yield self.client.testI64(-34359738368)), -34359738368)

    @defer.inlineCallbacks
    def testDouble(self):
        self.assertEquals((yield self.client.testDouble(-5.235098235)), -5.235098235)

    @defer.inlineCallbacks
    def testStruct(self):
        # Round-trip a struct and check every field survives the wire.
        x = Xtruct()
        x.string_thing = "Zero"
        x.byte_thing = 1
        x.i32_thing = -3
        x.i64_thing = -5
        y = yield self.client.testStruct(x)
        self.assertEquals(y.string_thing, "Zero")
        self.assertEquals(y.byte_thing, 1)
        self.assertEquals(y.i32_thing, -3)
        self.assertEquals(y.i64_thing, -5)

    @defer.inlineCallbacks
    def testException(self):
        yield self.client.testException('Safe')
        try:
            yield self.client.testException('Xception')
            self.fail("should have gotten exception")
        except Xception, x:
            self.assertEquals(x.errorCode, 1001)
            self.assertEquals(x.message, 'Xception')
        try:
            yield self.client.testException("throw_undeclared")
            self.fail("should have thrown exception")
        except Exception: # type is undefined
            pass

    @defer.inlineCallbacks
    def testOneway(self):
        # The handler records (start, end, delay); the elapsed time should
        # roughly match the requested delay.
        yield self.client.testOneway(2)
        start, end, seconds = yield self.handler.onewaysQueue.get()
        self.assertAlmostEquals(seconds, (end - start), places=2)
| apache-2.0 |
50wu/gpdb | contrib/unaccent/generate_unaccent_rules.py | 7 | 12976 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This script builds unaccent.rules on standard output when given the
# contents of UnicodeData.txt [1] and Latin-ASCII.xml [2] given as
# arguments. Optionally includes ligature expansion and Unicode CLDR
# Latin-ASCII transliterator, enabled by default, this can be disabled
# with "--no-ligatures-expansion" command line option.
#
# The approach is to use the Unicode decomposition data to identify
# precomposed codepoints that are equivalent to a ligature of several
# letters, or a base letter with any number of diacritical marks.
#
# This approach handles most letters with diacritical marks and some
# ligatures. However, several characters (notably a majority of
# ligatures) don't have decomposition. To handle all these cases, one can
# use a standard Unicode transliterator available in Common Locale Data
# Repository (CLDR): Latin-ASCII. This transliterator associates Unicode
# characters to ASCII-range equivalent. Unless "--no-ligatures-expansion"
# option is enabled, the XML file of this transliterator [2] -- given as a
# command line argument -- will be parsed and used.
#
# Ideally you should use the latest release for each data set. For
# Latin-ASCII.xml, the latest data sets released can be browsed directly
# via [3]. Note that this script is compatible with at least release 29.
#
# [1] http://unicode.org/Public/8.0.0/ucd/UnicodeData.txt
# [2] http://unicode.org/cldr/trac/export/14746/tags/release-34/common/transforms/Latin-ASCII.xml
# [3] https://unicode.org/cldr/trac/browser/tags
# BEGIN: Python 2/3 compatibility - remove when Python 2 compatibility dropped
# The approach is to be Python3 compatible with Python2 "backports".
from __future__ import print_function
from __future__ import unicode_literals
import codecs
import sys
if sys.version_info[0] <= 2:
# Encode stdout as UTF-8, so we can just print to it
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
# Map Python 2's chr to unichr
chr = unichr
# Python 2 and 3 compatible bytes call
def bytes(source, encoding='ascii', errors='strict'):
    # Python 2 shim: emulate Python 3's bytes(str, encoding) by
    # delegating to str.encode (only defined when running under py2).
    return source.encode(encoding=encoding, errors=errors)
# END: Python 2/3 compatibility - remove when Python 2 compatibility dropped
import re
import argparse
import sys
import xml.etree.ElementTree as ET
# The ranges of Unicode characters that we consider to be "plain letters".
# For now we are being conservative by including only Latin and Greek. This
# could be extended in future based on feedback from people with relevant
# language knowledge.
# Inclusive (start, end) codepoint ranges treated as "plain letters".
PLAIN_LETTER_RANGES = ((ord('a'), ord('z')),  # Latin lower case
                       (ord('A'), ord('Z')),  # Latin upper case
                       (0x03b1, 0x03c9),  # GREEK SMALL LETTER ALPHA..OMEGA
                       (0x0391, 0x03a9))  # GREEK CAPITAL LETTER ALPHA..OMEGA

# Combining marks follow a "base" character, and result in a composite
# character. Example: "U&'A\0300'" produces "À". There are three types of
# combining marks: enclosing (Me), non-spacing combining (Mn), spacing
# combining (Mc). We identify the ranges of marks we feel safe removing.
# References:
#   https://en.wikipedia.org/wiki/Combining_character
#   https://www.unicode.org/charts/PDF/U0300.pdf
#   https://www.unicode.org/charts/PDF/U20D0.pdf
COMBINING_MARK_RANGES = ((0x0300, 0x0362),  # Mn: Accents, IPA
                         (0x20dd, 0x20E0),  # Me: Symbols
                         (0x20e2, 0x20e4),)  # Me: Screen, keycap, triangle
def print_record(codepoint, letter):
    """Emit one unaccent rule on stdout: the source character, followed
    by a TAB and the replacement text when there is one."""
    line = chr(codepoint)
    if letter:
        line += "\t" + letter
    print(line)
class Codepoint:
    """One entry parsed from UnicodeData.txt: the codepoint, its general
    category, and the codepoints of its canonical decomposition."""
    def __init__(self, id, general_category, combining_ids):
        self.id = id  # integer codepoint value
        self.general_category = general_category  # e.g. "Lu", "Mn"
        self.combining_ids = combining_ids  # decomposition codepoints (may be empty)
def is_mark_to_remove(codepoint):
    """Return true if this is a combining mark to remove."""
    if not is_mark(codepoint):
        return False
    return any(lo <= codepoint.id <= hi
               for lo, hi in COMBINING_MARK_RANGES)
def is_plain_letter(codepoint):
    """Return true if codepoint represents a "plain letter" (a Latin or
    Greek base letter with no diacritics)."""
    return any(lo <= codepoint.id <= hi
               for lo, hi in PLAIN_LETTER_RANGES)
def is_mark(codepoint):
    """Returns true for diacritical marks (combining codepoints):
    general categories Mn, Me and Mc."""
    return codepoint.general_category in {"Mn", "Me", "Mc"}
def is_letter_with_marks(codepoint, table):
    """Returns true for letters combined with one or more marks.

    The decomposition of such a letter is laid out as
    [base, mark, mark, ...]; `table` maps codepoint id -> Codepoint.
    """
    # See http://www.unicode.org/reports/tr44/tr44-14.html#General_Category_Values

    # Letter may have no combining characters, in which case it has
    # no marks.
    if len(codepoint.combining_ids) == 1:
        return False

    # A letter without diacritical marks has none of them.
    if any(is_mark(table[i]) for i in codepoint.combining_ids[1:]) is False:
        return False

    # Check if the base letter of this letter has marks.
    # (Recursive: the base may itself decompose into base + marks.)
    codepoint_base = codepoint.combining_ids[0]
    if (is_plain_letter(table[codepoint_base]) is False and \
        is_letter_with_marks(table[codepoint_base], table) is False):
        return False

    return True
def is_letter(codepoint, table):
    """Return true for letter with or without diacritical marks."""
    if is_plain_letter(codepoint):
        return True
    return is_letter_with_marks(codepoint, table)
def get_plain_letter(codepoint, table):
    """Return the base codepoint without marks. If this codepoint has more
    than one combining character, do a recursive lookup on the table to
    find out its plain base letter.

    Precondition: codepoint is a plain letter or a letter with marks;
    anything else trips the assertions below.
    """
    if is_letter_with_marks(codepoint, table):
        if len(table[codepoint.combining_ids[0]].combining_ids) > 1:
            # Base decomposes further; recurse until a plain letter.
            return get_plain_letter(table[codepoint.combining_ids[0]], table)
        elif is_plain_letter(table[codepoint.combining_ids[0]]):
            return table[codepoint.combining_ids[0]]

        # Should not come here
        assert(False)
    elif is_plain_letter(codepoint):
        return codepoint

    # Should not come here
    assert(False)
def is_ligature(codepoint, table):
    """Return true for letters combined with letters (every component of
    the decomposition is itself a letter)."""
    for component_id in codepoint.combining_ids:
        if not is_letter(table[component_id], table):
            return False
    return True
def get_plain_letters(codepoint, table):
    """Return a list of plain letters from a ligature, in order."""
    assert is_ligature(codepoint, table)
    letters = []
    for component_id in codepoint.combining_ids:
        letters.append(get_plain_letter(table[component_id], table))
    return letters
def parse_cldr_latin_ascii_transliterator(latinAsciiFilePath):
    """Parse the XML file and return a set of tuples (src, trg), where "src"
    is the original character and "trg" the substitute."""
    charactersSet = set()

    # RegEx to parse rules: "<src> \u2192 <trg> ;" where src is a literal
    # char or a \uXXXX escape and trg is bare or single-quoted.
    rulePattern = re.compile(r'^(?:(.)|(\\u[0-9a-fA-F]{4})) \u2192 (?:\'(.+)\'|(.+)) ;')

    # construct tree from XML
    transliterationTree = ET.parse(latinAsciiFilePath)
    transliterationTreeRoot = transliterationTree.getroot()

    # Fetch all the transliteration rules. Since release 29 of Latin-ASCII.xml
    # all the transliteration rules are located in a single tRule block with
    # all rules separated into separate lines.
    blockRules = transliterationTreeRoot.findall("./transforms/transform/tRule")
    assert(len(blockRules) == 1)

    # Split the block of rules into one element per line.
    rules = blockRules[0].text.splitlines()

    # And finish the processing of each individual rule.
    for rule in rules:
        matches = rulePattern.search(rule)

        # The regular expression capture four groups corresponding
        # to the characters.
        #
        # Group 1: plain "src" char. Empty if group 2 is not.
        # Group 2: unicode-escaped "src" char (e.g. "\u0110"). Empty if group 1 is not.
        #
        # Group 3: plain "trg" char. Empty if group 4 is not.
        # Group 4: plain "trg" char between quotes. Empty if group 3 is not.
        if matches is not None:
            src = matches.group(1) if matches.group(1) is not None else bytes(matches.group(2), 'UTF-8').decode('unicode-escape')
            trg = matches.group(3) if matches.group(3) is not None else matches.group(4)

            # "'" and """ are escaped
            trg = trg.replace("\\'", "'").replace('\\"', '"')

            # the parser of unaccent only accepts non-whitespace characters
            # for "src" and "trg" (see unaccent.c)
            if not src.isspace() and not trg.isspace():
                charactersSet.add((ord(src), trg))

    return charactersSet
def special_cases():
    """Hand-maintained (codepoint, replacement) pairs that neither
    UnicodeData.txt decompositions nor Latin-ASCII.xml provide."""
    return {
        # Cyrillic: IO maps to IE (no canonical decomposition exists)
        (0x0401, u"\u0415"),  # CYRILLIC CAPITAL LETTER IO
        (0x0451, u"\u0435"),  # CYRILLIC SMALL LETTER IO
        # "Letterlike Symbols" Unicode Block (U+2100 to U+214F)
        (0x2103, u"\xb0C"),   # DEGREE CELSIUS
        (0x2109, u"\xb0F"),   # DEGREE FAHRENHEIT
        (0x2117, "(P)"),      # SOUND RECORDING COPYRIGHT
    }
def main(args):
    """Read UnicodeData.txt (and, unless disabled, Latin-ASCII.xml) and
    print the unaccent rules on standard output.

    Fixes over the previous version: the UnicodeData file is now closed
    promptly via a `with` block (it was previously never closed), and
    the local list formerly named `all` no longer shadows the builtin.
    """
    # http://www.unicode.org/reports/tr44/tr44-14.html#Character_Decomposition_Mappings
    decomposition_type_pattern = re.compile(" *<[^>]*> *")

    table = {}            # codepoint id -> Codepoint
    all_codepoints = []   # every parsed Codepoint, in file order

    # unordered set for ensure uniqueness
    charactersSet = set()

    # read everything we need into memory, closing the file when done
    with open(args.unicodeDataFilePath, 'r') as unicodeDataFile:
        for line in unicodeDataFile:
            fields = line.split(";")
            if len(fields) > 5:
                # http://www.unicode.org/reports/tr44/tr44-14.html#UnicodeData.txt
                general_category = fields[2]
                decomposition = fields[5]
                decomposition = re.sub(decomposition_type_pattern, ' ', decomposition)
                id = int(fields[0], 16)
                combining_ids = [int(s, 16) for s in decomposition.split(" ") if s != ""]
                codepoint = Codepoint(id, general_category, combining_ids)
                table[id] = codepoint
                all_codepoints.append(codepoint)

    # walk through all the codepoints looking for interesting mappings
    for codepoint in all_codepoints:
        if codepoint.general_category.startswith('L') and \
           len(codepoint.combining_ids) > 1:
            if is_letter_with_marks(codepoint, table):
                charactersSet.add((codepoint.id,
                                   chr(get_plain_letter(codepoint, table).id)))
            elif args.noLigaturesExpansion is False and is_ligature(codepoint, table):
                charactersSet.add((codepoint.id,
                                   "".join(chr(combining_codepoint.id)
                                           for combining_codepoint
                                           in get_plain_letters(codepoint, table))))
        elif is_mark_to_remove(codepoint):
            # Bare combining mark: map it to nothing (removal).
            charactersSet.add((codepoint.id, None))

    # add CLDR Latin-ASCII characters
    if not args.noLigaturesExpansion:
        charactersSet |= parse_cldr_latin_ascii_transliterator(args.latinAsciiFilePath)
        charactersSet |= special_cases()

    # sort by source codepoint for a stable, readable rules file
    charactersList = sorted(charactersSet, key=lambda characterPair: characterPair[0])
    for characterPair in charactersList:
        print_record(characterPair[0], characterPair[1])
if __name__ == "__main__":
    # Command-line entry point; see the module header for data-set URLs.
    parser = argparse.ArgumentParser(description='This script builds unaccent.rules on standard output when given the contents of UnicodeData.txt and Latin-ASCII.xml given as arguments.')
    parser.add_argument("--unicode-data-file", help="Path to formatted text file corresponding to UnicodeData.txt. See <http://unicode.org/Public/8.0.0/ucd/UnicodeData.txt>.", type=str, required=True, dest='unicodeDataFilePath')
    parser.add_argument("--latin-ascii-file", help="Path to XML file from Unicode Common Locale Data Repository (CLDR) corresponding to Latin-ASCII transliterator (Latin-ASCII.xml). See <http://unicode.org/cldr/trac/export/12304/tags/release-28/common/transforms/Latin-ASCII.xml>.", type=str, dest='latinAsciiFilePath')
    parser.add_argument("--no-ligatures-expansion", help="Do not expand ligatures and do not use Unicode CLDR Latin-ASCII transliterator. By default, this option is not enabled and \"--latin-ascii-file\" argument is required. If this option is enabled, \"--latin-ascii-file\" argument is optional and ignored.", action="store_true", dest='noLigaturesExpansion')
    args = parser.parse_args()

    # --latin-ascii-file is mandatory unless ligature expansion is disabled.
    if args.noLigaturesExpansion is False and args.latinAsciiFilePath is None:
        sys.stderr.write('You must specify the path to Latin-ASCII transliterator file with \"--latin-ascii-file\" option or use \"--no-ligatures-expansion\" option. Use \"-h\" option for help.')
        sys.exit(1)

    main(args)
| apache-2.0 |
kartikshah1/Test | venv/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.py | 341 | 1877 | # -*- coding: utf-8 -*-
"""
requests.exceptions
~~~~~~~~~~~~~~~~~~~
This module contains the set of Requests' exceptions.
"""
from .packages.urllib3.exceptions import HTTPError as BaseHTTPError
class RequestException(IOError):
    """There was an ambiguous exception that occurred while handling your
    request."""

    def __init__(self, *args, **kwargs):
        """Initialize RequestException, capturing the optional `request`
        and `response` keyword arguments before delegating to IOError."""
        self.response = kwargs.pop('response', None)
        self.request = kwargs.pop('request', None)
        # If no request was given explicitly, borrow the one attached
        # to the response (when present).
        if (self.response is not None and not self.request
                and hasattr(self.response, 'request')):
            self.request = self.response.request
        super(RequestException, self).__init__(*args, **kwargs)
# Concrete exception types.  All inherit from RequestException so callers
# can catch the whole family with a single `except RequestException`.

class HTTPError(RequestException):
    """An HTTP error occurred."""


class ConnectionError(RequestException):
    """A Connection error occurred."""


class ProxyError(ConnectionError):
    """A proxy error occurred."""


class SSLError(ConnectionError):
    """An SSL error occurred."""


class Timeout(RequestException):
    """The request timed out."""


class URLRequired(RequestException):
    """A valid URL is required to make a request."""


class TooManyRedirects(RequestException):
    """Too many redirects."""


class MissingSchema(RequestException, ValueError):
    """The URL schema (e.g. http or https) is missing."""


class InvalidSchema(RequestException, ValueError):
    """See defaults.py for valid schemas."""


class InvalidURL(RequestException, ValueError):
    """The URL provided was somehow invalid."""


class ChunkedEncodingError(RequestException):
    """The server declared chunked encoding but sent an invalid chunk."""


class ContentDecodingError(RequestException, BaseHTTPError):
    """Failed to decode response content."""
pkdevbox/trac | trac/ticket/tests/admin.py | 1 | 13172 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import unittest
from trac.resource import ResourceNotFound
from trac.test import EnvironmentStub, Mock, MockPerm, locale_en
from trac.ticket.admin import ComponentAdminPanel, MilestoneAdminPanel, \
PriorityAdminPanel, ResolutionAdminPanel, \
SeverityAdminPanel, TicketTypeAdminPanel, \
VersionAdminPanel
from trac.ticket.model import Component, Milestone, Priority, Resolution,\
Severity, Type, Version
from trac.util.datefmt import utc
from trac.web.api import RequestDone, _RequestArgs
class BaseTestCase(unittest.TestCase):
    """Shared environment setup and request factory for the admin-panel
    test cases in this module."""

    def setUp(self):
        self.env = EnvironmentStub(default_data=True)

    def tearDown(self):
        self.env.reset_db()

    def _create_request(self, authname='anonymous', **kwargs):
        # Build a minimal mock of a Trac request; individual tests
        # override attributes (method, args, ...) via keyword arguments.
        kw = {'path_info': '/', 'perm': MockPerm(), 'args': _RequestArgs(),
              'href': self.env.href, 'abs_href': self.env.abs_href,
              'tz': utc, 'locale': None, 'lc_time': locale_en,
              'session': {}, 'authname': authname,
              'chrome': {'notices': [], 'warnings': []},
              'method': None, 'get_header': lambda v: None, 'is_xhr': False,
              'form_token': None}
        if 'args' in kwargs:
            kw['args'].update(kwargs.pop('args'))
        kw.update(kwargs)

        def redirect(url, permanent=False):
            # Admin panels redirect after a successful POST; RequestDone
            # is how trac terminates request processing.
            raise RequestDone

        return Mock(add_redirect_listener=lambda x: [].append(x),
                    redirect=redirect, **kw)
class ComponentAdminPanelTestCase(BaseTestCase):
    """Tests for add/remove/default operations of the component admin
    panel.  Successful POSTs redirect, surfacing as RequestDone."""

    def test_add_component(self):
        cap = ComponentAdminPanel(self.env)
        name, owner = 'component3', 'user3'
        req = self._create_request(method='POST',
                                   args={'name': name, 'owner': owner,
                                         'add': True})

        self.assertRaises(ResourceNotFound, Component, self.env, name)
        self.assertRaises(RequestDone, cap.render_admin_panel, req,
                          'ticket', 'component', None)
        component = Component(self.env, name)
        self.assertEqual(name, component.name)
        self.assertEqual(owner, component.owner)

    def test_remove_component(self):
        cap = ComponentAdminPanel(self.env)
        name = 'component2'
        req = self._create_request(method='POST',
                                   args={'sel': name, 'remove': True})

        # 'component2' exists in the default data before removal.
        component = Component(self.env, name)
        self.assertEqual(name, component.name)
        self.assertEqual('somebody', component.owner)
        self.assertRaises(RequestDone, cap.render_admin_panel, req,
                          'ticket', 'component', None)
        self.assertRaises(ResourceNotFound, Component, self.env, name)

    def test_remove_multiple_components(self):
        cap = ComponentAdminPanel(self.env)
        names = ['component1', 'component2']
        req = self._create_request(method='POST',
                                   args={'sel': names, 'remove': True})

        for name in names:
            component = Component(self.env, name)
            self.assertEqual(name, component.name)
            self.assertEqual('somebody', component.owner)
        self.assertRaises(RequestDone, cap.render_admin_panel, req,
                          'ticket', 'component', None)
        for name in names:
            self.assertRaises(ResourceNotFound, Component, self.env, name)

    def test_set_default_component(self):
        name = 'component2'
        config_key = 'default_component'
        cap = ComponentAdminPanel(self.env)
        req = self._create_request(method='POST',
                                   args={'default': name, 'apply': True})

        self.assertRaises(RequestDone, cap.render_admin_panel, req,
                          'ticket', 'component', None)
        self.assertEqual(name, self.env.config.get('ticket', config_key))

    def test_remove_default_component(self):
        name = 'component2'
        cap = ComponentAdminPanel(self.env)
        config_key = 'default_component'
        self.env.config.set('ticket', config_key, name)
        req = self._create_request(method='POST',
                                   args={'sel': name, 'remove': True})

        # Removing the default component clears the config entry too.
        self.assertRaises(RequestDone, cap.render_admin_panel, req,
                          'ticket', 'component', None)
        self.assertEqual('', self.env.config.get('ticket', config_key))
class MilestoneAdminPanelTestCase(BaseTestCase):
    """Tests for the milestone admin panel.

    The local panel variable was previously named `map`, shadowing the
    builtin; it has been renamed to `panel`.
    """

    def test_add_milestone(self):
        name = 'milestone5'
        panel = MilestoneAdminPanel(self.env)
        req = self._create_request(method='POST',
                                   args={'name': name, 'add': True})
        self.assertRaises(ResourceNotFound, Milestone, self.env, name)
        self.assertRaises(RequestDone, panel.render_admin_panel, req,
                          'ticket', 'milestone', None)
        milestone = Milestone(self.env, name)
        self.assertEqual(name, milestone.name)

    def test_set_default_milestone(self):
        name = 'milestone2'
        config_key = 'default_milestone'
        panel = MilestoneAdminPanel(self.env)
        req = self._create_request(method='POST',
                                   args={'ticket_default': name,
                                         'apply': True})
        self.assertRaises(RequestDone, panel.render_admin_panel, req,
                          'ticket', 'milestone', None)
        self.assertEqual(name, self.env.config.get('ticket', config_key))

    def test_set_default_retarget_to(self):
        name = 'milestone2'
        config_key = 'default_retarget_to'
        panel = MilestoneAdminPanel(self.env)
        req = self._create_request(method='POST',
                                   args={'retarget_default': name,
                                         'apply': True})
        self.assertRaises(RequestDone, panel.render_admin_panel, req,
                          'ticket', 'milestone', None)
        self.assertEqual(name, self.env.config.get('milestone', config_key))

    def test_remove_default_milestone(self):
        name = 'milestone2'
        panel = MilestoneAdminPanel(self.env)
        self.env.config.set('ticket', 'default_milestone', 'milestone2')
        self.env.config.set('milestone', 'default_retarget_to', 'milestone2')

        req = self._create_request(method='POST',
                                   args={'sel': name,
                                         'remove': True})
        # Removing the milestone must clear both config entries.
        self.assertRaises(RequestDone, panel.render_admin_panel, req,
                          'ticket', 'milestone', None)
        self.assertEqual('', self.env.config.get('ticket',
                                                 'default_milestone'))
        self.assertEqual('', self.env.config.get('milestone',
                                                 'default_retarget_to'))
class AbstractEnumTestCase(BaseTestCase):
    """Shared add/set-default/remove-default scenarios for the enum-based
    admin panels.  Subclasses set the class attributes `type` (panel path
    segment and config-key suffix) and `cls` (the enum model class).
    NOTE: these attribute names shadow the builtins `type`/`cls` idiom,
    but renaming them would break the subclasses below."""

    type = None
    cls = None

    def _test_add(self, panel, name):
        req = self._create_request(method='POST',
                                   args={'name': name, 'add': True})
        self.assertRaises(ResourceNotFound, self.cls, self.env, name)
        self.assertRaises(RequestDone, panel.render_admin_panel, req,
                          'ticket', self.type, None)
        item = self.cls(self.env, name)
        self.assertEqual(name, item.name)

    def _test_set_default(self, panel, name):
        config_key = 'default_' + self.type
        req = self._create_request(method='POST',
                                   args={'default': name, 'apply': True})
        # The panel form posts one value_<n> field per existing enum item.
        for item in self.cls.select(self.env):
            req.args.update({'value_' + str(item.value): str(item.value)})
        self.assertRaises(RequestDone, panel.render_admin_panel, req,
                          'ticket', self.type, None)
        self.assertEqual(name, self.env.config.get('ticket', config_key))

    def _test_remove_default(self, panel, name):
        config_key = 'default_' + self.type
        self.env.config.set('ticket', config_key, name)
        req = self._create_request(method='POST',
                                   args={'sel': name, 'remove': True})
        self.assertRaises(RequestDone, panel.render_admin_panel, req,
                          'ticket', self.type, None)
        self.assertEqual('', self.env.config.get('ticket', config_key))
class PriorityAdminPanelTestCase(AbstractEnumTestCase):
    """Priority panel: delegates to the shared enum scenarios."""
    type = 'priority'
    cls = Priority

    def test_add_priority(self):
        ap = PriorityAdminPanel(self.env)
        self._test_add(ap, 'priority 1')

    def test_set_default_priority(self):
        ap = PriorityAdminPanel(self.env)
        self._test_set_default(ap, 'critical')

    def test_remove_default_priority(self):
        ap = PriorityAdminPanel(self.env)
        self._test_remove_default(ap, 'critical')
class ResolutionAdminPanelTestCase(AbstractEnumTestCase):
    """Resolution panel: delegates to the shared enum scenarios."""
    type = 'resolution'
    cls = Resolution

    def test_add_resolution(self):
        ap = ResolutionAdminPanel(self.env)
        self._test_add(ap, 'resolution 1')

    def test_set_default_resolution(self):
        ap = ResolutionAdminPanel(self.env)
        self._test_set_default(ap, 'invalid')

    def test_remove_default_resolution(self):
        ap = ResolutionAdminPanel(self.env)
        self._test_remove_default(ap, 'invalid')
class SeverityAdminPanelTestCase(AbstractEnumTestCase):
    """Severity panel: the default data ships no severities, so each
    default-related test inserts one first."""
    type = 'severity'
    cls = Severity

    def test_add_severity(self):
        ap = SeverityAdminPanel(self.env)
        self._test_add(ap, 'severity 1')

    def test_set_default_severity(self):
        s = Severity(self.env)
        s.name = 'severity 1'
        s.insert()
        ap = SeverityAdminPanel(self.env)
        self._test_set_default(ap, 'severity 1')

    def test_remove_default_severity(self):
        s = Severity(self.env)
        s.name = 'severity 1'
        s.insert()
        ap = SeverityAdminPanel(self.env)
        self._test_remove_default(ap, 'severity 1')
class TicketTypeAdminPanelTestCase(AbstractEnumTestCase):
    """Ticket-type panel: delegates to the shared enum scenarios."""
    type = 'type'
    cls = Type

    def test_add_type(self):
        ap = TicketTypeAdminPanel(self.env)
        self._test_add(ap, 'improvement')

    def test_set_default_type(self):
        ap = TicketTypeAdminPanel(self.env)
        self._test_set_default(ap, 'task')

    def test_remove_default_type(self):
        ap = TicketTypeAdminPanel(self.env)
        self._test_remove_default(ap, 'task')
class VersionAdminPanelTestCase(BaseTestCase):
    """Tests for add/set-default/remove-default of the version panel."""

    def test_add_version(self):
        name = '3.0'
        ap = VersionAdminPanel(self.env)
        req = self._create_request(method='POST',
                                   args={'name': name, 'add': True})
        self.assertRaises(ResourceNotFound, Version, self.env, name)
        self.assertRaises(RequestDone, ap.render_admin_panel, req,
                          'ticket', 'version', None)
        version = Version(self.env, name)
        self.assertEqual(name, version.name)

    def test_set_default_version(self):
        name = '1.0'
        ap = VersionAdminPanel(self.env)
        config_key = 'default_version'
        req = self._create_request(method='POST',
                                   args={'default': name, 'apply': True})
        self.assertRaises(RequestDone, ap.render_admin_panel, req,
                          'ticket', 'version', None)
        self.assertEqual(name, self.env.config.get('ticket', config_key))

    def test_remove_default_version(self):
        name = '1.0'
        ap = VersionAdminPanel(self.env)
        config_key = 'default_version'
        self.env.config.set('ticket', config_key, name)
        req = self._create_request(method='POST',
                                   args={'sel': name, 'remove': True})
        self.assertRaises(RequestDone, ap.render_admin_panel, req,
                          'ticket', 'version', None)
        # Expected value first, consistent with the rest of this module
        # (the original had the arguments reversed here).
        self.assertEqual('', self.env.config.get('ticket', config_key))
def suite():
    """Aggregate every admin-panel test case into a single test suite."""
    test_cases = (
        ComponentAdminPanelTestCase,
        MilestoneAdminPanelTestCase,
        PriorityAdminPanelTestCase,
        ResolutionAdminPanelTestCase,
        SeverityAdminPanelTestCase,
        TicketTypeAdminPanelTestCase,
        VersionAdminPanelTestCase,
    )
    result = unittest.TestSuite()
    for case in test_cases:
        result.addTest(unittest.makeSuite(case))
    return result
if __name__ == '__main__':
    # Allow running this module directly with the aggregate suite.
    unittest.main(defaultTest='suite')
| bsd-3-clause |
rackerlabs/heat-pyrax | pyrax/image.py | 12 | 24535 | # -*- coding: utf-8 -*-
# Copyright (c)2014 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from functools import wraps
import pyrax
from pyrax.object_storage import StorageObject
from pyrax.client import BaseClient
import pyrax.exceptions as exc
from pyrax.manager import BaseManager
from pyrax.resource import BaseResource
import pyrax.utils as utils
DEFAULT_FORMAT = "vhd"
def assure_image(fnc):
    """Decorator for methods whose first argument may be either an Image
    object or an image ID: an ID is resolved to an Image before the
    wrapped method runs."""
    @wraps(fnc)
    def _inner(self, img, *args, **kwargs):
        image = img if isinstance(img, Image) else self._manager.get(img)
        return fnc(self, image, *args, **kwargs)
    return _inner
class Image(BaseResource):
    """
    This class represents an Image.
    """
    def __init__(self, manager, info, key=None, loaded=False,
            member_manager_class=None, tag_manager_class=None):
        super(Image, self).__init__(manager, info, key=key, loaded=loaded)
        member_manager_class = member_manager_class or ImageMemberManager
        tag_manager_class = tag_manager_class or ImageTagManager
        # Per-image sub-managers for the members and tags collections;
        # their URIs are scoped to this image's ID.
        self._member_manager = member_manager_class(self.manager.api,
                resource_class=ImageMember, response_key="",
                plural_response_key="members", uri_base="images/%s/members" %
                self.id)
        self._tag_manager = tag_manager_class(self.manager.api,
                resource_class=ImageTag, response_key="",
                plural_response_key="tags", uri_base="images/%s/tags" %
                self.id)
        # Attribute names suppressed from user-facing display
        # (internal/rackspace metadata keys returned by the API).
        self._non_display = [
                "com.rackspace__1__build_core",
                "com.rackspace__1__build_managed",
                "com.rackspace__1__build_rackconnect",
                "com.rackspace__1__options",
                "com.rackspace__1__platform_target",
                "com.rackspace__1__release_build_date",
                "com.rackspace__1__release_id",
                "com.rackspace__1__release_version",
                "com.rackspace__1__source",
                "com.rackspace__1__visible_core",
                "com.rackspace__1__visible_managed",
                "com.rackspace__1__visible_rackconnect",
                "file",
                "instance_type_ephemeral_gb",
                "instance_type_flavorid",
                "instance_type_id",
                "instance_type_memory_mb",
                "instance_type_name",
                "instance_type_root_gb",
                "instance_type_rxtx_factor",
                "instance_type_swap",
                "instance_type_vcpu_weight",
                "instance_type_vcpus",
                "instance_uuid",
                "org.openstack__1__architecture",
                "org.openstack__1__os_distro",
                "org.openstack__1__os_version",
                "rax_activation_profile",
                "rax_managed",
                "rax_options",
                "schema",
                "self",
                ]

    def update(self, value_dict):
        """
        Accepts a dictionary of key/value pairs, where the key is an
        attribute of the image, and the value is the desired new value for
        that image.
        """
        return self.manager.update(self, value_dict)

    def change_name(self, newname):
        """
        Image name can be changed via the update() method. This is simply a
        convenience method.
        """
        return self.update({"name": newname})

    def list_members(self):
        """
        Returns a list of all Members for this image.
        """
        return self._member_manager.list()

    def get_member(self, member):
        """
        Returns the ImageMember object representing the specified member
        """
        return self._member_manager.get(member)

    def add_member(self, project_id):
        """
        Adds the project (tenant) represented by the project_id as a member of
        this image.
        """
        return self._member_manager.create(name=None, project_id=project_id)

    def delete_member(self, project_id):
        """
        Removes the project (tenant) represented by the project_id as a member
        of this image.
        """
        return self._member_manager.delete(project_id)

    def add_tag(self, tag):
        """
        Adds the tag to this image.
        """
        return self._tag_manager.add(tag)

    def delete_tag(self, tag):
        """
        Deletes the tag from this image.
        """
        return self._tag_manager.delete(tag)
class ImageMember(BaseResource):
    """
    This class represents a member (user) of an Image.
    """
    @property
    def id(self):
        # The API keys members by 'member_id'; expose it under the
        # standard 'id' attribute the manager machinery expects.
        return self.member_id
class ImageTag(BaseResource):
    """
    This class represents a tag for an Image.
    """
    # No behavior beyond the BaseResource defaults.
    pass
class ImageTask(BaseResource):
    """
    This class represents a ImageTask.
    """
    # No behavior beyond the BaseResource defaults.
    pass
class ImageManager(BaseManager):
"""
Manager class for an Image.
"""
def _create_body(self, name, metadata=None):
    """Return the request body dict used when creating a resource:
    empty when no metadata is given, otherwise {"metadata": metadata}.
    (The `name` parameter is accepted for manager-interface compatibility
    but is not placed in the body.)"""
    if metadata is None:
        return {}
    return {"metadata": metadata}
def list(self, limit=None, marker=None, name=None, visibility=None,
        member_status=None, owner=None, tag=None, status=None,
        size_min=None, size_max=None, sort_key=None, sort_dir=None,
        return_raw=False):
    """
    Returns a list of resource objects. Pagination is supported through the
    optional 'marker' and 'limit' parameters. Filtering the returned value
    is possible by specifying values for any of the other parameters.
    """
    uri = "/%s" % self.uri_base
    # Build the query string from the supplied filters (presumably
    # dict_to_qs drops None-valued entries -- TODO confirm).
    qs = utils.dict_to_qs(dict(limit=limit, marker=marker, name=name,
            visibility=visibility, member_status=member_status,
            owner=owner, tag=tag, status=status, size_min=size_min,
            size_max=size_max, sort_key=sort_key, sort_dir=sort_dir))
    if qs:
        uri = "%s?%s" % (uri, qs)
    return self._list(uri, return_raw=return_raw)
def list_all(self, name=None, visibility=None, member_status=None,
        owner=None, tag=None, status=None, size_min=None, size_max=None,
        sort_key=None, sort_dir=None):
    """
    Returns all of the images in one call, rather than in paginated batches.
    Follows the 'next' links in the responses until none remains.
    """
    def strip_version(uri):
        """
        The 'next' uri contains a redundant version number. We need to
        strip it to use in the method_get() call.
        """
        # NOTE(review): if "/images" is absent from a non-empty uri,
        # find() returns -1 and this yields the last character only --
        # confirm the API always includes "/images" in 'next' links.
        pos = uri.find("/images")
        return uri[pos:]

    obj_class = self.resource_class
    # First page: ask list() for the raw response so we can read 'next'.
    resp, resp_body = self.list(name=name, visibility=visibility,
            member_status=member_status, owner=owner, tag=tag,
            status=status, size_min=size_min, size_max=size_max,
            sort_key=sort_key, sort_dir=sort_dir, return_raw=True)
    data = resp_body.get(self.plural_response_key, resp_body)
    next_uri = strip_version(resp_body.get("next", ""))
    ret = [obj_class(manager=self, info=res) for res in data if res]
    while next_uri:
        resp, resp_body = self.api.method_get(next_uri)
        data = resp_body.get(self.plural_response_key, resp_body)
        next_uri = strip_version(resp_body.get("next", ""))
        ret.extend([obj_class(manager=self, info=res)
                for res in data if res])
    return ret
def create(self, name, img_format=None, img_container_format=None,
data=None, container=None, obj=None, metadata=None):
"""
Creates a new image with the specified name. The image data can either
be supplied directly in the 'data' parameter, or it can be an image
stored in the object storage service. In the case of the latter, you
can either supply the container and object names, or simply a
StorageObject reference.
You may specify the image and image container formats; if unspecified,
the default of "vhd" for image format and "bare" for image container
format will be used.
NOTE: This is blocking, and may take a while to complete.
"""
if img_format is None:
img_format = "vhd"
if img_container_format is None:
img_container_format = "bare"
headers = {
"X-Image-Meta-name": name,
"X-Image-Meta-disk_format": img_format,
"X-Image-Meta-container_format": img_container_format,
}
if data:
img_data = data
else:
ident = self.api.identity
region = self.api.region_name
clt = ident.get_client("object_store", region)
if not isinstance(obj, StorageObject):
obj = clt.get_object(container, obj)
img_data = obj.fetch()
uri = "%s/images" % self.uri_base
resp, resp_body = self.api.method_post(uri, headers=headers,
data=img_data)
def update(self, img, value_dict):
"""
Accepts an image reference (object or ID) and dictionary of key/value
pairs, where the key is an attribute of the image, and the value is the
desired new value for that image.
NOTE: There is a bug in Glance where the 'add' operation returns a 409
if the property already exists, which conflicts with the spec. So to
get around this a fresh copy of the image must be retrieved, and the
value of 'op' must be determined based on whether this attribute exists
or not.
"""
img = self.get(img)
uri = "/%s/%s" % (self.uri_base, utils.get_id(img))
body = []
for key, val in value_dict.items():
op = "replace" if key in img.__dict__ else "add"
body.append({"op": op,
"path": "/%s" % key,
"value": val})
headers = {"Content-Type":
"application/openstack-images-v2.1-json-patch"}
resp, resp_body = self.api.method_patch(uri, body=body, headers=headers)
def update_image_member(self, img_id, status):
"""
Updates the image whose ID is given with the status specified. This
must be called by the user whose project_id is in the members for the
image. If called by the owner of the image, an InvalidImageMember
exception will be raised.
Valid values for 'status' include:
pending
accepted
rejected
Any other value will result in an InvalidImageMemberStatus exception
being raised.
"""
if status not in ("pending", "accepted", "rejected"):
raise exc.InvalidImageMemberStatus("The status value must be one "
"of 'accepted', 'rejected', or 'pending'. Received: '%s'" %
status)
api = self.api
project_id = api.identity.tenant_id
uri = "/%s/%s/members/%s" % (self.uri_base, img_id, project_id)
body = {"status": status}
try:
resp, resp_body = self.api.method_put(uri, body=body)
except exc.NotFound as e:
raise exc.InvalidImageMember("The update member request could not "
"be completed. No member request for that image was found.")
class ImageMemberManager(BaseManager):
    """
    Manager class for members (users) of an Image.
    """
    def _create_body(self, name, project_id):
        """
        Used to create the dict required to add a member to this image.
        """
        body = {"member": project_id}
        return body

    def create(self, name, *args, **kwargs):
        """
        Need to wrap the default call to handle exceptions.
        """
        try:
            return super(ImageMemberManager, self).create(name, *args, **kwargs)
        except Exception as e:
            # Bug fix: not every exception carries an 'http_status'
            # attribute; accessing it directly raised AttributeError and
            # masked the original failure. getattr() with a default keeps
            # the original exception propagating instead.
            if getattr(e, "http_status", None) == 403:
                raise exc.UnsharableImage("You cannot share a public image.")
            else:
                raise
class ImageTagManager(BaseManager):
    """
    Manager class for Image tags.
    """
    def _create_body(self, name):
        """
        Not used; the add() method is used with a PUT request.
        """
        return {}

    def add(self, tag):
        """
        Adds the given tag by issuing a PUT request to '<uri_base>/<tag>'.

        NOTE(review): uri_base presumably scopes the tag to a specific
        image - confirm against how this manager is constructed.
        """
        uri = "/%s/%s" % (self.uri_base, tag)
        resp, resp_body = self.api.method_put(uri)
class ImageTasksManager(BaseManager):
    """
    Manager class for ImageTasks.
    """
    def _create_body(self, name, img=None, cont=None, img_format=None,
            img_name=None):
        """
        Used to create a new task. Since tasks don't have names, the required
        'name' parameter is used for the type of task: 'import' or 'export'.
        """
        img = utils.get_id(img)
        cont = utils.get_name(cont)
        body = {"type": name}
        if name == "export":
            body["input"] = {
                "image_uuid": img,
                "receiving_swift_container": cont}
        else:
            # Import: the source object in swift is '<container>/<obj name>'.
            nm = "%s/%s" % (cont, utils.get_name(img))
            body["input"] = {
                "image_properties": {"name": img_name or img},
                "import_from": nm,
                "import_from_format": img_format or DEFAULT_FORMAT}
        return body

    def create(self, name, *args, **kwargs):
        """
        Standard task creation, but first check for the existence of the
        containers, and raise an exception if they don't exist.
        """
        cont = kwargs.get("cont")
        if cont:
            # Verify that it exists. If it doesn't, a NoSuchContainer exception
            # will be raised.
            api = self.api
            rgn = api.region_name
            cf = api.identity.object_store[rgn].client
            cf.get_container(cont)
        return super(ImageTasksManager, self).create(name, *args, **kwargs)
class JSONSchemaManager(BaseManager):
    """
    Manager class for retrieving JSON schemas.
    """
    def _create_body(self, name):
        """
        Not used.
        """
        pass

    def _fetch_schema(self, path):
        """
        GET the schema document at '<uri_base>/<path>' and return its body.
        """
        uri = "/%s/%s" % (self.uri_base, path)
        resp, resp_body = self.api.method_get(uri)
        return resp_body

    def images(self):
        """
        Returns a json-schema document that represents an image members entity,
        which is a container of image member entities.
        """
        return self._fetch_schema("images")

    def image(self):
        """
        Returns a json-schema document that represents a single image entity.
        """
        return self._fetch_schema("image")

    def image_members(self):
        """
        Returns a json-schema document that represents an image members entity
        (a container of member entities).
        """
        return self._fetch_schema("members")

    def image_member(self):
        """
        Returns a json-schema document that represents an image member entity.
        (a container of member entities).
        """
        return self._fetch_schema("member")

    def image_tasks(self):
        """
        Returns a json-schema document that represents a container of tasks
        entities.
        """
        return self._fetch_schema("tasks")

    def image_task(self):
        """
        Returns a json-schema document that represents an task entity.
        """
        return self._fetch_schema("task")
class ImageClient(BaseClient):
    """
    This is the primary class for interacting with Images.
    """
    name = "Images"

    def _configure_manager(self):
        """
        Create the managers that handle images, image tasks, and schemas.
        (Docstring previously said "queues" - a copy/paste error.)
        """
        self._manager = ImageManager(self, resource_class=Image,
                response_key="", plural_response_key="images",
                uri_base="images")
        self._tasks_manager = ImageTasksManager(self, resource_class=ImageTask,
                response_key="", plural_response_key="tasks",
                uri_base="tasks")
        self._schema_manager = JSONSchemaManager(self, resource_class=None,
                response_key="", plural_response_key="", uri_base="schemas")

    def list(self, limit=None, marker=None, name=None, visibility=None,
            member_status=None, owner=None, tag=None, status=None,
            size_min=None, size_max=None, sort_key=None, sort_dir=None):
        """
        Returns a list of resource objects. Pagination is supported through the
        optional 'marker' and 'limit' parameters. Filtering the returned value
        is possible by specifying values for any of the other parameters.
        """
        return self._manager.list(limit=limit, marker=marker, name=name,
                visibility=visibility, member_status=member_status,
                owner=owner, tag=tag, status=status, size_min=size_min,
                size_max=size_max, sort_key=sort_key, sort_dir=sort_dir)

    def list_all(self, name=None, visibility=None, member_status=None,
            owner=None, tag=None, status=None, size_min=None, size_max=None,
            sort_key=None, sort_dir=None):
        """
        Returns all of the images in one call, rather than in paginated batches.
        The same filtering options available in list() apply here, with the
        obvious exception of limit and marker.
        """
        return self._manager.list_all(name=name, visibility=visibility,
                member_status=member_status, owner=owner, tag=tag,
                status=status, size_min=size_min, size_max=size_max,
                sort_key=sort_key, sort_dir=sort_dir)

    def update(self, img, value_dict):
        """
        Accepts an image reference (object or ID) and dictionary of key/value
        pairs, where the key is an attribute of the image, and the value is the
        desired new value for that image.
        """
        return self._manager.update(img, value_dict)

    def create(self, name, img_format=None, data=None, container=None,
            obj=None, metadata=None):
        """
        Creates a new image with the specified name. The image data can either
        be supplied directly in the 'data' parameter, or it can be an image
        stored in the object storage service. In the case of the latter, you
        can either supply the container and object names, or simply a
        StorageObject reference.
        """
        # Bug fix: 'metadata' was accepted here but silently dropped instead
        # of being forwarded to the manager.
        return self._manager.create(name, img_format, data=data,
                container=container, obj=obj, metadata=metadata)

    def change_image_name(self, img, newname):
        """
        Image name can be changed via the update() method. This is simply a
        convenience method.
        """
        return self.update(img, {"name": newname})

    @assure_image
    def list_image_members(self, img):
        """
        Returns a list of members (users) of the specified image.
        """
        return img.list_members()

    @assure_image
    def get_image_member(self, img, member):
        """
        Returns the ImageMember object representing the specified member for the
        specified image.
        """
        return img.get_member(member)

    @assure_image
    def add_image_member(self, img, project_id):
        """
        Adds the project (tenant) represented by the project_id as a member of
        the specified image.
        """
        return img.add_member(project_id)

    @assure_image
    def delete_image_member(self, img, project_id):
        """
        Removes the project (tenant) represented by the project_id as a member
        of the specified image.
        """
        return img.delete_member(project_id)

    def update_image_member(self, img_id, status):
        """
        Updates the image whose ID is given with the status specified. This
        must be called by the user whose project_id is in the members for the
        image; that is, the user with whom the image is being shared. If called
        by the owner of the image, an `InvalidImageMember` exception will be
        raised.

        Valid values for 'status' include:
            pending
            accepted
            rejected

        Any other value will result in an `InvalidImageMemberStatus` exception
        being raised.
        """
        return self._manager.update_image_member(img_id, status)

    @assure_image
    def add_image_tag(self, img, tag):
        """
        Adds the tag to the specified image.
        """
        return img.add_tag(tag)

    @assure_image
    def delete_image_tag(self, img, tag):
        """
        Deletes the tag from the specified image.
        """
        return img.delete_tag(tag)

    def list_tasks(self):
        """
        Returns a list of all tasks.
        """
        return self._tasks_manager.list()

    def get_task(self, task):
        """
        Returns the ImageTask object for the supplied ID.
        """
        return self._tasks_manager.get(task)

    def export_task(self, img, cont):
        """
        Creates a task to export the specified image to the swift container
        named in the 'cont' parameter. If the container does not exist, a
        NoSuchContainer exception is raised.

        The 'img' parameter can be either an Image object or the ID of an
        image. If these do not correspond to a valid image, a NotFound
        exception is raised.
        """
        return self._tasks_manager.create("export", img=img, cont=cont)

    def import_task(self, img, cont, img_format=None, img_name=None):
        """
        Creates a task to import the specified image from the swift container
        named in the 'cont' parameter. The new image will be named the same as
        the object in the container unless you specify a value for the
        'img_name' parameter.

        By default it is assumed that the image is in 'vhd' format; if it is
        another format, you must specify that in the 'img_format' parameter.
        """
        return self._tasks_manager.create("import", img=img, cont=cont,
                img_format=img_format, img_name=img_name)

    def get_images_schema(self):
        """
        Returns a json-schema document that represents an image members entity,
        which is a container of image member entities.
        """
        return self._schema_manager.images()

    def get_image_schema(self):
        """
        Returns a json-schema document that represents a single image entity.
        """
        return self._schema_manager.image()

    def get_image_members_schema(self):
        """
        Returns a json-schema document that represents an image members entity
        (a container of member entities).
        """
        return self._schema_manager.image_members()

    def get_image_member_schema(self):
        """
        Returns a json-schema document that represents an image member entity.
        (a container of member entities).
        """
        return self._schema_manager.image_member()

    def get_image_tasks_schema(self):
        """
        Returns a json-schema document that represents a container of tasks
        entities.
        """
        return self._schema_manager.image_tasks()

    def get_image_task_schema(self):
        """
        Returns a json-schema document that represents an task entity.
        """
        return self._schema_manager.image_task()
| apache-2.0 |
clarkerubber/irwin | modules/irwin/training/Evaluation.py | 1 | 3292 | from default_imports import *
from conf.ConfigWrapper import ConfigWrapper
from modules.game.Player import Player
from modules.game.GameStore import GameStore
from modules.game.AnalysedGame import GameAnalysedGame
from modules.irwin.PlayerReport import PlayerReport
class Evaluation(NamedTuple('Evaluation', [
        ('irwin', 'Irwin'),
        ('config', ConfigWrapper)
    ])):
    """
    Evaluates Irwin's analysed-game model against samples of known engine
    (cheating) and known legit players, logging confusion-matrix statistics.
    """
    def getPlayerOutcomes(self, engine: bool, batchSize: int):
        """
        Generator yielding one outcome code (see outcome()) per sampled
        player, or None for players that produced no game reports.
        (The previous '-> Opt[int]' annotation was wrong: this is a
        generator, not a single optional int.)
        """
        for player in self.irwin.env.playerDB.engineSample(engine, batchSize):
            analysedGames = self.irwin.env.analysedGameDB.byPlayerId(player.id)
            games = self.irwin.env.gameDB.byIds([ag.gameId for ag in analysedGames])
            # Only games of 60 moves or fewer are fed to the model.
            predictions = self.irwin.analysedGameModel.predict(
                [GameAnalysedGame(ag, g) for ag, g in zip(analysedGames, games)
                    if ag.gameLength() <= 60])
            playerReport = PlayerReport.new(player, zip(analysedGames, predictions))
            if len(playerReport.gameReports) > 0:
                # 92 / 64 are the mark / report activation thresholds.
                yield Evaluation.outcome(
                    playerReport.activation,
                    92, 64, engine)
            else:
                yield None

    def evaluate(self):
        """
        Run the evaluation over both the engine and the legit samples,
        logging cumulative performance statistics after every player.
        (Rewritten from a side-effecting nested list comprehension into
        explicit loops; behavior is unchanged.)
        """
        outcomes = []
        sample_size = self.config['irwin testing eval_size']
        for engine in (True, False):
            for o in self.getPlayerOutcomes(engine, sample_size):
                if o is not None:
                    outcomes.append(o)
                # Log running stats after every player, including skipped ones.
                Evaluation.performance(outcomes)

    @staticmethod
    def performance(outcomes):
        """
        Log confusion-matrix style statistics for the outcome codes
        accumulated so far (codes defined in outcome()).
        """
        tp = len([a for a in outcomes if a == 1])
        fn = len([a for a in outcomes if a == 2])
        tn = len([a for a in outcomes if a == 3])
        fp = len([a for a in outcomes if a == 4])
        tr = len([a for a in outcomes if a == 5])
        fr = len([a for a in outcomes if a == 6])

        # max(1, ...) guards the percentage division against empty samples.
        cheatsLen = max(1, tp + fn + tr)
        legitsLen = max(1, fp + tn + fr)

        logging.warning("True positive: " + str(tp) + " (" + str(int(100*tp/cheatsLen)) + "%)")
        logging.warning("False negative: " + str(fn) + " (" + str(int(100*fn/cheatsLen)) + "%)")
        logging.warning("True negative: " + str(tn) + " (" + str(int(100*tn/legitsLen)) + "%)")
        logging.warning("False positive: " + str(fp) + " (" + str(int(100*fp/legitsLen)) + "%)")
        logging.warning("True Report: " + str(tr) + " (" + str(int(100*tr/cheatsLen)) + "%)")
        logging.warning("False Report: " + str(fr) + " (" + str(int(100*fr/legitsLen)) + "%)")
        logging.warning("Cheats coverage: " + str(int(100*(tp+tr)/cheatsLen)) + "%")
        logging.warning("Legits coverage: " + str(int(100*(tn)/legitsLen)) + "%")

    @staticmethod
    def outcome(a: int, tm: int, tr: int, e: bool) -> int:
        """
        Classify one player's activation 'a' against the mark threshold 'tm'
        and the report threshold 'tr', given the expected label 'e'
        (True = known engine). Returns a code consumed by performance().
        """
        logging.debug(a)
        true_positive = 1
        false_negative = 2
        true_negative = 3
        false_positive = 4
        true_report = 5
        false_report = 6

        if a > tm and e:
            return true_positive
        if a > tm and not e:
            return false_positive
        if a > tr and e:
            return true_report
        if a > tr and not e:
            return false_report
        if a <= tr and e:
            return false_negative
        return true_negative
PanYuntao/node-gyp | gyp/pylib/gyp/easy_xml_test.py | 2698 | 3270 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the easy_xml.py file. """
import gyp.easy_xml as easy_xml
import unittest
import StringIO
class TestSequenceFunctions(unittest.TestCase):
  """Unit tests for gyp.easy_xml's XML string generation."""

  def setUp(self):
    # Buffer for captured stderr; kept for parity with other gyp test suites.
    self.stderr = StringIO.StringIO()

  def test_EasyXml_simple(self):
    # A bare element renders as a self-closing tag with an XML declaration.
    self.assertEqual(
      easy_xml.XmlToString(['test']),
      '<?xml version="1.0" encoding="utf-8"?><test/>')

    # The encoding in the declaration follows the 'encoding' argument.
    self.assertEqual(
      easy_xml.XmlToString(['test'], encoding='Windows-1252'),
      '<?xml version="1.0" encoding="Windows-1252"?><test/>')

  def test_EasyXml_simple_with_attributes(self):
    # A dict immediately after the tag name becomes XML attributes.
    self.assertEqual(
      easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]),
      '<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>')

  def test_EasyXml_escaping(self):
    # CR, LF, and '&' are entity-escaped; apostrophes are escaped only in
    # text content (converted_apos), not in attribute values.
    original = '<test>\'"\r&\nfoo'
    converted = '<test>\'"&#13;&amp;&#10;foo'
    converted_apos = converted.replace("'", '&apos;')
    self.assertEqual(
      easy_xml.XmlToString(['test3', {'a': original}, original]),
      '<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' %
      (converted, converted_apos))

  def test_EasyXml_pretty(self):
    # pretty=True indents nested elements and terminates lines with '\n'.
    self.assertEqual(
      easy_xml.XmlToString(
        ['test3',
          ['GrandParent',
            ['Parent1',
              ['Child']
            ],
            ['Parent2']
          ]
        ],
        pretty=True),
      '<?xml version="1.0" encoding="utf-8"?>\n'
      '<test3>\n'
      '  <GrandParent>\n'
      '    <Parent1>\n'
      '      <Child/>\n'
      '    </Parent1>\n'
      '    <Parent2/>\n'
      '  </GrandParent>\n'
      '</test3>\n')

  def test_EasyXml_complex(self):
    # We want to create:
    target = (
      '<?xml version="1.0" encoding="utf-8"?>'
      '<Project>'
      '<PropertyGroup Label="Globals">'
      '<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>'
      '<Keyword>Win32Proj</Keyword>'
      '<RootNamespace>automated_ui_tests</RootNamespace>'
      '</PropertyGroup>'
      '<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>'
      '<PropertyGroup '
      'Condition="\'$(Configuration)|$(Platform)\'=='
      '\'Debug|Win32\'" Label="Configuration">'
      '<ConfigurationType>Application</ConfigurationType>'
      '<CharacterSet>Unicode</CharacterSet>'
      '</PropertyGroup>'
      '</Project>')

    xml = easy_xml.XmlToString(
      ['Project',
        ['PropertyGroup', {'Label': 'Globals'},
          ['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'],
          ['Keyword', 'Win32Proj'],
          ['RootNamespace', 'automated_ui_tests']
        ],
        ['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}],
        ['PropertyGroup',
          {'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'",
           'Label': 'Configuration'},
          ['ConfigurationType', 'Application'],
          ['CharacterSet', 'Unicode']
        ]
      ])
    self.assertEqual(xml, target)
if __name__ == '__main__':
unittest.main()
| mit |
kvar/ansible | lib/ansible/modules/network/ftd/ftd_install.py | 27 | 11868 | #!/usr/bin/python
# Copyright (c) 2019 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ftd_install
short_description: Installs FTD pkg image on the firewall
description:
- Provisioning module for FTD devices that installs ROMMON image (if needed) and
FTD pkg image on the firewall.
- Can be used with `httpapi` and `local` connection types. The `httpapi` is preferred,
the `local` connection should be used only when the device cannot be accessed via
REST API.
version_added: "2.8"
requirements: [ "python >= 3.5", "firepower-kickstart" ]
notes:
- Requires `firepower-kickstart` library that should be installed separately and requires Python >= 3.5.
- On localhost, Ansible can be still run with Python >= 2.7, but the interpreter for this particular module must be
Python >= 3.5.
- Python interpreter for the module can overwritten in `ansible_python_interpreter` variable.
author: "Cisco Systems, Inc. (@annikulin)"
options:
device_hostname:
description:
- Hostname of the device as appears in the prompt (e.g., 'firepower-5516').
required: true
type: str
device_username:
description:
- Username to login on the device.
- Defaulted to 'admin' if not specified.
required: false
type: str
default: admin
device_password:
description:
- Password to login on the device.
required: true
type: str
device_sudo_password:
description:
- Root password for the device. If not specified, `device_password` is used.
required: false
type: str
device_new_password:
description:
- New device password to set after image installation.
- If not specified, current password from `device_password` property is reused.
- Not applicable for ASA5500-X series devices.
required: false
type: str
device_ip:
description:
- Device IP address of management interface.
- If not specified and connection is 'httpapi`, the module tries to fetch the existing value via REST API.
- For 'local' connection type, this parameter is mandatory.
required: false
type: str
device_gateway:
description:
- Device gateway of management interface.
- If not specified and connection is 'httpapi`, the module tries to fetch the existing value via REST API.
- For 'local' connection type, this parameter is mandatory.
required: false
type: str
device_netmask:
description:
- Device netmask of management interface.
- If not specified and connection is 'httpapi`, the module tries to fetch the existing value via REST API.
- For 'local' connection type, this parameter is mandatory.
required: false
type: str
device_model:
description:
- Platform model of the device (e.g., 'Cisco ASA5506-X Threat Defense').
- If not specified and connection is 'httpapi`, the module tries to fetch the device model via REST API.
- For 'local' connection type, this parameter is mandatory.
required: false
type: str
choices:
- Cisco ASA5506-X Threat Defense
- Cisco ASA5508-X Threat Defense
- Cisco ASA5516-X Threat Defense
- Cisco Firepower 2110 Threat Defense
- Cisco Firepower 2120 Threat Defense
- Cisco Firepower 2130 Threat Defense
- Cisco Firepower 2140 Threat Defense
dns_server:
description:
- DNS IP address of management interface.
- If not specified and connection is 'httpapi`, the module tries to fetch the existing value via REST API.
- For 'local' connection type, this parameter is mandatory.
required: false
type: str
console_ip:
description:
- IP address of a terminal server.
- Used to set up an SSH connection with device's console port through the terminal server.
required: true
type: str
console_port:
description:
- Device's port on a terminal server.
required: true
type: str
console_username:
description:
- Username to login on a terminal server.
required: true
type: str
console_password:
description:
- Password to login on a terminal server.
required: true
type: str
rommon_file_location:
description:
- Path to the boot (ROMMON) image on TFTP server.
- Only TFTP is supported.
required: true
type: str
image_file_location:
description:
- Path to the FTD pkg image on the server to be downloaded.
- FTP, SCP, SFTP, TFTP, or HTTP protocols are usually supported, but may depend on the device model.
required: true
type: str
image_version:
description:
- Version of FTD image to be installed.
- Helps to compare target and current FTD versions to prevent unnecessary reinstalls.
required: true
type: str
force_install:
description:
- Forces the FTD image to be installed even when the same version is already installed on the firewall.
- By default, the module stops execution when the target version is installed in the device.
required: false
type: bool
default: false
search_domains:
description:
- Search domains delimited by comma.
- Defaulted to 'cisco.com' if not specified.
required: false
type: str
default: cisco.com
"""
EXAMPLES = """
- name: Install image v6.3.0 on FTD 5516
ftd_install:
device_hostname: firepower
device_password: pass
device_ip: 192.168.0.1
device_netmask: 255.255.255.0
device_gateway: 192.168.0.254
dns_server: 8.8.8.8
console_ip: 10.89.0.0
console_port: 2004
console_username: console_user
console_password: console_pass
rommon_file_location: 'tftp://10.89.0.11/installers/ftd-boot-9.10.1.3.lfbff'
image_file_location: 'https://10.89.0.11/installers/ftd-6.3.0-83.pkg'
image_version: 6.3.0-83
"""
RETURN = """
msg:
description: The message saying whether the image was installed or explaining why the installation failed.
returned: always
type: str
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.ftd.configuration import BaseConfigurationResource, ParamName
from ansible.module_utils.network.ftd.device import assert_kick_is_installed, FtdPlatformFactory, FtdModel
from ansible.module_utils.network.ftd.operation import FtdOperations, get_system_info
REQUIRED_PARAMS_FOR_LOCAL_CONNECTION = ['device_ip', 'device_netmask', 'device_gateway', 'device_model', 'dns_server']
def main():
    """
    Module entry point: validates parameters, determines the target platform,
    and drives the FTD image installation via the device's console.
    """
    fields = dict(
        device_hostname=dict(type='str', required=True),
        device_username=dict(type='str', required=False, default='admin'),
        device_password=dict(type='str', required=True, no_log=True),
        device_sudo_password=dict(type='str', required=False, no_log=True),
        device_new_password=dict(type='str', required=False, no_log=True),
        device_ip=dict(type='str', required=False),
        device_netmask=dict(type='str', required=False),
        device_gateway=dict(type='str', required=False),
        device_model=dict(type='str', required=False, choices=FtdModel.supported_models()),
        dns_server=dict(type='str', required=False),
        search_domains=dict(type='str', required=False, default='cisco.com'),

        console_ip=dict(type='str', required=True),
        console_port=dict(type='str', required=True),
        console_username=dict(type='str', required=True),
        console_password=dict(type='str', required=True, no_log=True),

        rommon_file_location=dict(type='str', required=True),
        image_file_location=dict(type='str', required=True),
        image_version=dict(type='str', required=True),
        force_install=dict(type='bool', required=False, default=False)
    )
    module = AnsibleModule(argument_spec=fields)
    # The firepower-kickstart library is an external requirement; fail fast
    # with a clear message if it is missing.
    assert_kick_is_installed(module)

    # Ansible sets _socket_path only for persistent (httpapi) connections;
    # its absence means the 'local' connection type is in use.
    use_local_connection = module._socket_path is None
    if use_local_connection:
        # No REST API available: every device fact must come from the task.
        check_required_params_for_local_connection(module, module.params)
        platform_model = module.params['device_model']
        check_that_model_is_supported(module, platform_model)
    else:
        # With httpapi we can query the device for anything not provided.
        connection = Connection(module._socket_path)
        resource = BaseConfigurationResource(connection, module.check_mode)
        system_info = get_system_info(resource)

        platform_model = module.params['device_model'] or system_info['platformModel']
        check_that_model_is_supported(module, platform_model)
        check_that_update_is_needed(module, system_info)
        check_management_and_dns_params(resource, module.params)

    ftd_platform = FtdPlatformFactory.create(platform_model, module.params)
    ftd_platform.install_ftd_image(module.params)

    module.exit_json(changed=True,
                     msg='Successfully installed FTD image %s on the firewall device.' % module.params["image_version"])
def check_required_params_for_local_connection(module, params):
    """
    Fail the module run when any parameter that is mandatory for the
    'local' connection type is missing (there is no REST API to fall
    back on in that mode).
    """
    missing = sorted(key for key, value in params.items()
                     if key in REQUIRED_PARAMS_FOR_LOCAL_CONNECTION and value is None)
    if missing:
        message = "The following parameters are mandatory when the module is used with 'local' connection: %s." % \
                  ', '.join(missing)
        module.fail_json(msg=message)
def check_that_model_is_supported(module, platform_model):
    """
    Fail the module run when the detected platform model is not one this
    module knows how to provision.
    """
    supported = FtdModel.supported_models()
    if platform_model not in supported:
        module.fail_json(msg="Platform model '%s' is not supported by this module." % platform_model)
def check_that_update_is_needed(module, system_info):
    """
    Exit early (changed=False) when the target FTD version is already
    installed on the device, unless 'force_install' is set.
    """
    target_ftd_version = module.params["image_version"]
    already_installed = target_ftd_version == system_info['softwareVersion']
    if already_installed and not module.params["force_install"]:
        module.exit_json(changed=False, msg="FTD already has %s version of software installed." % target_ftd_version)
def check_management_and_dns_params(resource, params):
    """
    Fill in any missing management-network and DNS parameters by querying
    the device over the REST API (only possible with the httpapi connection).
    Mutates 'params' in place.
    """
    if not all([params['device_ip'], params['device_netmask'], params['device_gateway']]):
        # The first management interface record supplies address/mask/gateway.
        management_ip = resource.execute_operation(FtdOperations.GET_MANAGEMENT_IP_LIST, {})['items'][0]
        params['device_ip'] = params['device_ip'] or management_ip['ipv4Address']
        params['device_netmask'] = params['device_netmask'] or management_ip['ipv4NetMask']
        params['device_gateway'] = params['device_gateway'] or management_ip['ipv4Gateway']
    if not params['dns_server']:
        # Resolve the DNS server through the device's configured server group.
        dns_setting = resource.execute_operation(FtdOperations.GET_DNS_SETTING_LIST, {})['items'][0]
        dns_server_group_id = dns_setting['dnsServerGroup']['id']
        dns_server_group = resource.execute_operation(FtdOperations.GET_DNS_SERVER_GROUP,
                                                      {ParamName.PATH_PARAMS: {'objId': dns_server_group_id}})
        params['dns_server'] = dns_server_group['dnsServers'][0]['ipAddress']
if __name__ == '__main__':
main()
| gpl-3.0 |
songhan/neon | neon/transforms/tests/test_leaky.py | 9 | 4642 | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from nose.plugins.attrib import attr
import numpy as np
from neon.backends.cpu import CPU, CPUTensor
from neon.transforms.rectified import RectLeaky
from neon.util.testing import assert_tensor_equal
def compare_cpu_tensors(inputs, outputs, deriv=False):
    """
    Apply RectLeaky (or its derivative when deriv=True) to 'inputs' on the
    CPU backend and assert that the result equals 'outputs'.
    """
    rlin = RectLeaky()
    be = CPU()
    temp = be.zeros(inputs.shape)
    if deriv is True:
        rlin.apply_derivative(be, CPUTensor(inputs), temp)
    else:
        rlin.apply_function(be, CPUTensor(inputs), temp)
    # Equality check via subtraction: result - expected must be all zeros.
    be.subtract(temp, CPUTensor(outputs), temp)
    assert_tensor_equal(temp, be.zeros(inputs.shape))
def compare_cc2_tensors(inputs, outputs, deriv=False):
    """
    Same check as compare_cpu_tensors, but on the cudanet (cc2) GPU backend.
    The import is local so CPU-only environments can load this module.
    """
    from neon.backends.cc2 import GPU, GPUTensor
    rlin = RectLeaky()
    be = GPU()
    temp = be.zeros(inputs.shape)
    if deriv is True:
        rlin.apply_derivative(be, GPUTensor(inputs), temp)
    else:
        rlin.apply_function(be, GPUTensor(inputs), temp)
    # Equality check via subtraction: result - expected must be all zeros.
    be.subtract(temp, GPUTensor(outputs), temp)
    assert_tensor_equal(temp, be.zeros(inputs.shape))
def test_rectleaky_positives():
    """Positive inputs pass through RectLeaky unchanged."""
    inputs = np.array([1, 3, 2])
    outputs = np.array([1, 3, 2])
    compare_cpu_tensors(inputs, outputs)
def test_rectleaky_negatives():
    """Negative inputs are scaled by the default 0.01 leak slope."""
    inputs = np.array([[-1, -3], [-2, -4]])
    outputs = np.array([[-0.01, -0.03], [-0.02, -0.04]])
    compare_cpu_tensors(inputs, outputs)
def test_rectleaky_mixed():
    """Mixed-sign inputs: positives unchanged, negatives leaked by 0.01."""
    inputs = np.array([[4, 0], [-2, 9]])
    outputs = np.array([[4, 0], [-0.02, 9]])
    compare_cpu_tensors(inputs, outputs)
@attr('cuda')
def test_rectleaky_cc2tensor():
    """Same mixed-sign check as the CPU test, on the cc2 GPU backend."""
    inputs = np.array([[4, 0], [-2, 9]])
    outputs = np.array([[4, 0], [-0.02, 9]])
    compare_cc2_tensors(inputs, outputs)
def test_rectleaky_derivative_positives():
    """Derivative is 1 for positive inputs."""
    inputs = np.array([1, 3, 2])
    outputs = np.array([1, 1, 1])
    compare_cpu_tensors(inputs, outputs, deriv=True)
def test_rectleaky_derivative_negatives():
    """Derivative equals the leak slope (0.01) for negative inputs."""
    inputs = np.array([[-1, -3], [-2, -4]])
    outputs = np.array([[0.01, 0.01], [0.01, 0.01]])
    compare_cpu_tensors(inputs, outputs, deriv=True)
def test_rectleaky_derivative_mixed():
    """Derivative for mixed inputs: 1 for positives, 0.01 at and below zero."""
    inputs = np.array([[4, 0], [-2, 9]])
    outputs = np.array([[1, 0.01], [0.01, 1]])
    compare_cpu_tensors(inputs, outputs, deriv=True)
@attr('cuda')
def test_rectleaky_derivative_cc2tensor():
    """Same derivative check as the CPU test, on the cc2 GPU backend."""
    inputs = np.array([[4, 0], [-2, 9]])
    outputs = np.array([[1, 0.01], [0.01, 1]])
    compare_cc2_tensors(inputs, outputs, deriv=True)
def test_rectleaky_slope_zero_rectlin_equiv():
    """rectleaky with slope=0 must match plain rectlin on the CPU backend."""
    be = CPU()
    x = be.uniform(low=-5.0, high=10.0, size=(10, 10))
    out_lin = be.empty(x.shape)
    out_leaky = be.empty(x.shape)
    be.rectlin(x, out=out_lin)
    be.rectleaky(x, slope=0.0, out=out_leaky)
    assert_tensor_equal(out_lin, out_leaky)


def test_rectleaky_derivative_slope_zero_rectlin_equiv():
    """rectleaky_derivative with slope=0 must match rectlin_derivative (CPU)."""
    be = CPU()
    x = be.uniform(low=-5.0, high=10.0, size=(10, 10))
    out_lin = be.empty(x.shape)
    out_leaky = be.empty(x.shape)
    be.rectlin_derivative(x, out=out_lin)
    be.rectleaky_derivative(x, slope=0.0, out=out_leaky)
    assert_tensor_equal(out_lin, out_leaky)


@attr('cuda')
def test_cc2_rectleaky_slope_zero_rectlin_equiv():
    """rectleaky with slope=0 must match rectlin on the cudanet GPU backend."""
    from neon.backends.cc2 import GPU
    be = GPU()
    x = be.uniform(low=-5.0, high=10.0, size=(10, 10))
    out_lin = be.empty(x.shape)
    out_leaky = be.empty(x.shape)
    be.rectlin(x, out=out_lin)
    be.rectleaky(x, slope=0.0, out=out_leaky)
    assert_tensor_equal(out_lin, out_leaky)


@attr('cuda')
def test_cc2_rectleaky_derivative_slope_zero_rectlin_equiv():
    """rectleaky_derivative with slope=0 must match rectlin_derivative (GPU)."""
    from neon.backends.cc2 import GPU
    be = GPU()
    x = be.uniform(low=-5.0, high=10.0, size=(10, 10))
    out_lin = be.empty(x.shape)
    out_leaky = be.empty(x.shape)
    be.rectlin_derivative(x, out=out_lin)
    be.rectleaky_derivative(x, slope=0.0, out=out_leaky)
    assert_tensor_equal(out_lin, out_leaky)
| apache-2.0 |
HybridF5/jacket | jacket/api/compute/openstack/compute/legacy_v2/contrib/server_diagnostics.py | 1 | 2408 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from jacket.api.compute.openstack import common
from jacket.api.compute.openstack import extensions
from jacket.compute import cloud
from jacket.compute import exception
from jacket.i18n import _
# Policy check shared by the handlers of the os-server-diagnostics extension.
authorize = extensions.extension_authorizer('cloud', 'server_diagnostics')
class ServerDiagnosticsController(object):
    """Controller that exposes hypervisor diagnostics for one server."""

    def __init__(self):
        self.compute_api = cloud.API()

    def index(self, req, server_id):
        """Return diagnostics for ``server_id``, mapping backend errors to
        the appropriate HTTP responses."""
        context = req.environ["compute.context"]
        authorize(context)
        instance = common.get_instance(self.compute_api, context, server_id)
        try:
            return self.compute_api.get_diagnostics(context, instance)
        except exception.InstanceInvalidState as state_error:
            # Instance is in a state where diagnostics cannot be fetched.
            common.raise_http_conflict_for_instance_invalid_state(
                state_error, 'get_diagnostics', server_id)
        except NotImplementedError:
            msg = _("Unable to get diagnostics, functionality not implemented")
            raise webob.exc.HTTPNotImplemented(explanation=msg)
class Server_diagnostics(extensions.ExtensionDescriptor):
    """Allow Admins to view server diagnostics through server action."""

    name = "ServerDiagnostics"
    alias = "os-server-diagnostics"
    namespace = ("http://docs.openstack.org/cloud/ext/"
                 "server-diagnostics/api/v1.1")
    updated = "2011-12-21T00:00:00Z"

    def get_resources(self):
        """Register /servers/{id}/diagnostics as a child resource."""
        parent = {'member_name': 'server', 'collection_name': 'servers'}
        # NOTE(bcwaldon): This should be prefixed with 'os-'
        resource = extensions.ResourceExtension('diagnostics',
                                                ServerDiagnosticsController(),
                                                parent=parent)
        return [resource]
| apache-2.0 |
anisyonk/pilot | saga/constants.py | 10 | 1290 |
__author__ = "Andre Merzky"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
""" Global constants """
import radical.utils as ru
######################################################################
#
# task constants
#
# Execution modes for task creation.
SYNC = 1  # 'Sync'
ASYNC = 2  # 'Async'
TASK = 3  # 'Task'

# Task states; most are aliases of the radical.utils state constants.
UNKNOWN = 'Unknown'
NEW = ru.NEW
RUNNING = ru.RUNNING
DONE = ru.DONE
FAILED = ru.FAILED
CANCELED = 'Canceled'
FINAL = [DONE, FAILED, CANCELED]  # terminal states

# Task attribute names and wait modes.
STATE = 'State'
RESULT = 'Result'
EXCEPTION = 'Exception'
ALL = 'All'
ANY = 'Any'

######################################################################
#
# task container constants
#
SIZE = "Size"
TASKS = "Tasks"
STATES = "States"

######################################################################
#
# context container constants
#
TYPE = "Type"
SERVER = "Server"
TOKEN = "Token"
CERT_REPOSITORY = "CertRepository"
USER_PROXY = "UserProxy"
USER_CERT = "UserCert"
USER_KEY = "UserKey"
USER_ID = "UserID"
USER_PASS = "UserPass"
USER_VO = "UserVO"
LIFE_TIME = "LifeTime"
REMOTE_ID = "RemoteID"
REMOTE_HOST = "RemoteHost"
REMOTE_PORT = "RemotePort"
lgarren/spack | var/spack/repos/builtin/packages/googletest/package.py | 3 | 2706 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Googletest(CMakePackage):
    """Google test framework for C++. Also called gtest."""

    homepage = "https://github.com/google/googletest"
    url = "https://github.com/google/googletest/tarball/release-1.7.0"

    version('1.8.0', 'd2edffbe844902d942c31db70c7cfec2')
    version('1.7.0', '5eaf03ed925a47b37c8e1d559eb19bc4')
    version('1.6.0', '90407321648ab25b067fcd798caf8c78')

    variant('gmock', default=False, description='Build with gmock')
    # Google Mock only ships together with googletest starting at 1.8.0.
    conflicts('+gmock', when='@:1.7.0')

    def cmake_args(self):
        """Return CMake options; 1.8.0+ bundles gmock next to gtest, while
        older releases take no options at all."""
        spec = self.spec
        if '@1.8.0:' in spec:
            # New style (contains both Google Mock and Google Test)
            options = ['-DBUILD_GTEST=ON']
            if '+gmock' in spec:
                options.append('-DBUILD_GMOCK=ON')
            else:
                options.append('-DBUILD_GMOCK=OFF')
        else:
            # Old style (contains only GTest)
            options = []
        return options

    @when('@:1.7.0')
    def install(self, spec, prefix):
        """Manually install headers and static libs for releases up to 1.7.0."""
        with working_dir(self.build_directory):
            # Google Test doesn't have a make install
            # We have to do our own install here.
            install_tree(join_path(self.stage.source_path, 'include'),
                         prefix.include)
            mkdirp(prefix.lib)
            install('libgtest.a', prefix.lib)
            install('libgtest_main.a', prefix.lib)
| lgpl-2.1 |
mirror/wget | testenv/misc/metalinkv3_xml.py | 12 | 10236 | from test.http_test import HTTPTest
from misc.wget_file import WgetFile
import hashlib
class Metalinkv3_XML:
    """ Metalink/XML v3 object

    Accumulates WgetFile objects (local, server-side, expected), renders a
    Metalink v3 XML description of them, and can drive an HTTPTest run
    against a dynamically-bound test server.
    """

    # Initialize the Metalink object
    def __init__ (self):
        self.reset ()

    # Reset the Metalink object
    def reset (self):
        """Clear all file lists and XML state back to a pristine object."""
        self.LocalFiles = []  # list of WgetFile objects
        self.ServerFiles = [[]]  # list of WgetFile objects
        self.ExpectedFiles = []  # list of WgetFile objects
        self.LocalFiles_Set = []  # used as `list (set (var))`
        self.ServerFiles_Set = [[]]  # used as `list (set (var))`
        self.ExpectedFiles_Set = []  # used as `list (set (var))`
        self.Xml = ''  # Metalink/XML content
        self.XmlName = ''  # Metalink/XML file name
        self.XmlFile = None  # Metalink/XML WgetFile object
        # Fixed XML prologue/epilogue; per-file <file> tags are inserted
        # between them by xml().
        # NOTE(review): leading spaces inside these literals only affect XML
        # pretty-printing — confirm against upstream if byte-exact output
        # matters.
        self.Xml_Header = '<?xml version="1.0" encoding="utf-8"?>\n' + \
            '<metalink version="3.0" xmlns="http://www.metalinker.org/">\n' + \
            ' <publisher>\n' + \
            ' <name>GNU Wget</name>\n' + \
            ' </publisher>\n' + \
            ' <license>\n' + \
            ' <name>GNU GPL</name>\n' + \
            ' <url>http://www.gnu.org/licenses/gpl.html</url>\n' + \
            ' </license>\n' + \
            ' <identity>Wget Test Files</identity>\n' + \
            ' <version>1.2.3</version>\n' + \
            ' <description>Wget Test Files description</description>\n' + \
            ' <files>\n'
        self.Xml_Footer = ' </files>\n' + \
            '</metalink>\n'

    # Print the Metalink object.
    def print_meta (self):
        """Dump the generated XML and the three file-name sets to stdout."""
        print (self.Xml)
        print ("LocalFiles = " + str (self.LocalFiles_Set))
        print ("ServerFiles = " + str (self.ServerFiles_Set))
        print ("ExpectedFiles = " + str (self.ExpectedFiles_Set))

    # Add LocalFiles as WgetFile objects
    #
    # ["file_name", "content"],
    # ["file_name", "content"]
    def add_LocalFiles (self, *local_files):
        """Register local files, skipping names already registered."""
        for (file_name, content) in local_files:
            if not file_name in self.LocalFiles_Set:
                self.LocalFiles_Set.append (file_name)
                self.LocalFiles.append (WgetFile (file_name, content))

    # Add ServerFiles as WgetFile objects
    #
    # ["file_name", "content"],
    # ["file_name", "content"]
    def add_ServerFiles (self, *server_files):
        """Register files served by the first (only) test server."""
        for (file_name, content) in server_files:
            if not file_name in self.ServerFiles_Set[0]:
                self.ServerFiles_Set[0].append (file_name)
                self.ServerFiles[0].append (WgetFile (file_name, content))

    # Add ExpectedFiles as WgetFile objects
    #
    # ["file_name", "content"],
    # ["file_name", "content"]
    def add_ExpectedFiles (self, *expected_files):
        """Register files expected to exist after the Wget run."""
        for (file_name, content) in expected_files:
            if not file_name in self.ExpectedFiles_Set:
                self.ExpectedFiles_Set.append (file_name)
                self.ExpectedFiles.append (WgetFile (file_name, content))

    # Run a Wget HTTP test for the Metalink object.
    def http_test (self, command_line, expected_retcode):
        """Run HTTPTest with the accumulated files; return its exit status."""
        pre_test = {
            "ServerFiles" : self.ServerFiles,  # list of WgetFile objects as [[]]
            "LocalFiles" : self.LocalFiles,  # list of WgetFile objects as []
        }
        test_options = {
            "WgetCommands" : command_line,  # Wget cli
            "Urls" : [[]],  # Wget urls
        }
        post_test = {
            "ExpectedFiles" : self.ExpectedFiles,  # list of WgetFile objects as []
            "ExpectedRetcode" : expected_retcode,  # Wget return status code
        }
        http_test = HTTPTest (
            pre_hook=pre_test,
            test_params=test_options,
            post_hook=post_test,
        )
        http_test.server_setup()
        # Get and use dynamic server sockname
        srv_host, srv_port = http_test.servers[0].server_inst.socket.getsockname ()
        self.set_srv (srv_host, srv_port)
        err = http_test.begin ()
        return err

    # Set the Wget server host and port in the Metalink/XML content.
    def set_srv (self, srv_host, srv_port):
        """Substitute the {{SRV_HOST}}/{{SRV_PORT}} placeholders in the XML."""
        self.Xml = self.Xml.replace('{{SRV_HOST}}', srv_host)
        self.Xml = self.Xml.replace('{{SRV_PORT}}', str (srv_port))
        if self.XmlFile is not None:
            # Keep the WgetFile copy of the XML in sync with self.Xml.
            self.XmlFile.content = self.Xml

    # Create the Metalink/XML file.
    #
    # Add the Metalink/XML file to the list of ExpectedFiles.
    #
    # size:
    #   True     auto-compute size
    #   None     no <size></size>
    #   any      use this size
    #
    # hash_sha256:
    #   False    no <verification></verification>
    #   True     auto-compute sha256
    #   None     no <hash></hash>
    #   any      use this hash
    #
    # ARGUMENTS:
    #
    # "xml_name",                                              # Metalink/XML file name
    # ["file_name", "save_name", "content", size, hash_sha256, # metalink:file
    #  ["srv_file", "srv_content", utype, location, preference],  # resource
    #  ["srv_file", "srv_content", utype, location, preference]], # resource
    # ["file_name", "save_name", "content", size, hash_sha256,
    #  ["srv_file", "srv_content", utype, location, preference],
    #  ["srv_file", "srv_content", utype, location, preference]]
    def xml (self, xml_name, *xml_data):
        """Render the XML document and register it as local+expected file."""
        self.Xml = self.Xml_Header
        for (file_name, save_name, content, size, hash_sha256, *resources) in xml_data:
            self.Xml += self.file_tag (file_name, save_name, content, size, hash_sha256, resources) + '\n'
        self.Xml += self.Xml_Footer
        self.XmlName = xml_name
        self.XmlFile = WgetFile (xml_name, self.Xml)
        if not xml_name in self.LocalFiles_Set:
            self.LocalFiles_Set.append (xml_name)
            self.LocalFiles.append (self.XmlFile)
        if not xml_name in self.ExpectedFiles_Set:
            self.ExpectedFiles_Set.append (xml_name)
            self.ExpectedFiles.append (self.XmlFile)

    # Create the file tag.
    #
    # Add the file to be saved to the list of ExpectedFiles.
    #
    # size:
    #   True     auto-compute size
    #   None     no <size></size>
    #   any      use this size
    #
    # hash_sha256:
    #   False    no <verification></verification>
    #   True     auto-compute sha256
    #   None     no <hash></hash>
    #   any      use this hash
    #
    # ARGUMENTS:
    #
    # ["file_name", "save_name", "content", size, hash_sha256, # metalink:file
    #  ["srv_file", "srv_content", utype, location, preference],  # resource
    #  ["srv_file", "srv_content", utype, location, preference]]  # resource
    def file_tag (self, file_name, save_name, content, size, hash_sha256, resources):
        """Render one <file> element (size, verification, resources)."""
        Tag = ' <file name="' + file_name + '">\n'
        if save_name is not None:
            self.add_ExpectedFiles ([save_name, content])
        size_Tag = self.size_tag (content, size)
        if size_Tag is not None:
            Tag += size_Tag + '\n'
        verification_Tag = self.verification_tag (content, hash_sha256)
        if verification_Tag is not None:
            Tag += verification_Tag + '\n'
        Tag += self.resources_tag (resources) + '\n'
        Tag += ' </file>'
        return Tag

    # Create the size tag.
    #
    # size:
    #   True     auto-compute size
    #   None     no <size></size>
    #   any      use this size
    #
    # ARGUMENTS:
    #
    # "content", size
    def size_tag (self, content = None, size = None):
        """Render a <size> element, or None when no size applies."""
        Tag = None
        if content is not None and size is True:
            size = len (content)
        if size is not None:
            Tag = ' <size>' + str (size) + '</size>'
        return Tag

    # Create the verification tag.
    #
    # hash_sha256:
    #   False    no <verification></verification>
    #   True     auto-compute sha256
    #   None     no <hash></hash>
    #   any      use this hash
    #
    # ARGUMENTS:
    #
    # "content", hash_sha256
    def verification_tag (self, content = None, hash_sha256 = None):
        """Render a <verification> element per the hash_sha256 convention."""
        Tag = None
        if hash_sha256 is not False:
            if content is not None and hash_sha256 is True:
                hash_sha256 = hashlib.sha256 (content.encode ('UTF-8')).hexdigest ()
            if hash_sha256 is None:
                # Empty verification element (no <hash>).
                Tag = ' <verification>\n' + \
                      ' </verification>'
            else:
                Tag = ' <verification>\n' + \
                      ' <hash type="sha256">' + str (hash_sha256) + '</hash>\n' + \
                      ' </verification>'
        return Tag

    # Create the resources tag.
    #
    # ARGUMENTS:
    #
    # ["srv_file", "srv_content", utype, location, preference],  # resource
    # ["srv_file", "srv_content", utype, location, preference]   # resource
    def resources_tag (self, resources):
        """Render the <resources> element wrapping one <url> per resource."""
        Tag = ' <resources>\n'
        for (srv_file, srv_content, utype, location, preference) in resources:
            Tag += self.url_tag (srv_file, srv_content, utype, location, preference) + '\n'
        Tag += ' </resources>'
        return Tag

    # Create the url tag.
    #
    # Add the file to the list of Files when there is a content.
    #
    # ARGUMENTS:
    #
    # "srv_file", "srv_content", utype, location, preference  # resource
    def url_tag (self, srv_file, srv_content = None, utype = "http", location = None, preference = 999999):
        """Render one <url> element; register the server file if it has content."""
        Loc = ''
        if location is not None:
            Loc = 'location="' + location + '" '
        Tag = ' ' + \
              '<url ' + \
              'type="' + utype + '" ' + \
              Loc + \
              'preference="' + str (preference) + '">' + \
              'http://{{SRV_HOST}}:{{SRV_PORT}}/' + srv_file + \
              '</url>'
        if srv_content is not None:
            self.add_ServerFiles ([srv_file, srv_content])
        return Tag
| gpl-3.0 |
uclouvain/osis | program_management/forms/prerequisite.py | 1 | 3264 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django import forms
from django.utils.translation import gettext_lazy as _
from education_group.forms.fields import UpperCaseCharField
from program_management.ddd.domain.node import NodeLearningUnitYear
from program_management.ddd.domain.program_tree import ProgramTree
from program_management.ddd.validators.validators_by_business_action import UpdatePrerequisiteValidatorList
class PrerequisiteForm(forms.Form):
    """Form for editing the prerequisite expression of a learning-unit node
    inside a program tree; validation is delegated to the DDD validator."""

    prerequisite_string = UpperCaseCharField(
        label=_("Prerequisite"),
        required=False,
        help_text=_(
            "<b>Syntax rules</b>:<ul><li>No double parentheses.</li><li>Valid operators are OU or ET.</li><li>The "
            "operator must be the same inside all parentheses (groups).</li><li>The operator that linked groups must "
            "be different than the one that linked LU inside groups (parentheses).</li><li>The LU code cannot include "
            "spaces (ex: LDROI1001 and not LDROI 1001).</li></ul></p><p><b>Examples</b>:<ul><li>A OU B OU C: "
            "valid</li><li>A ET B ET C : valid</li><li>A ET (B OU C) ET (D OU E): valid</li><li>A ET (B OU C) OU (D OU "
            "E): not valid</li><li>A ET (B ET C) ET (D ET E): not valid</li><li>A ET (B OU C) ET (D ET E): not valid"
            "</li></ul>"
        ),
    )

    def __init__(self, program_tree: ProgramTree, node: NodeLearningUnitYear, *args, **kwargs):
        # Keep the tree and node around for validation in clean_*.
        super().__init__(*args, **kwargs)
        self.program_tree = program_tree
        self.node = node

    def clean_prerequisite_string(self):
        # Normalise to upper case before validating; validation failures are
        # attached to the field rather than raised.
        prerequisite_string = self.cleaned_data["prerequisite_string"].upper()
        validator = UpdatePrerequisiteValidatorList(prerequisite_string, self.node, self.program_tree)
        if not validator.is_valid():
            for error_message in validator.error_messages:
                self.add_error("prerequisite_string", error_message.message)
        return prerequisite_string

    def save(self, commit=False):
        # Intentionally a no-op: persistence is handled outside the form.
        pass
| agpl-3.0 |
fedspendingtransparency/data-act-core | dataactcore/scripts/databaseSetup.py | 1 | 1596 | import sqlalchemy_utils
from dataactcore.config import CONFIG_DB, ALEMBIC_PATH, MIGRATION_PATH
from alembic.config import Config
from alembic import command
from sqlalchemy.exc import ProgrammingError
def createDatabase(dbName):
    """Create specified database if it doesn't exist."""
    cfg = CONFIG_DB
    url = "postgresql://{}:{}@{}:{}/{}".format(
        cfg["username"], cfg["password"], cfg["host"], cfg["port"], dbName)
    if not sqlalchemy_utils.database_exists(url):
        sqlalchemy_utils.create_database(url)
def dropDatabase(dbName):
    """Drop specified database."""
    cfg = CONFIG_DB
    url = "postgresql://{}:{}@{}:{}/{}".format(
        cfg["username"], cfg["password"], cfg["host"], cfg["port"], dbName)
    if sqlalchemy_utils.database_exists(url):
        sqlalchemy_utils.drop_database(url)
def runMigrations(alembicDbName):
    """Run Alembic migrations for a specific database/model set.

    Args:
        alembicDbName: the database to target (must match one of the
            default databases in alembic.ini).

    Raises:
        Exception: if the migration fails because tables already exist
            (i.e. the schema was created outside of Alembic).
        ProgrammingError: re-raised for any other database error; the
            previous version silently swallowed these.
    """
    alembic_cfg = Config(ALEMBIC_PATH)
    alembic_cfg.set_main_option("script_location", MIGRATION_PATH)
    alembic_cfg.set_main_option("databases", alembicDbName)
    try:
        command.upgrade(alembic_cfg, "head")
    except ProgrammingError as e:
        # str(e) replaces the Python-2-only `e.message` attribute.
        msg = str(e)
        # BUG FIX: the original condition was
        #   `if "relation" and "already exists" in e.message:`
        # which only tested for "already exists" because the literal
        # "relation" is always truthy. Test both substrings explicitly.
        if "relation" in msg and "already exists" in msg:
            raise Exception("Cannot run initial db migration if tables "
                            "already exist. " + msg)
        raise
alexissmirnov/donomo | donomo_archive/deps/paypal.jonboxall/standard/pdt/tests/pdt.py | 1 | 5162 | """
run this with ./manage.py test website
see http://www.djangoproject.com/documentation/testing/ for details
"""
from django.conf import settings
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from django.template import Context
from django.template.loader import get_template
from django.test import TestCase
from django.test.client import Client
from paypal.standard.pdt.forms import PayPalPDTForm
from paypal.standard.pdt.models import PayPalPDT
from paypal.standard.pdt.signals import pdt_successful, pdt_failed
class DummyPayPalPDT(object):
    """Test double for PayPal's PDT endpoint.

    Renders a canned template response instead of performing a real
    postback, so tests never touch the network.
    """

    def __init__(self, update_context_dict=None):
        """Build the default response context, optionally overlaying extras.

        Args:
            update_context_dict: optional dict merged over the defaults.
                (The original declared a mutable default argument `{}` here;
                `None` avoids that anti-pattern with identical behavior.)
        """
        self.context_dict = {'st': 'SUCCESS', 'custom': 'cb736658-3aad-4694-956f-d0aeade80194',
                             'txn_id': '1ED550410S3402306', 'mc_gross': '225.00',
                             'business': settings.PAYPAL_RECEIVER_EMAIL, 'error': 'Error code: 1234'}
        self.context_dict.update(update_context_dict or {})

    def update_with_get_params(self, get_params):
        """Copy the PDT-relevant GET parameters into the response context."""
        # dict.has_key() was removed in Python 3; the `in` operator is the
        # portable equivalent and behaves identically here.
        if 'tx' in get_params:
            self.context_dict['txn_id'] = get_params.get('tx')
        if 'amt' in get_params:
            self.context_dict['mc_gross'] = get_params.get('amt')
        if 'cm' in get_params:
            self.context_dict['custom'] = get_params.get('cm')

    def _postback(self, test=True):
        """
        Perform a Fake PayPal PDT Postback request.
        """
        # Render the canned template with the current context instead of
        # contacting PayPal.
        t = get_template('pdt/fake_pdt_response.html')
        c = Context(self.context_dict)
        html = t.render(c)
        return html
class PDTTest(TestCase):
    """End-to-end tests of the paypal-pdt view using a faked PayPal postback."""

    def setUp(self):
        # set up some dummy PDT get parameters
        self.get_params = {"tx": "4WJ86550014687441", "st": "Completed", "amt": "225.00", "cc": "EUR",
                           "cm": "a3e192b8%2d8fea%2d4a86%2db2e8%2dd5bf502e36be", "item_number": "",
                           "sig": "blahblahblah"}
        # monkey patch the PayPalPDT._postback function so no network request
        # is made during the tests
        self.dpppdt = DummyPayPalPDT()
        self.dpppdt.update_with_get_params(self.get_params)
        PayPalPDT._postback = self.dpppdt._postback
        # Every test needs a client.
        self.client = Client()

    def test_parse_paypal_response(self):
        # _parse_paypal_response must populate fields without persisting.
        dpppdt = DummyPayPalPDT()
        paypal_response = dpppdt._postback()
        assert('SUCCESS' in paypal_response)
        self.assertEqual(len(PayPalPDT.objects.all()), 0)
        pdt_obj = PayPalPDT()
        pdt_obj.ipaddress = '127.0.0.1'
        pdt_obj._parse_paypal_response(paypal_response)
        self.assertEqual(len(PayPalPDT.objects.all()), 0)  # still not saved
        self.assertEqual(pdt_obj.txn_id, '1ED550410S3402306')

    def test_pdt(self):
        # A successful PDT GET creates exactly one PayPalPDT record.
        self.assertEqual(len(PayPalPDT.objects.all()), 0)
        self.dpppdt.update_with_get_params(self.get_params)
        paypal_response = self.client.get(reverse('paypal-pdt'), self.get_params)
        self.assertContains(paypal_response, 'Transaction complete', status_code=200)
        self.assertEqual(len(PayPalPDT.objects.all()), 1)

    def test_pdt_signals(self):
        # pdt_successful must fire (and pdt_failed must not) on success.
        self.successful_pdt_fired = False
        self.failed_pdt_fired = False

        def successful_pdt(sender, **kwargs):
            self.successful_pdt_fired = True
        pdt_successful.connect(successful_pdt)

        def failed_pdt(sender, **kwargs):
            self.failed_pdt_fired = True
        pdt_failed.connect(failed_pdt)

        self.assertEqual(len(PayPalPDT.objects.all()), 0)
        paypal_response = self.client.get(reverse('paypal-pdt'), self.get_params)
        self.assertContains(paypal_response, 'Transaction complete', status_code=200)
        self.assertEqual(len(PayPalPDT.objects.all()), 1)
        self.assertTrue(self.successful_pdt_fired)
        self.assertFalse(self.failed_pdt_fired)
        pdt_obj = PayPalPDT.objects.all()[0]
        self.assertEqual(pdt_obj.flag, False)

    def test_double_pdt_get(self):
        # Replaying the same PDT GET must not create a duplicate record.
        self.assertEqual(len(PayPalPDT.objects.all()), 0)
        paypal_response = self.client.get(reverse('paypal-pdt'), self.get_params)
        self.assertContains(paypal_response, 'Transaction complete', status_code=200)
        self.assertEqual(len(PayPalPDT.objects.all()), 1)
        pdt_obj = PayPalPDT.objects.all()[0]
        self.assertEqual(pdt_obj.flag, False)
        paypal_response = self.client.get(reverse('paypal-pdt'), self.get_params)
        self.assertContains(paypal_response, 'Transaction complete', status_code=200)
        self.assertEqual(len(PayPalPDT.objects.all()), 1)  # we don't create a new pdt
        pdt_obj = PayPalPDT.objects.all()[0]
        self.assertEqual(pdt_obj.flag, False)

    def test_no_txn_id_in_pdt(self):
        # A PDT response without a txn_id must fail and persist nothing.
        self.dpppdt.context_dict.pop('txn_id')
        self.get_params = {}
        paypal_response = self.client.get(reverse('paypal-pdt'), self.get_params)
        self.assertContains(paypal_response, 'Transaction Failed', status_code=200)
        self.assertEqual(len(PayPalPDT.objects.all()), 0)
| bsd-3-clause |
PIK4/pyspider | pyspider/result/result_worker.py | 74 | 2536 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-10-19 15:37:46
import time
import json
import logging
from six.moves import queue as Queue
# Module-level logger shared by the worker classes below.
logger = logging.getLogger("result")
class ResultWorker(object):
    """
    Consumes (task, result) pairs from ``inqueue`` and persists them via
    ``resultdb``. Override on_result if different handling is needed.
    """

    def __init__(self, resultdb, inqueue):
        self.resultdb = resultdb
        self.inqueue = inqueue
        self._quit = False  # set by quit() to stop the run() loop

    def on_result(self, task, result):
        '''Called every result'''
        if not result:
            # Empty/falsy results are dropped silently.
            return
        if 'taskid' in task and 'project' in task and 'url' in task:
            logger.info('result %s:%s %s -> %.30r' % (
                task['project'], task['taskid'], task['url'], result))
            return self.resultdb.save(
                project=task['project'],
                taskid=task['taskid'],
                url=task['url'],
                result=result
            )
        else:
            # Malformed task dict: log and drop.
            logger.warning('result UNKNOW -> %.30r' % result)
            return

    def quit(self):
        # Ask the run() loop to exit after its current iteration.
        self._quit = True

    def run(self):
        '''Run loop'''
        logger.info("result_worker starting...")
        while not self._quit:
            try:
                # Timeout keeps the loop responsive to quit() requests.
                task, result = self.inqueue.get(timeout=1)
                self.on_result(task, result)
            except Queue.Empty as e:
                # No work queued within the timeout; poll again.
                continue
            except KeyboardInterrupt:
                break
            except AssertionError as e:
                logger.error(e)
                continue
            except Exception as e:
                # Any other failure is logged with traceback; keep running.
                logger.exception(e)
                continue
        logger.info("result_worker exiting...")
class OneResultWorker(ResultWorker):
    '''Result Worker for one mode, write results to stdout'''

    def on_result(self, task, result):
        '''Called every result'''
        if not result:
            return
        if 'taskid' in task and 'project' in task and 'url' in task:
            logger.info('result %s:%s %s -> %.30r' % (
                task['project'], task['taskid'], task['url'], result))
            # Emit one JSON document per result on stdout instead of saving
            # to a result database.
            print(json.dumps({
                'taskid': task['taskid'],
                'project': task['project'],
                'url': task['url'],
                'result': result,
                'updatetime': time.time()
            }))
        else:
            logger.warning('result UNKNOW -> %.30r' % result)
            return
| apache-2.0 |
parthea/pydatalab | datalab/data/_csv.py | 6 | 7063 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Implements usefule CSV utilities."""
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import next
from builtins import str as newstr
from builtins import range
from builtins import object
import csv
import os
import pandas as pd
import random
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import tempfile
import datalab.storage
import datalab.utils
# 10 MB size cap; appears unused within the visible code of this module —
# TODO(review): confirm external users before removing.
_MAX_CSV_BYTES = 10000000
class Csv(object):
    """Represents a CSV file in GCS or locally with same schema."""

    def __init__(self, path, delimiter=b','):
        """Initializes an instance of a Csv instance.

        Args:
            path: path of the Csv file (local path or 'gs://...' URL).
            delimiter: the separator used to parse a Csv line.
        """
        self._path = path
        self._delimiter = delimiter

    @property
    def path(self):
        """The file path this Csv instance wraps."""
        return self._path

    @staticmethod
    def _read_gcs_lines(path, max_lines=None):
        """Read at most max_lines lines from a GCS object (all if None)."""
        return datalab.storage.Item.from_url(path).read_lines(max_lines)

    @staticmethod
    def _read_local_lines(path, max_lines=None):
        """Read at most max_lines lines from a local file (all if None).

        BUG FIX: the previous implementation iterated over ``open(path)``
        without closing it, leaking the file handle; a ``with`` block closes
        it deterministically.
        """
        lines = []
        with open(path) as f:
            for line in f:
                if max_lines is not None and len(lines) >= max_lines:
                    break
                lines.append(line)
        return lines

    def _is_probably_categorical(self, column):
        """Heuristic: a string column with few, short, unique values is
        treated as categorical."""
        if newstr(column.dtype) != 'object':
            # only string types (represented in DataFrame as object) can
            # potentially be categorical
            return False
        if len(column) == 0:
            # Guard: max() below would raise ValueError on an empty column.
            return False
        # Stringify each value before measuring; the previous code called
        # len() directly on the max element, which raises for non-string
        # values inside an object column.
        if max(len(newstr(v)) for v in column) > 100:
            return False  # value too long to be a category
        if len(set(column)) > 100:
            return False  # too many unique values to be a category
        return True

    def browse(self, max_lines=None, headers=None):
        """Try reading specified number of lines from the CSV object.

        Args:
            max_lines: max number of lines to read. If None, the whole file
                is read.
            headers: a list of strings as column names. If None, it will use
                "col0, col1...".
        Returns:
            A pandas DataFrame with the schema inferred from the data.
        Raises:
            Exception if the csv object cannot be read or not enough lines
            to read, or the headers size does not match columns size.
        """
        if self.path.startswith('gs://'):
            lines = Csv._read_gcs_lines(self.path, max_lines)
        else:
            lines = Csv._read_local_lines(self.path, max_lines)
        if len(lines) == 0:
            return pd.DataFrame(columns=headers)
        # Infer the column count from the first line.
        columns_size = len(next(csv.reader([lines[0]], delimiter=self._delimiter)))
        if headers is None:
            headers = ['col' + newstr(e) for e in range(columns_size)]
        if len(headers) != columns_size:
            raise Exception('Number of columns in CSV do not match number of headers')
        buf = StringIO()
        for line in lines:
            buf.write(line)
            buf.write('\n')
        buf.seek(0)
        df = pd.read_csv(buf, names=headers, delimiter=self._delimiter)
        # items() replaces iteritems(), which was removed in pandas 2.0.
        for key, col in df.items():
            if self._is_probably_categorical(col):
                df[key] = df[key].astype('category')
        return df

    def _create_federated_table(self, skip_header_rows):
        """Build a BigQuery federated (external) table over the GCS CSV."""
        import datalab.bigquery as bq
        df = self.browse(1, None)
        # read each column as STRING because we only want to sample rows.
        schema_train = bq.Schema([{'name': name, 'type': 'STRING'} for name in df.keys()])
        options = bq.CSVOptions(skip_leading_rows=(1 if skip_header_rows is True else 0))
        return bq.FederatedTable.from_storage(self.path,
                                              csv_options=options,
                                              schema=schema_train,
                                              max_bad_records=0)

    def _get_gcs_csv_row_count(self, federated_table):
        """Count rows of the GCS CSV by querying the federated table."""
        import datalab.bigquery as bq
        results = bq.Query('SELECT count(*) from data',
                           data_sources={'data': federated_table}).results()
        return results[0].values()[0]

    def sample_to(self, count, skip_header_rows, strategy, target):
        """Sample rows from GCS or local file and save results to target file.

        Args:
            count: number of rows to sample. If strategy is "BIGQUERY", it is
                used as approximate number.
            skip_header_rows: whether to skip first row when reading from
                source.
            strategy: can be "LOCAL" or "BIGQUERY". If local, the sampling
                happens in local memory, and number of resulting rows matches
                count. If BigQuery, sampling is done with BigQuery in cloud,
                and the number of resulting rows will be approximated to
                count.
            target: The target file path, can be GCS or local path.
        Raises:
            Exception if strategy is "BIGQUERY" but source is not a GCS path.
        """
        # TODO(qimingj) Add unit test
        # Read data from source into DataFrame.
        if sys.version_info.major > 2:
            xrange = range  # for python 3 compatibility
        if strategy == 'BIGQUERY':
            import datalab.bigquery as bq
            if not self.path.startswith('gs://'):
                raise Exception('Cannot use BIGQUERY if data is not in GCS')
            federated_table = self._create_federated_table(skip_header_rows)
            row_count = self._get_gcs_csv_row_count(federated_table)
            query = bq.Query('SELECT * from data', data_sources={'data': federated_table})
            # Sampling percentage approximating `count` rows out of row_count.
            sampling = bq.Sampling.random(count * 100 / float(row_count))
            sample = query.sample(sampling=sampling)
            df = sample.to_dataframe()
        elif strategy == 'LOCAL':
            local_file = self.path
            if self.path.startswith('gs://'):
                local_file = tempfile.mktemp()
                datalab.utils.gcs_copy_file(self.path, local_file)
            with open(local_file) as f:
                row_count = sum(1 for line in f)
            # Choose which row indexes to SKIP so that `count` rows remain.
            start_row = 1 if skip_header_rows is True else 0
            skip_count = row_count - count - 1 if skip_header_rows is True else row_count - count
            skip = sorted(random.sample(xrange(start_row, row_count), skip_count))
            header_row = 0 if skip_header_rows is True else None
            df = pd.read_csv(local_file, skiprows=skip, header=header_row, delimiter=self._delimiter)
            if self.path.startswith('gs://'):
                os.remove(local_file)
        else:
            raise Exception('strategy must be BIGQUERY or LOCAL')
        # Write to target.
        if target.startswith('gs://'):
            with tempfile.NamedTemporaryFile() as f:
                df.to_csv(f, header=False, index=False)
                f.flush()
                datalab.utils.gcs_copy_file(f.name, target)
        else:
            with open(target, 'w') as f:
                df.to_csv(f, header=False, index=False, sep=str(self._delimiter))
| apache-2.0 |
BaichuanWu/Blog_on_django | site-packages/django/views/debug.py | 49 | 44795 | from __future__ import unicode_literals
import datetime
import os
import re
import sys
import types
from django.conf import settings
from django.http import (HttpResponse, HttpResponseServerError,
HttpResponseNotFound, HttpRequest, build_request_repr)
from django.template import Template, Context, TemplateDoesNotExist
from django.template.defaultfilters import force_escape, pprint
from django.utils.datastructures import MultiValueDict
from django.utils.html import escape
from django.utils.encoding import force_bytes, smart_text
from django.utils.module_loading import import_string
from django.utils import six
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|PROFANITIES_LIST|SIGNATURE')
CLEANSED_SUBSTITUTE = '********************'
def linebreak_iter(template_source):
    """Yield the character offset of the start of each line in
    *template_source*, followed by a final value of len(source) + 1."""
    yield 0
    offset = 0
    # Every chunk before the last one was terminated by a '\n'; the offset
    # just past that '\n' is the start of the next line.
    for chunk in template_source.split('\n')[:-1]:
        offset += len(chunk) + 1
        yield offset
    yield len(template_source) + 1
class CallableSettingWrapper(object):
    """Wrapper for callable values appearing in settings.

    Ensures the debug page neither calls the wrapped object (#21345) nor
    breaks when the object forbids setting attributes on it (#23070).
    """
    def __init__(self, callable_setting):
        self._wrapped = callable_setting

    def __repr__(self):
        # Delegate to the wrapped callable's own repr.
        return '%r' % (self._wrapped,)
def cleanse_setting(key, value):
    """Cleanse an individual setting key/value of sensitive content.

    A value whose key matches HIDDEN_SETTINGS is replaced with the
    substitute string; dict values are cleansed recursively; callable
    results are wrapped so the debug page never invokes them.
    """
    try:
        key_is_sensitive = HIDDEN_SETTINGS.search(key)
    except TypeError:
        # If the key isn't regex-able, just return as-is.
        cleansed = value
    else:
        if key_is_sensitive:
            cleansed = CLEANSED_SUBSTITUTE
        elif isinstance(value, dict):
            cleansed = dict((k, cleanse_setting(k, v)) for k, v in value.items())
        else:
            cleansed = value

    if callable(cleansed):
        # For fixing #21345 and #23070
        cleansed = CallableSettingWrapper(cleansed)

    return cleansed
def get_safe_settings():
    "Returns a dictionary of the settings module, with sensitive settings blurred out."
    # Settings are, by convention, the upper-case attributes of the module.
    return dict(
        (name, cleanse_setting(name, getattr(settings, name)))
        for name in dir(settings) if name.isupper()
    )
def technical_500_response(request, exc_type, exc_value, tb):
    """
    Create a technical server error response. The last three arguments are
    the values returned from sys.exc_info() and friends.
    """
    reporter = ExceptionReporter(request, exc_type, exc_value, tb)
    # AJAX clients get the plain-text rendering; browsers get the full HTML page.
    if request.is_ajax():
        return HttpResponseServerError(reporter.get_traceback_text(),
                                       content_type='text/plain')
    return HttpResponseServerError(reporter.get_traceback_html(),
                                   content_type='text/html')
# Cache for the default exception reporter filter instance.
default_exception_reporter_filter = None


def get_exception_reporter_filter(request):
    """Return the exception reporter filter to use for *request*.

    Prefers a filter attached to the request itself; otherwise falls back to
    the configured default, which is instantiated lazily and cached at module
    level.
    """
    global default_exception_reporter_filter
    if default_exception_reporter_filter is None:
        # Load the default filter for the first time and cache it.
        filter_cls = import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)
        default_exception_reporter_filter = filter_cls()
    if not request:
        return default_exception_reporter_filter
    return getattr(request, 'exception_reporter_filter',
                   default_exception_reporter_filter)
class ExceptionReporterFilter(object):
    """
    Base for all exception reporter filter classes. All overridable hooks
    contain lenient default behaviors.
    """
    def get_request_repr(self, request):
        # A missing request renders as the repr of None.
        if request is None:
            return repr(None)
        return build_request_repr(
            request, POST_override=self.get_post_parameters(request))

    def get_post_parameters(self, request):
        # Lenient default: expose the POST data unfiltered.
        return {} if request is None else request.POST

    def get_traceback_frame_variables(self, request, tb_frame):
        # Lenient default: expose every frame-local variable.
        return list(six.iteritems(tb_frame.f_locals))
class SafeExceptionReporterFilter(ExceptionReporterFilter):
    """
    Use annotations made by the sensitive_post_parameters and
    sensitive_variables decorators to filter out sensitive information.
    """
    def is_active(self, request):
        """
        This filter is to add safety in production environments (i.e. DEBUG
        is False). If DEBUG is True then your site is not safe anyway.
        This hook is provided as a convenience to easily activate or
        deactivate the filter on a per request basis.
        """
        return settings.DEBUG is False

    def get_cleansed_multivaluedict(self, request, multivaluedict):
        """
        Replaces the keys in a MultiValueDict marked as sensitive with stars.
        This mitigates leaking sensitive POST parameters if something like
        request.POST['nonexistent_key'] throws an exception (#21098).
        """
        # Parameter names recorded on the request by the
        # sensitive_post_parameters decorator; absent means nothing to hide.
        sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
        if self.is_active(request) and sensitive_post_parameters:
            # Copy before mutating so the request's own dict is untouched.
            multivaluedict = multivaluedict.copy()
            for param in sensitive_post_parameters:
                if param in multivaluedict:
                    multivaluedict[param] = CLEANSED_SUBSTITUTE
        return multivaluedict

    def get_post_parameters(self, request):
        """
        Replaces the values of POST parameters marked as sensitive with
        stars (*********).
        """
        if request is None:
            return {}
        else:
            sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
            if self.is_active(request) and sensitive_post_parameters:
                cleansed = request.POST.copy()
                if sensitive_post_parameters == '__ALL__':
                    # Cleanse all parameters.
                    for k, v in cleansed.items():
                        cleansed[k] = CLEANSED_SUBSTITUTE
                    return cleansed
                else:
                    # Cleanse only the specified parameters.
                    for param in sensitive_post_parameters:
                        if param in cleansed:
                            cleansed[param] = CLEANSED_SUBSTITUTE
                    return cleansed
            else:
                return request.POST

    def cleanse_special_types(self, request, value):
        # Frame-local values that are themselves requests or MultiValueDicts
        # can carry sensitive POST data, so cleanse them before display.
        if isinstance(value, HttpRequest):
            # Cleanse the request's POST parameters.
            value = self.get_request_repr(value)
        elif isinstance(value, MultiValueDict):
            # Cleanse MultiValueDicts (request.POST is the one we usually care about)
            value = self.get_cleansed_multivaluedict(request, value)
        return value

    def get_traceback_frame_variables(self, request, tb_frame):
        """
        Replaces the values of variables marked as sensitive with
        stars (*********).
        """
        # Loop through the frame's callers to see if the sensitive_variables
        # decorator was used.
        current_frame = tb_frame.f_back
        sensitive_variables = None
        while current_frame is not None:
            if (current_frame.f_code.co_name == 'sensitive_variables_wrapper'
                    and 'sensitive_variables_wrapper' in current_frame.f_locals):
                # The sensitive_variables decorator was used, so we take note
                # of the sensitive variables' names.
                wrapper = current_frame.f_locals['sensitive_variables_wrapper']
                sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
                break
            current_frame = current_frame.f_back

        cleansed = {}
        if self.is_active(request) and sensitive_variables:
            if sensitive_variables == '__ALL__':
                # Cleanse all variables
                for name, value in tb_frame.f_locals.items():
                    cleansed[name] = CLEANSED_SUBSTITUTE
            else:
                # Cleanse specified variables
                for name, value in tb_frame.f_locals.items():
                    if name in sensitive_variables:
                        value = CLEANSED_SUBSTITUTE
                    else:
                        value = self.cleanse_special_types(request, value)
                    cleansed[name] = value
        else:
            # Potentially cleanse the request and any MultiValueDicts if they
            # are one of the frame variables.
            for name, value in tb_frame.f_locals.items():
                cleansed[name] = self.cleanse_special_types(request, value)

        if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper'
                and 'sensitive_variables_wrapper' in tb_frame.f_locals):
            # For good measure, obfuscate the decorated function's arguments in
            # the sensitive_variables decorator's frame, in case the variables
            # associated with those arguments were meant to be obfuscated from
            # the decorated function's frame.
            cleansed['func_args'] = CLEANSED_SUBSTITUTE
            cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE

        return cleansed.items()
class ExceptionReporter(object):
    """
    A class to organize and coordinate reporting on exceptions.
    """
    def __init__(self, request, exc_type, exc_value, tb, is_email=False):
        # exc_type/exc_value/tb are the values returned by sys.exc_info();
        # is_email selects the leaner email rendering of the templates.
        self.request = request
        self.filter = get_exception_reporter_filter(self.request)
        self.exc_type = exc_type
        self.exc_value = exc_value
        self.tb = tb
        self.is_email = is_email

        # Populated by get_traceback_data() when the exception relates to
        # template loading or rendering.
        self.template_info = None
        self.template_does_not_exist = False
        self.loader_debug_info = None

        # Handle deprecated string exceptions
        if isinstance(self.exc_type, six.string_types):
            self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type)
            self.exc_type = type(self.exc_value)

    def format_path_status(self, path):
        # Human-readable status of a candidate template path, shown in the
        # template-loader postmortem section.
        if not os.path.exists(path):
            return "File does not exist"
        if not os.path.isfile(path):
            return "Not a file"
        if not os.access(path, os.R_OK):
            return "File is not readable"
        return "File exists"

    def get_traceback_data(self):
        """Return a dictionary containing traceback information."""

        if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
            from django.template.loader import template_source_loaders
            self.template_does_not_exist = True
            self.loader_debug_info = []
            # If the template_source_loaders haven't been populated yet, you need
            # to provide an empty list for this for loop to not fail.
            if template_source_loaders is None:
                template_source_loaders = []
            for loader in template_source_loaders:
                try:
                    source_list_func = loader.get_template_sources
                    # NOTE: This assumes exc_value is the name of the template that
                    # the loader attempted to load.
                    template_list = [{
                        'name': t,
                        'status': self.format_path_status(t),
                    } for t in source_list_func(str(self.exc_value))]
                except AttributeError:
                    template_list = []
                loader_name = loader.__module__ + '.' + loader.__class__.__name__
                self.loader_debug_info.append({
                    'loader': loader_name,
                    'templates': template_list,
                })
        if (settings.TEMPLATE_DEBUG and
                hasattr(self.exc_value, 'django_template_source')):
            self.get_template_exception_info()

        frames = self.get_traceback_frames()
        for i, frame in enumerate(frames):
            if 'vars' in frame:
                # Pretty-print and HTML-escape the (already filtered) locals.
                frame['vars'] = [(k, force_escape(pprint(v))) for k, v in frame['vars']]
            frames[i] = frame

        unicode_hint = ''
        if self.exc_type and issubclass(self.exc_type, UnicodeError):
            # Show a small window of the offending string around the failing
            # range to hint at the encode/decode problem.
            start = getattr(self.exc_value, 'start', None)
            end = getattr(self.exc_value, 'end', None)
            if start is not None and end is not None:
                unicode_str = self.exc_value.args[1]
                unicode_hint = smart_text(unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))], 'ascii', errors='replace')
        from django import get_version
        c = {
            'is_email': self.is_email,
            'unicode_hint': unicode_hint,
            'frames': frames,
            'request': self.request,
            'filtered_POST': self.filter.get_post_parameters(self.request),
            'settings': get_safe_settings(),
            'sys_executable': sys.executable,
            'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
            'server_time': datetime.datetime.now(),
            'django_version_info': get_version(),
            'sys_path': sys.path,
            'template_info': self.template_info,
            'template_does_not_exist': self.template_does_not_exist,
            'loader_debug_info': self.loader_debug_info,
        }
        # Check whether exception info is available
        if self.exc_type:
            c['exception_type'] = self.exc_type.__name__
        if self.exc_value:
            c['exception_value'] = smart_text(self.exc_value, errors='replace')
        if frames:
            c['lastframe'] = frames[-1]
        return c

    def get_traceback_html(self):
        "Return HTML version of debug 500 HTTP error page."
        t = Template(TECHNICAL_500_TEMPLATE, name='Technical 500 template')
        c = Context(self.get_traceback_data(), use_l10n=False)
        return t.render(c)

    def get_traceback_text(self):
        "Return plain text version of debug 500 HTTP error page."
        t = Template(TECHNICAL_500_TEXT_TEMPLATE, name='Technical 500 template')
        c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False)
        return t.render(c)

    def get_template_exception_info(self):
        """Populate self.template_info from the failing template's source."""
        # django_template_source is (origin, (start, end)): the template
        # origin plus the character range of the offending token.
        origin, (start, end) = self.exc_value.django_template_source
        template_source = origin.reload()
        context_lines = 10
        line = 0
        upto = 0
        source_lines = []
        before = during = after = ""
        # Walk line-start offsets; the line containing [start, end] is split
        # into before/during/after for highlighting.
        for num, next in enumerate(linebreak_iter(template_source)):
            if start >= upto and end <= next:
                line = num
                before = escape(template_source[upto:start])
                during = escape(template_source[start:end])
                after = escape(template_source[end:next])
            source_lines.append((num, escape(template_source[upto:next])))
            upto = next
        total = len(source_lines)

        top = max(1, line - context_lines)
        bottom = min(total, line + 1 + context_lines)

        # In some rare cases, exc_value.args might be empty.
        try:
            message = self.exc_value.args[0]
        except IndexError:
            message = '(Could not get exception message)'

        self.template_info = {
            'message': message,
            'source_lines': source_lines[top:bottom],
            'before': before,
            'during': during,
            'after': after,
            'top': top,
            'bottom': bottom,
            'total': total,
            'line': line,
            'name': origin.name,
        }

    def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
        """
        Returns context_lines before and after lineno from file.
        Returns (pre_context_lineno, pre_context, context_line, post_context).
        """
        source = None
        # Prefer the module loader's view of the source (works for zipped
        # packages etc.); fall back to reading the file from disk.
        if loader is not None and hasattr(loader, "get_source"):
            try:
                source = loader.get_source(module_name)
            except ImportError:
                pass
            if source is not None:
                source = source.splitlines()
        if source is None:
            try:
                with open(filename, 'rb') as fp:
                    source = fp.read().splitlines()
            except (OSError, IOError):
                pass
        if source is None:
            # Source unavailable: signal the caller with a None line number.
            return None, [], None, []

        # If we just read the source from a file, or if the loader did not
        # apply tokenize.detect_encoding to decode the source into a Unicode
        # string, then we should do that ourselves.
        if isinstance(source[0], six.binary_type):
            encoding = 'ascii'
            for line in source[:2]:
                # File coding may be specified. Match pattern from PEP-263
                # (http://www.python.org/dev/peps/pep-0263/)
                match = re.search(br'coding[:=]\s*([-\w.]+)', line)
                if match:
                    encoding = match.group(1).decode('ascii')
                    break
            source = [six.text_type(sline, encoding, 'replace') for sline in source]

        lower_bound = max(0, lineno - context_lines)
        upper_bound = lineno + context_lines

        pre_context = source[lower_bound:lineno]
        context_line = source[lineno]
        post_context = source[lineno + 1:upper_bound]

        return lower_bound, pre_context, context_line, post_context

    def get_traceback_frames(self):
        """Build the list of frame dicts rendered by the debug templates."""
        frames = []
        tb = self.tb
        while tb is not None:
            # Support for __traceback_hide__ which is used by a few libraries
            # to hide internal frames.
            if tb.tb_frame.f_locals.get('__traceback_hide__'):
                tb = tb.tb_next
                continue
            filename = tb.tb_frame.f_code.co_filename
            function = tb.tb_frame.f_code.co_name
            # tb_lineno is 1-based; _get_lines_from_file indexes 0-based.
            lineno = tb.tb_lineno - 1
            loader = tb.tb_frame.f_globals.get('__loader__')
            module_name = tb.tb_frame.f_globals.get('__name__') or ''
            pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(filename, lineno, 7, loader, module_name)
            # Frames whose source cannot be found are dropped entirely.
            if pre_context_lineno is not None:
                frames.append({
                    'tb': tb,
                    'type': 'django' if module_name.startswith('django.') else 'user',
                    'filename': filename,
                    'function': function,
                    'lineno': lineno + 1,
                    'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
                    'id': id(tb),
                    'pre_context': pre_context,
                    'context_line': context_line,
                    'post_context': post_context,
                    'pre_context_lineno': pre_context_lineno + 1,
                })
            tb = tb.tb_next
        return frames

    def format_exception(self):
        """
        Return the same data as from traceback.format_exception.
        """
        import traceback
        frames = self.get_traceback_frames()
        tb = [(f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames]
        # NOTE(review): 'list' shadows the builtin within this method;
        # harmless locally, but worth renaming on the next behavior change.
        list = ['Traceback (most recent call last):\n']
        list += traceback.format_list(tb)
        list += traceback.format_exception_only(self.exc_type, self.exc_value)
        return list
def technical_404_response(request, exception):
    "Create a technical 404 error response. The exception should be the Http404."
    try:
        error_url = exception.args[0]['path']
    except (IndexError, TypeError, KeyError):
        error_url = request.path_info[1:]  # Trim leading slash

    try:
        tried = exception.args[0]['tried']
    except (IndexError, TypeError, KeyError):
        tried = []
    else:
        # An empty URLconf, or the stock project URLconf (a single 'admin'
        # pattern failing to match '/'), indicates a brand-new project: show
        # the friendly welcome page instead of the technical 404.
        is_stock_urlconf = (
            request.path == '/'
            and len(tried) == 1  # default URLconf
            and len(tried[0]) == 1
            and getattr(tried[0][0], 'app_name', '') == 'admin'
            and getattr(tried[0][0], 'namespace', '') == 'admin')
        if not tried or is_stock_urlconf:
            return default_urlconf(request)

    urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
    if isinstance(urlconf, types.ModuleType):
        urlconf = urlconf.__name__

    template = Template(TECHNICAL_404_TEMPLATE, name='Technical 404 template')
    context = Context({
        'urlconf': urlconf,
        'root_urlconf': settings.ROOT_URLCONF,
        'request_path': error_url,
        'urlpatterns': tried,
        'reason': force_bytes(exception, errors='replace'),
        'request': request,
        'settings': get_safe_settings(),
    })
    return HttpResponseNotFound(template.render(context), content_type='text/html')
def default_urlconf(request):
    "Create an empty URLconf 404 error response."
    # Rendered with an empty context: the welcome page is entirely static.
    template = Template(DEFAULT_URLCONF_TEMPLATE, name='Default URLconf template')
    return HttpResponse(template.render(Context({})), content_type='text/html')
#
# Templates are embedded in the file so that we know the error handler will
# always work even if the template loader is broken.
#
TECHNICAL_500_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}{% if request %} at {{ request.path_info|escape }}{% endif %}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
code, pre { font-size: 100%; white-space: pre-wrap; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%; }
table td.code pre { overflow:hidden; }
table.source th { color:#666; }
table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; color: #222; }
ul.traceback li.frame { padding-bottom:1em; color:#666; }
ul.traceback li.user { background-color:#e0e0e0; color:#000 }
div.context { padding:10px 0; overflow:hidden; }
div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; }
div.context ol li pre { display:inline; }
div.context ol.context-line li { color:#505050; background-color:#dfdfdf; }
div.context ol.context-line li span { position:absolute; right:32px; }
.user div.context ol.context-line li { background-color:#bbb; color:#000; }
.user div.context ol li { color:#666; }
div.commands { margin-left: 40px; }
div.commands a { color:#555; text-decoration:none; }
.user div.commands a { color: black; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 0 20px; }
#unicode-hint { background:#eee; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
h2 span.commands { font-size:.7em;}
span.commands a:link {color:#5E5694;}
pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; }
</style>
{% if not is_email %}
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
hideAll(getElementsByClassName(document, 'div', 'pastebin'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block': 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.innerHTML = s.innerHTML == uarr ? darr : uarr;
return false;
}
function switchPastebinFriendly(link) {
s1 = "Switch to copy-and-paste view";
s2 = "Switch back to interactive view";
link.innerHTML = link.innerHTML == s1 ? s2: s1;
toggle('browserTraceback', 'pastebinTraceback');
return false;
}
//-->
</script>
{% endif %}
</head>
<body>
<div id="summary">
<h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}{% if request %} at {{ request.path_info|escape }}{% endif %}</h1>
<pre class="exception_value">{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception message supplied{% endif %}</pre>
<table class="meta">
{% if request %}
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% endif %}
<tr>
<th>Django Version:</th>
<td>{{ django_version_info }}</td>
</tr>
{% if exception_type %}
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
{% endif %}
{% if exception_type and exception_value %}
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|force_escape }}</pre></td>
</tr>
{% endif %}
{% if lastframe %}
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
{% endif %}
<tr>
<th>Python Executable:</th>
<td>{{ sys_executable|escape }}</td>
</tr>
<tr>
<th>Python Version:</th>
<td>{{ sys_version_info }}</td>
</tr>
<tr>
<th>Python Path:</th>
<td><pre>{{ sys_path|pprint }}</pre></td>
</tr>
<tr>
<th>Server time:</th>
<td>{{server_time|date:"r"}}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p>
</div>
{% endif %}
{% if template_does_not_exist %}
<div id="template-not-exist">
<h2>Template-loader postmortem</h2>
{% if loader_debug_info %}
<p>Django tried loading these templates, in this order:</p>
<ul>
{% for loader in loader_debug_info %}
<li>Using loader <code>{{ loader.loader }}</code>:
<ul>
{% for t in loader.templates %}<li><code>{{ t.name }}</code> ({{ t.status }})</li>{% endfor %}
</ul>
</li>
{% endfor %}
</ul>
{% else %}
<p>Django couldn't find any templates because your <code>TEMPLATE_LOADERS</code> setting is empty!</p>
{% endif %}
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Error during template rendering</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}{% ifnotequal template_info.bottom template_info.total %} cut-bottom{% endifnotequal %}">
{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>{{ template_info.before }}<span class="specific">{{ template_info.during }}</span>{{ template_info.after }}</td></tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endifequal %}
{% endfor %}
</table>
</div>
{% endif %}
{% if frames %}
<div id="traceback">
<h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);">Switch to copy-and-paste view</a></span>{% endif %}</h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
<li class="frame {{ frame.type }}">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context and not is_email %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">{% for line in frame.pre_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>{% endfor %}</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line"><li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol>
{% if frame.post_context and not is_email %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">{% for line in frame.post_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>{% endfor %}</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
{% if is_email %}
<h2>Local Vars</h2>
{% else %}
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
{% endif %}
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:"0" %}
<tr>
<td>{{ var.0|force_escape }}</td>
<td class="code"><pre>{{ var.1 }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post">
{% if not is_email %}
<div id="pastebinTraceback" class="pastebin">
<input type="hidden" name="language" value="PythonConsole">
<input type="hidden" name="title" value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}">
<input type="hidden" name="source" value="Django Dpaste Agent">
<input type="hidden" name="poster" value="Django">
<textarea name="content" id="traceback_area" cols="140" rows="25">
Environment:
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri|escape }}
{% endif %}
Django Version: {{ django_version_info }}
Python Version: {{ sys_version_info }}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template Loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} ({{ t.status }})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}
Traceback:
{% for frame in frames %}File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}
{% endfor %}
Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}
Exception Value: {{ exception_value|force_escape }}
</textarea>
<br><br>
<input type="submit" value="Share this traceback on a public Web site">
</div>
</form>
</div>
{% endif %}
{% endif %}
<div id="requestinfo">
<h2>Request information</h2>
{% if request %}
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.GET.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if filtered_POST %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in filtered_POST.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="files-info">FILES</h3>
{% if request.FILES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.FILES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No FILES data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.COOKIES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>Request data not supplied</p>
{% endif %}
<h3 id="settings-info">Settings</h3>
<h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4>
<table class="req">
<thead>
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in settings.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% if not is_email %}
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in your
Django settings file. Change that to <code>False</code>, and Django will
display a standard 500 page.
</p>
</div>
{% endif %}
</body>
</html>
"""
TECHNICAL_500_TEXT_TEMPLATE = """{% load firstof from future %}{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %}
{% firstof exception_value 'No exception message supplied' %}
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri }}{% endif %}
Django Version: {{ django_version_info }}
Python Executable: {{ sys_executable }}
Python Version: {{ sys_version_info }}
Python Path: {{ sys_path }}
Server time: {{server_time|date:"r"}}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} ({{ t.status }})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}{% if frames %}
Traceback:
{% for frame in frames %}File "{{ frame.filename }}" in {{ frame.function }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line }}{% endif %}
{% endfor %}
{% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %}
{% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %}
{% if request %}Request information:
GET:{% for k, v in request.GET.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %}
POST:{% for k, v in filtered_POST.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %}
FILES:{% for k, v in request.FILES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %}
COOKIES:{% for k, v in request.COOKIES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %}
META:{% for k, v in request.META.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% else %}Request data not supplied
{% endif %}
Settings:
Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
You're seeing this error because you have DEBUG = True in your
Django settings file. Change that to False, and Django will
display a standard 500 page.
"""
TECHNICAL_404_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Page not found at {{ request.path_info|escape }}</title>
<meta name="robots" content="NONE,NOARCHIVE">
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
table { border:none; border-collapse: collapse; width:100%; }
td, th { vertical-align:top; padding:2px 3px; }
th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#info { background:#f6f6f6; }
#info ol { margin: 0.5em 4em; }
#info ol li { font-family: monospace; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Page not found <span>(404)</span></h1>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
</table>
</div>
<div id="info">
{% if urlpatterns %}
<p>
Using the URLconf defined in <code>{{ urlconf }}</code>,
Django tried these URL patterns, in this order:
</p>
<ol>
{% for pattern in urlpatterns %}
<li>
{% for pat in pattern %}
{{ pat.regex.pattern }}
{% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %}
{% endfor %}
</li>
{% endfor %}
</ol>
<p>The current URL, <code>{{ request_path|escape }}</code>, didn't match any of these.</p>
{% else %}
<p>{{ reason }}</p>
{% endif %}
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in
your Django settings file. Change that to <code>False</code>, and Django
will display a standard 404 page.
</p>
</div>
</body>
</html>
"""
DEFAULT_URLCONF_TEMPLATE = """
<!DOCTYPE html>
<html lang="en"><head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE"><title>Welcome to Django</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#summary { background: #e0ebff; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#instructions { background:#f6f6f6; }
#summary table { border:none; background:transparent; }
</style>
</head>
<body>
<div id="summary">
<h1>It worked!</h1>
<h2>Congratulations on your first Django-powered page.</h2>
</div>
<div id="instructions">
<p>
Of course, you haven't actually done any work yet.
Next, start your first app by running <code>python manage.py startapp [app_label]</code>.
</p>
</div>
<div id="explanation">
<p>
You're seeing this message because you have <code>DEBUG = True</code> in your
Django settings file and you haven't configured any URLs. Get to work!
</p>
</div>
</body></html>
"""
| mit |
sebadiaz/rethinkdb | external/v8_3.30.33.16/build/gyp/pylib/gyp/MSVSSettings_test.py | 778 | 65880 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the MSVSSettings.py file."""
import StringIO
import unittest
import gyp.MSVSSettings as MSVSSettings
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def _ExpectedWarnings(self, expected):
"""Compares recorded lines to expected warnings."""
self.stderr.seek(0)
actual = self.stderr.read().split('\n')
actual = [line for line in actual if line]
self.assertEqual(sorted(expected), sorted(actual))
def testValidateMSVSSettings_tool_names(self):
"""Tests that only MSVS tool names are allowed."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {},
'VCLinkerTool': {},
'VCMIDLTool': {},
'foo': {},
'VCResourceCompilerTool': {},
'VCLibrarianTool': {},
'VCManifestTool': {},
'ClCompile': {}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized tool foo',
'Warning: unrecognized tool ClCompile'])
def testValidateMSVSSettings_settings(self):
"""Tests that for invalid MSVS settings."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '5',
'BrowseInformation': 'fdkslj',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '-1',
'CompileAs': '1',
'DebugInformationFormat': '2',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': '1',
'ExceptionHandling': '1',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '1',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '1',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'string1;string2',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '1',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '1',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '2',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'CLRImageType': '2',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '2',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': '2',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'ErrorReporting': '2',
'FixedBaseAddress': '2',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '2',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '2',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '2',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '2',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '2',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'true',
'Version': 'a string1'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'CPreprocessOptions': 'a string1',
'DefaultCharType': '1',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '1',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'notgood': 'bogus',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'VCResourceCompilerTool': {
'AdditionalOptions': 'a string1',
'AdditionalIncludeDirectories': 'folder1;folder2',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'notgood2': 'bogus',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a string1',
'ManifestResourceFile': 'a_file_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'truel',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}},
self.stderr)
self._ExpectedWarnings([
'Warning: for VCCLCompilerTool/BasicRuntimeChecks, '
'index value (5) not in expected range [0, 4)',
'Warning: for VCCLCompilerTool/BrowseInformation, '
"invalid literal for int() with base 10: 'fdkslj'",
'Warning: for VCCLCompilerTool/CallingConvention, '
'index value (-1) not in expected range [0, 3)',
'Warning: for VCCLCompilerTool/DebugInformationFormat, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCCLCompilerTool/Enableprefast',
'Warning: unrecognized setting VCCLCompilerTool/ZZXYZ',
'Warning: for VCLinkerTool/TargetMachine, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCMIDLTool/notgood',
'Warning: unrecognized setting VCResourceCompilerTool/notgood2',
'Warning: for VCManifestTool/UpdateFileHashes, '
"expected bool; got 'truel'"
''])
def testValidateMSBuildSettings_settings(self):
"""Tests that for invalid MSBuild settings."""
MSVSSettings.ValidateMSBuildSettings(
{'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'false',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'BuildingInIDE': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'CompileAsManaged': 'Pure',
'CreateHotpatchableImage': 'true',
'DebugInformationFormat': 'ProgramDatabase',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'SyncCThrow',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Precise',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'FunctionLevelLinking': 'false',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'false',
'MinimalRebuild': 'true',
'MultiProcessorCompilation': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Disabled',
'PrecompiledHeader': 'NotUsing',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'string1;string2',
'PreprocessOutputPath': 'a string1',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'false',
'ProcessorNumber': '33',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TreatSpecificWarningsAsErrors': 'string1;string2',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UseUnicodeForAssemblerListing': 'true',
'WarningLevel': 'TurnOffAllWarnings',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'Link': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'BuildingInIDE': 'true',
'CLRImageType': 'ForceIJWImage',
'CLRSupportLastError': 'Enabled',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'CreateHotPatchableImage': 'X86Image',
'DataExecutionPrevention': 'false',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': 'NotSet',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'FixedBaseAddress': 'false',
'ForceFileOutput': 'Enabled',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'a_file_list',
'ImageHasSafeExceptionHandlers': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'false',
'LinkDLL': 'true',
'LinkErrorReporting': 'SendErrorReport',
'LinkStatus': 'true',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'MSDOSStubFileName': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': 'false',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'PreventDllBinding': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SectionAlignment': '33',
'SetChecksum': 'true',
'ShowProgress': 'LinkVerboseREF',
'SpecifySectionAttributes': 'a string1',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Console',
'SupportNobindOfDelayLoadedDLL': 'true',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TrackerLogDirectory': 'a_folder',
'TreatLinkerWarningAsErrors': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'AsInvoker',
'UACUIAccess': 'true',
'Version': 'a string1'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'Culture': '0x236',
'IgnoreStandardIncludePath': 'true',
'NullTerminateStrings': 'true',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ApplicationConfigurationMode': 'true',
'ClientStubFile': 'a_file_name',
'CPreprocessOptions': 'a string1',
'DefaultCharType': 'Signed',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'EnableCustom',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateClientFiles': 'Stub',
'GenerateServerFiles': 'None',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'LocaleID': '33',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'ServerStubFile': 'a_file_name',
'StructMemberAlignment': 'NotSet',
'SuppressCompilerWarnings': 'true',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Itanium',
'TrackerLogDirectory': 'a_folder',
'TypeLibFormat': 'NewFormat',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'Lib': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'DisplayLibrary': 'a string1',
'ErrorReporting': 'PromptImmediately',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkTimeCodeGeneration': 'true',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'Name': 'a_file_name',
'OutputFile': 'a_file_name',
'RemoveObjects': 'file1;file2',
'SubSystem': 'Console',
'SuppressStartupBanner': 'true',
'TargetMachine': 'MachineX86i',
'TrackerLogDirectory': 'a_folder',
'TreatLibWarningAsErrors': 'true',
'UseUnicodeResponseFiles': 'true',
'Verbose': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'EnableDPIAwareness': 'fal',
'GenerateCatalogFiles': 'truel',
'GenerateCategoryTags': 'true',
'InputResourceManifests': 'a string1',
'ManifestFromManagedAssembly': 'a_file_name',
'notgood3': 'bogus',
'OutputManifestFile': 'a_file_name',
'OutputResourceManifests': 'a string1',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressDependencyElement': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'a_file_name'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized setting ClCompile/Enableprefast',
'Warning: unrecognized setting ClCompile/ZZXYZ',
'Warning: unrecognized setting Manifest/notgood3',
'Warning: for Manifest/GenerateCatalogFiles, '
"expected bool; got 'truel'",
'Warning: for Lib/TargetMachine, unrecognized enumerated value '
'MachineX86i',
"Warning: for Manifest/EnableDPIAwareness, expected bool; got 'fal'"])
def testConvertToMSBuildSettings_empty(self):
"""Tests an empty conversion."""
msvs_settings = {}
expected_msbuild_settings = {}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_minimal(self):
"""Tests a minimal conversion."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': '0',
},
'VCLinkerTool': {
'LinkTimeCodeGeneration': '1',
'ErrorReporting': '1',
'DataExecutionPrevention': '2',
},
}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': 'Default',
},
'Link': {
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'LinkErrorReporting': 'PromptImmediately',
'DataExecutionPrevention': 'true',
},
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_warnings(self):
"""Tests conversion that generates warnings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2',
# These are incorrect values:
'BasicRuntimeChecks': '12',
'BrowseInformation': '21',
'UsePrecompiledHeader': '13',
'GeneratePreprocessedFile': '14'},
'VCLinkerTool': {
# These are incorrect values:
'Driver': '10',
'LinkTimeCodeGeneration': '31',
'ErrorReporting': '21',
'FixedBaseAddress': '6'},
'VCResourceCompilerTool': {
# Custom
'Culture': '1003'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2'},
'Link': {},
'ResourceCompile': {
# Custom
'Culture': '0x03eb'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([
'Warning: while converting VCCLCompilerTool/BasicRuntimeChecks to '
'MSBuild, index value (12) not in expected range [0, 4)',
'Warning: while converting VCCLCompilerTool/BrowseInformation to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/UsePrecompiledHeader to '
'MSBuild, index value (13) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/GeneratePreprocessedFile to '
'MSBuild, value must be one of [0, 1, 2]; got 14',
'Warning: while converting VCLinkerTool/Driver to '
'MSBuild, index value (10) not in expected range [0, 4)',
'Warning: while converting VCLinkerTool/LinkTimeCodeGeneration to '
'MSBuild, index value (31) not in expected range [0, 5)',
'Warning: while converting VCLinkerTool/ErrorReporting to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCLinkerTool/FixedBaseAddress to '
'MSBuild, index value (6) not in expected range [0, 3)',
])
def testConvertToMSBuildSettings_full_synthetic(self):
"""Tests conversion of all the MSBuild settings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '1',
'BrowseInformation': '2',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '0',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': '0',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '1',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '0',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '2',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '0',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '0',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': '1',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': '1',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '0',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'ErrorReporting': '0',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2;file3',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '1',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '0',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '0',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '3',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '1',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'false',
'UseUnicodeResponseFiles': 'true',
'Version': 'a_string'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': '0',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '2',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'EmbedManifest': 'true',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'ManifestResourceFile': 'my_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string /J',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'true',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': 'NotSet',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'AnySuitable',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'Create',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'WarningLevel': 'Level2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'Link': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': 'ForceIJWImage',
'CLRThreadAttribute': 'STAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': 'Driver',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'NoErrorReport',
'LinkTimeCodeGeneration': 'PGInstrument',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': '',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'true',
'ShowProgress': 'NotSet',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Windows',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineARM',
'TerminalServerAware': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'HighestAvailable',
'UACUIAccess': 'true',
'Version': 'a_string'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '0x03eb',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': 'Unsigned',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'All',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '4',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Win32',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'Lib': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'my_name'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'false'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
  def testConvertToMSBuildSettings_actual(self):
    """Tests the conversion of an actual project.

    A VS2008 project with most of the options defined was created through the
    VS2008 IDE.  It was then converted to VS2010.  The tool settings found in
    the .vcproj and .vcxproj files were converted to the two dictionaries
    msvs_settings and expected_msbuild_settings.

    Note that for many settings, the VS2010 converter adds macros like
    %(AdditionalIncludeDirectories) to make sure that inherited values are
    included.  Since the Gyp projects we generate do not use inheritance,
    we removed these macros.  They were:
        ClCompile:
            AdditionalIncludeDirectories:  ';%(AdditionalIncludeDirectories)'
            AdditionalOptions:  ' %(AdditionalOptions)'
            AdditionalUsingDirectories:  ';%(AdditionalUsingDirectories)'
            DisableSpecificWarnings: ';%(DisableSpecificWarnings)',
            ForcedIncludeFiles:  ';%(ForcedIncludeFiles)',
            ForcedUsingFiles:  ';%(ForcedUsingFiles)',
            PreprocessorDefinitions:  ';%(PreprocessorDefinitions)',
            UndefinePreprocessorDefinitions:
                ';%(UndefinePreprocessorDefinitions)',
        Link:
            AdditionalDependencies:  ';%(AdditionalDependencies)',
            AdditionalLibraryDirectories:  ';%(AdditionalLibraryDirectories)',
            AdditionalManifestDependencies:
                ';%(AdditionalManifestDependencies)',
            AdditionalOptions:  ' %(AdditionalOptions)',
            AddModuleNamesToAssembly:  ';%(AddModuleNamesToAssembly)',
            AssemblyLinkResource:  ';%(AssemblyLinkResource)',
            DelayLoadDLLs:  ';%(DelayLoadDLLs)',
            EmbedManagedResourceFile:  ';%(EmbedManagedResourceFile)',
            ForceSymbolReferences:  ';%(ForceSymbolReferences)',
            IgnoreSpecificDefaultLibraries:
                ';%(IgnoreSpecificDefaultLibraries)',
        ResourceCompile:
            AdditionalIncludeDirectories:  ';%(AdditionalIncludeDirectories)',
            AdditionalOptions:  ' %(AdditionalOptions)',
            PreprocessorDefinitions:  ';%(PreprocessorDefinitions)',
        Manifest:
            AdditionalManifestFiles:  ';%(AdditionalManifestFiles)',
            AdditionalOptions:  ' %(AdditionalOptions)',
            InputResourceManifests:  ';%(InputResourceManifests)',
    """
    # Tool settings exactly as they appear in the VS2008 .vcproj file,
    # keyed by MSVS tool name.
    msvs_settings = {
        'VCCLCompilerTool': {
            'AdditionalIncludeDirectories': 'dir1',
            'AdditionalOptions': '/more',
            'AdditionalUsingDirectories': 'test',
            'AssemblerListingLocation': '$(IntDir)\\a',
            'AssemblerOutput': '1',
            'BasicRuntimeChecks': '3',
            'BrowseInformation': '1',
            'BrowseInformationFile': '$(IntDir)\\e',
            'BufferSecurityCheck': 'false',
            'CallingConvention': '1',
            'CompileAs': '1',
            'DebugInformationFormat': '4',
            'DefaultCharIsUnsigned': 'true',
            'Detect64BitPortabilityProblems': 'true',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'abc',
            'EnableEnhancedInstructionSet': '1',
            'EnableFiberSafeOptimizations': 'true',
            'EnableFunctionLevelLinking': 'true',
            'EnableIntrinsicFunctions': 'true',
            'EnablePREfast': 'true',
            'ErrorReporting': '2',
            'ExceptionHandling': '2',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': '2',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': '1',
            'ForceConformanceInForLoopScope': 'false',
            'ForcedIncludeFiles': 'def',
            'ForcedUsingFiles': 'ge',
            'GeneratePreprocessedFile': '2',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': '1',
            'KeepComments': 'true',
            'MinimalRebuild': 'true',
            'ObjectFile': '$(IntDir)\\b',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMP': 'true',
            'Optimization': '3',
            'PrecompiledHeaderFile': '$(IntDir)\\$(TargetName).pche',
            'PrecompiledHeaderThrough': 'StdAfx.hd',
            'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
            'ProgramDataBaseFileName': '$(IntDir)\\vc90b.pdb',
            'RuntimeLibrary': '3',
            'RuntimeTypeInfo': 'false',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '3',
            'SuppressStartupBanner': 'false',
            'TreatWChar_tAsBuiltInType': 'false',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'wer',
            'UseFullPaths': 'true',
            'UsePrecompiledHeader': '0',
            'UseUnicodeResponseFiles': 'false',
            'WarnAsError': 'true',
            'WarningLevel': '3',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': '$(IntDir)\\c'},
        'VCLinkerTool': {
            'AdditionalDependencies': 'zx',
            'AdditionalLibraryDirectories': 'asd',
            'AdditionalManifestDependencies': 's2',
            'AdditionalOptions': '/mor2',
            'AddModuleNamesToAssembly': 'd1',
            'AllowIsolation': 'false',
            'AssemblyDebug': '1',
            'AssemblyLinkResource': 'd5',
            'BaseAddress': '23423',
            'CLRImageType': '3',
            'CLRThreadAttribute': '1',
            'CLRUnmanagedCodeCheck': 'true',
            'DataExecutionPrevention': '0',
            'DelayLoadDLLs': 'd4',
            'DelaySign': 'true',
            'Driver': '2',
            'EmbedManagedResourceFile': 'd2',
            'EnableCOMDATFolding': '1',
            'EnableUAC': 'false',
            'EntryPointSymbol': 'f5',
            'ErrorReporting': '2',
            'FixedBaseAddress': '1',
            'ForceSymbolReferences': 'd3',
            'FunctionOrder': 'fssdfsd',
            'GenerateDebugInformation': 'true',
            'GenerateManifest': 'false',
            'GenerateMapFile': 'true',
            'HeapCommitSize': '13',
            'HeapReserveSize': '12',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreDefaultLibraryNames': 'flob;flok',
            'IgnoreEmbeddedIDL': 'true',
            'IgnoreImportLibrary': 'true',
            'ImportLibrary': 'f4',
            'KeyContainer': 'f7',
            'KeyFile': 'f6',
            'LargeAddressAware': '2',
            'LinkIncremental': '0',
            'LinkLibraryDependencies': 'false',
            'LinkTimeCodeGeneration': '1',
            'ManifestFile':
                '$(IntDir)\\$(TargetFileName).2intermediate.manifest',
            'MapExports': 'true',
            'MapFileName': 'd5',
            'MergedIDLBaseFileName': 'f2',
            'MergeSections': 'f5',
            'MidlCommandFile': 'f1',
            'ModuleDefinitionFile': 'sdsd',
            'OptimizeForWindows98': '2',
            'OptimizeReferences': '2',
            'OutputFile': '$(OutDir)\\$(ProjectName)2.exe',
            'PerUserRedirection': 'true',
            'Profile': 'true',
            'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
            'ProgramDatabaseFile': 'Flob.pdb',
            'RandomizedBaseAddress': '1',
            'RegisterOutput': 'true',
            'ResourceOnlyDLL': 'true',
            'SetChecksum': 'false',
            'ShowProgress': '1',
            'StackCommitSize': '15',
            'StackReserveSize': '14',
            'StripPrivateSymbols': 'd3',
            'SubSystem': '1',
            'SupportUnloadOfDelayLoadedDLL': 'true',
            'SuppressStartupBanner': 'false',
            'SwapRunFromCD': 'true',
            'SwapRunFromNet': 'true',
            'TargetMachine': '1',
            'TerminalServerAware': '1',
            'TurnOffAssemblyGeneration': 'true',
            'TypeLibraryFile': 'f3',
            'TypeLibraryResourceID': '12',
            'UACExecutionLevel': '2',
            'UACUIAccess': 'true',
            'UseLibraryDependencyInputs': 'true',
            'UseUnicodeResponseFiles': 'false',
            'Version': '333'},
        'VCResourceCompilerTool': {
            'AdditionalIncludeDirectories': 'f3',
            'AdditionalOptions': '/more3',
            'Culture': '3084',
            'IgnoreStandardIncludePath': 'true',
            'PreprocessorDefinitions': '_UNICODE;UNICODE2',
            'ResourceOutputFileName': '$(IntDir)/$(InputName)3.res',
            'ShowProgress': 'true'},
        'VCManifestTool': {
            'AdditionalManifestFiles': 'sfsdfsd',
            'AdditionalOptions': 'afdsdafsd',
            'AssemblyIdentity': 'sddfdsadfsa',
            'ComponentFileName': 'fsdfds',
            'DependencyInformationFile': '$(IntDir)\\mt.depdfd',
            'EmbedManifest': 'false',
            'GenerateCatalogFiles': 'true',
            'InputResourceManifests': 'asfsfdafs',
            'ManifestResourceFile':
                '$(IntDir)\\$(TargetFileName).embed.manifest.resfdsf',
            'OutputManifestFile': '$(TargetPath).manifestdfs',
            'RegistrarScriptFile': 'sdfsfd',
            'ReplacementsFile': 'sdffsd',
            'SuppressStartupBanner': 'false',
            'TypeLibraryFile': 'sfsd',
            'UpdateFileHashes': 'true',
            'UpdateFileHashesSearchPath': 'sfsd',
            'UseFAT32Workaround': 'true',
            'UseUnicodeResponseFiles': 'false',
            'VerboseOutput': 'true'}}
    # The same settings as produced by the VS2010 upgrade of the project,
    # keyed by MSBuild item/tool name.  Note that some tools are renamed
    # (e.g. VCManifestTool's ManifestResourceFile moves to the separate
    # ManifestResourceCompile group) and some settings move to the
    # top-level '' group.
    expected_msbuild_settings = {
        'ClCompile': {
            'AdditionalIncludeDirectories': 'dir1',
            'AdditionalOptions': '/more /J',
            'AdditionalUsingDirectories': 'test',
            'AssemblerListingLocation': '$(IntDir)a',
            'AssemblerOutput': 'AssemblyCode',
            'BasicRuntimeChecks': 'EnableFastChecks',
            'BrowseInformation': 'true',
            'BrowseInformationFile': '$(IntDir)e',
            'BufferSecurityCheck': 'false',
            'CallingConvention': 'FastCall',
            'CompileAs': 'CompileAsC',
            'DebugInformationFormat': 'EditAndContinue',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'abc',
            'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
            'EnableFiberSafeOptimizations': 'true',
            'EnablePREfast': 'true',
            'ErrorReporting': 'Queue',
            'ExceptionHandling': 'Async',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': 'Size',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': 'Strict',
            'ForceConformanceInForLoopScope': 'false',
            'ForcedIncludeFiles': 'def',
            'ForcedUsingFiles': 'ge',
            'FunctionLevelLinking': 'true',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': 'OnlyExplicitInline',
            'IntrinsicFunctions': 'true',
            'MinimalRebuild': 'true',
            'ObjectFileName': '$(IntDir)b',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMPSupport': 'true',
            'Optimization': 'Full',
            'PrecompiledHeader': 'NotUsing',  # Actual conversion gives ''
            'PrecompiledHeaderFile': 'StdAfx.hd',
            'PrecompiledHeaderOutputFile': '$(IntDir)$(TargetName).pche',
            'PreprocessKeepComments': 'true',
            'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
            'PreprocessSuppressLineNumbers': 'true',
            'PreprocessToFile': 'true',
            'ProgramDataBaseFileName': '$(IntDir)vc90b.pdb',
            'RuntimeLibrary': 'MultiThreadedDebugDLL',
            'RuntimeTypeInfo': 'false',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '4Bytes',
            'SuppressStartupBanner': 'false',
            'TreatWarningAsError': 'true',
            'TreatWChar_tAsBuiltInType': 'false',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'wer',
            'UseFullPaths': 'true',
            'WarningLevel': 'Level3',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': '$(IntDir)c'},
        'Link': {
            'AdditionalDependencies': 'zx',
            'AdditionalLibraryDirectories': 'asd',
            'AdditionalManifestDependencies': 's2',
            'AdditionalOptions': '/mor2',
            'AddModuleNamesToAssembly': 'd1',
            'AllowIsolation': 'false',
            'AssemblyDebug': 'true',
            'AssemblyLinkResource': 'd5',
            'BaseAddress': '23423',
            'CLRImageType': 'ForceSafeILImage',
            'CLRThreadAttribute': 'MTAThreadingAttribute',
            'CLRUnmanagedCodeCheck': 'true',
            'DataExecutionPrevention': '',
            'DelayLoadDLLs': 'd4',
            'DelaySign': 'true',
            'Driver': 'UpOnly',
            'EmbedManagedResourceFile': 'd2',
            'EnableCOMDATFolding': 'false',
            'EnableUAC': 'false',
            'EntryPointSymbol': 'f5',
            'FixedBaseAddress': 'false',
            'ForceSymbolReferences': 'd3',
            'FunctionOrder': 'fssdfsd',
            'GenerateDebugInformation': 'true',
            'GenerateMapFile': 'true',
            'HeapCommitSize': '13',
            'HeapReserveSize': '12',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreEmbeddedIDL': 'true',
            'IgnoreSpecificDefaultLibraries': 'flob;flok',
            'ImportLibrary': 'f4',
            'KeyContainer': 'f7',
            'KeyFile': 'f6',
            'LargeAddressAware': 'true',
            'LinkErrorReporting': 'QueueForNextLogin',
            'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
            'ManifestFile': '$(IntDir)$(TargetFileName).2intermediate.manifest',
            'MapExports': 'true',
            'MapFileName': 'd5',
            'MergedIDLBaseFileName': 'f2',
            'MergeSections': 'f5',
            'MidlCommandFile': 'f1',
            'ModuleDefinitionFile': 'sdsd',
            'NoEntryPoint': 'true',
            'OptimizeReferences': 'true',
            'OutputFile': '$(OutDir)$(ProjectName)2.exe',
            'PerUserRedirection': 'true',
            'Profile': 'true',
            'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
            'ProgramDatabaseFile': 'Flob.pdb',
            'RandomizedBaseAddress': 'false',
            'RegisterOutput': 'true',
            'SetChecksum': 'false',
            'ShowProgress': 'LinkVerbose',
            'StackCommitSize': '15',
            'StackReserveSize': '14',
            'StripPrivateSymbols': 'd3',
            'SubSystem': 'Console',
            'SupportUnloadOfDelayLoadedDLL': 'true',
            'SuppressStartupBanner': 'false',
            'SwapRunFromCD': 'true',
            'SwapRunFromNET': 'true',
            'TargetMachine': 'MachineX86',
            'TerminalServerAware': 'false',
            'TurnOffAssemblyGeneration': 'true',
            'TypeLibraryFile': 'f3',
            'TypeLibraryResourceID': '12',
            'UACExecutionLevel': 'RequireAdministrator',
            'UACUIAccess': 'true',
            'Version': '333'},
        'ResourceCompile': {
            'AdditionalIncludeDirectories': 'f3',
            'AdditionalOptions': '/more3',
            'Culture': '0x0c0c',
            'IgnoreStandardIncludePath': 'true',
            'PreprocessorDefinitions': '_UNICODE;UNICODE2',
            'ResourceOutputFileName': '$(IntDir)%(Filename)3.res',
            'ShowProgress': 'true'},
        'Manifest': {
            'AdditionalManifestFiles': 'sfsdfsd',
            'AdditionalOptions': 'afdsdafsd',
            'AssemblyIdentity': 'sddfdsadfsa',
            'ComponentFileName': 'fsdfds',
            'GenerateCatalogFiles': 'true',
            'InputResourceManifests': 'asfsfdafs',
            'OutputManifestFile': '$(TargetPath).manifestdfs',
            'RegistrarScriptFile': 'sdfsfd',
            'ReplacementsFile': 'sdffsd',
            'SuppressStartupBanner': 'false',
            'TypeLibraryFile': 'sfsd',
            'UpdateFileHashes': 'true',
            'UpdateFileHashesSearchPath': 'sfsd',
            'VerboseOutput': 'true'},
        'ProjectReference': {
            'LinkLibraryDependencies': 'false',
            'UseLibraryDependencyInputs': 'true'},
        '': {
            'EmbedManifest': 'false',
            'GenerateManifest': 'false',
            'IgnoreImportLibrary': 'true',
            'LinkIncremental': ''
        },
        'ManifestResourceCompile': {
            'ResourceOutputFileName':
                '$(IntDir)$(TargetFileName).embed.manifest.resfdsf'}
    }
    actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
        msvs_settings,
        self.stderr)
    self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
    # A clean conversion should produce no warnings on stderr.
    self._ExpectedWarnings([])
# Run the full MSVSSettings test suite when executed directly.
if __name__ == '__main__':
  unittest.main()
| agpl-3.0 |
NoUsername/PrivateNotesExperimental | lib/django_openid_auth/forms.py | 23 | 3582 | # django-openid-auth - OpenID integration for django.contrib.auth
#
# Copyright (C) 2007 Simon Willison
# Copyright (C) 2008-2010 Canonical Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from django import forms
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import UserChangeForm
from django.contrib.auth.models import Group
from django.utils.translation import ugettext as _
from django.conf import settings
from openid.yadis import xri
def teams_new_unicode(self):
    """
    Replacement for Group.__unicode__()

    Calls the original method (saved as ``unicode_before_teams``) to chain
    results, then appends the names of any Launchpad teams that are mapped
    to this group via the OPENID_LAUNCHPAD_TEAMS_MAPPING setting, e.g.
    ``"mygroup -> team-a, team-b"``.
    """
    name = self.unicode_before_teams()
    teams_mapping = getattr(settings, 'OPENID_LAUNCHPAD_TEAMS_MAPPING', {})
    # Iterate items() once instead of re-looking each key up by index.
    group_teams = [team for team, group_name in teams_mapping.items()
                   if group_name == self.name]
    if group_teams:
        return "%s -> %s" % (name, ", ".join(group_teams))
    return name
# Monkey-patch Group's text representation: keep a reference to the original
# __unicode__ so teams_new_unicode can chain through it, then install the
# team-aware replacement.
Group.unicode_before_teams = Group.__unicode__
Group.__unicode__ = teams_new_unicode
class UserChangeFormWithTeamRestriction(UserChangeForm):
    """
    Extends UserChangeForm to add teams awareness to the user admin form.

    Groups that are mapped to external Launchpad teams (via the
    OPENID_LAUNCHPAD_TEAMS_MAPPING setting) are managed automatically by
    the OpenID login flow, so an admin must not assign them by hand.
    """
    def clean_groups(self):
        """Reject newly-added groups that are mapped to external teams."""
        data = self.cleaned_data['groups']
        teams_mapping = getattr(settings, 'OPENID_LAUNCHPAD_TEAMS_MAPPING', {})
        # A set gives O(1) membership tests instead of scanning the
        # mapping's values once per selected group.
        known_teams = set(teams_mapping.values())
        user_groups = self.instance.groups.all()
        for group in data:
            # Only reject *new* assignments; groups the user already has
            # are left alone.
            if group.name in known_teams and group not in user_groups:
                # Single-line message: the previous triple-quoted literal
                # embedded a newline and source indentation in the
                # user-visible error text.
                raise forms.ValidationError(
                    "The group %s is mapped to an external team. "
                    "You cannot assign it manually." % group.name)
        return data
# Install the team-aware change form on the stock django.contrib.auth admin.
UserAdmin.form = UserChangeFormWithTeamRestriction
class OpenIDLoginForm(forms.Form):
    """Login form asking for the user's OpenID identifier (URL or XRI i-name)."""

    openid_identifier = forms.CharField(
        max_length=255,
        widget=forms.TextInput(attrs={'class': 'required openid'}))

    def clean_openid_identifier(self):
        """Validate the identifier, rejecting XRI i-names when disallowed.

        i-names are refused only when the OPENID_DISALLOW_INAMES setting
        is truthy.
        """
        if 'openid_identifier' in self.cleaned_data:
            openid_identifier = self.cleaned_data['openid_identifier']
            if xri.identifierScheme(openid_identifier) == 'XRI' and getattr(
                settings, 'OPENID_DISALLOW_INAMES', False
            ):
                raise forms.ValidationError(_('i-names are not supported'))
            # NOTE(review): indentation in the source was ambiguous; upstream
            # django-openid-auth returns inside this branch (implicitly
            # returning None when the field is absent) -- confirm against
            # the original file.
            return self.cleaned_data['openid_identifier']
Dinnerbone/mcstatus | mcstatus/tests/protocol/test_connection.py | 1 | 9117 | import pytest
from mock import Mock, patch
from mcstatus.protocol.connection import (
Connection,
TCPSocketConnection,
UDPSocketConnection,
)
class TestConnection:
    """Unit tests for the in-memory Connection buffer.

    Each test drives the reader/writer API against known byte sequences
    (expressed as hex strings) to pin down the Minecraft wire format:
    varints, length-prefixed UTF strings, NUL-terminated ASCII strings,
    and fixed-width big-endian integers.
    """

    connection: Connection

    def setup_method(self):
        # Fresh empty buffer for every test.
        self.connection = Connection()

    def test_flush(self):
        # flush() returns everything written so far and empties the buffer.
        self.connection.sent = bytearray.fromhex("7FAABB")
        assert self.connection.flush() == bytearray.fromhex("7FAABB")
        assert self.connection.sent == bytearray()

    def test_receive(self):
        # Successive receive() calls append to the received buffer.
        self.connection.receive(bytearray.fromhex("7F"))
        self.connection.receive(bytearray.fromhex("AABB"))
        assert self.connection.received == bytearray.fromhex("7FAABB")

    def test_remaining(self):
        # remaining() counts unread received bytes.
        self.connection.receive(bytearray.fromhex("7F"))
        self.connection.receive(bytearray.fromhex("AABB"))
        assert self.connection.remaining() == 3

    def test_send(self):
        self.connection.write(bytearray.fromhex("7F"))
        self.connection.write(bytearray.fromhex("AABB"))
        assert self.connection.flush() == bytearray.fromhex("7FAABB")

    def test_read(self):
        # read(n) consumes n bytes from the front of the received buffer.
        self.connection.receive(bytearray.fromhex("7FAABB"))
        assert self.connection.read(2) == bytearray.fromhex("7FAA")
        assert self.connection.read(1) == bytearray.fromhex("BB")

    def _assert_varint_read_write(self, hexstr, value):
        # Helper: the hex encoding must round-trip both ways (decode to
        # `value` and encode back to the identical bytes).
        self.connection.receive(bytearray.fromhex(hexstr))
        assert self.connection.read_varint() == value
        self.connection.write_varint(value)
        assert self.connection.flush() == bytearray.fromhex(hexstr)

    def test_varint_cases(self):
        # Boundary values of the signed 32-bit varint encoding.
        self._assert_varint_read_write("00", 0)
        self._assert_varint_read_write("01", 1)
        self._assert_varint_read_write("0F", 15)
        self._assert_varint_read_write("FFFFFFFF07", 2147483647)
        self._assert_varint_read_write("FFFFFFFF0F", -1)
        self._assert_varint_read_write("8080808008", -2147483648)

    def test_readInvalidVarInt(self):
        # A varint longer than 5 bytes is malformed and must raise.
        self.connection.receive(bytearray.fromhex("FFFFFFFF80"))
        with pytest.raises(IOError):
            self.connection.read_varint()

    def test_writeInvalidVarInt(self):
        # Values outside the signed 32-bit range cannot be encoded.
        with pytest.raises(ValueError):
            self.connection.write_varint(2147483648)
        with pytest.raises(ValueError):
            self.connection.write_varint(-2147483649)

    def test_readUtf(self):
        # UTF strings are varint-length-prefixed.
        self.connection.receive(bytearray.fromhex("0D48656C6C6F2C20776F726C6421"))
        assert self.connection.read_utf() == "Hello, world!"

    def test_writeUtf(self):
        self.connection.write_utf("Hello, world!")
        assert self.connection.flush() == bytearray.fromhex("0D48656C6C6F2C20776F726C6421")

    def test_readEmptyUtf(self):
        # NOTE(review): despite the name, this exercises write_utf with an
        # empty string (encoded as a single zero-length byte).
        self.connection.write_utf("")
        assert self.connection.flush() == bytearray.fromhex("00")

    def test_readAscii(self):
        # ASCII strings are NUL-terminated rather than length-prefixed.
        self.connection.receive(bytearray.fromhex("48656C6C6F2C20776F726C642100"))
        assert self.connection.read_ascii() == "Hello, world!"

    def test_writeAscii(self):
        self.connection.write_ascii("Hello, world!")
        assert self.connection.flush() == bytearray.fromhex("48656C6C6F2C20776F726C642100")

    def test_readEmptyAscii(self):
        # NOTE(review): as above, this actually tests write_ascii("").
        self.connection.write_ascii("")
        assert self.connection.flush() == bytearray.fromhex("00")

    # Fixed-width integers below are big-endian; each width is checked at
    # its signed/unsigned extremes.

    def test_readShortNegative(self):
        self.connection.receive(bytearray.fromhex("8000"))
        assert self.connection.read_short() == -32768

    def test_writeShortNegative(self):
        self.connection.write_short(-32768)
        assert self.connection.flush() == bytearray.fromhex("8000")

    def test_readShortPositive(self):
        self.connection.receive(bytearray.fromhex("7FFF"))
        assert self.connection.read_short() == 32767

    def test_writeShortPositive(self):
        self.connection.write_short(32767)
        assert self.connection.flush() == bytearray.fromhex("7FFF")

    def test_readUShortPositive(self):
        self.connection.receive(bytearray.fromhex("8000"))
        assert self.connection.read_ushort() == 32768

    def test_writeUShortPositive(self):
        self.connection.write_ushort(32768)
        assert self.connection.flush() == bytearray.fromhex("8000")

    def test_readIntNegative(self):
        self.connection.receive(bytearray.fromhex("80000000"))
        assert self.connection.read_int() == -2147483648

    def test_writeIntNegative(self):
        self.connection.write_int(-2147483648)
        assert self.connection.flush() == bytearray.fromhex("80000000")

    def test_readIntPositive(self):
        self.connection.receive(bytearray.fromhex("7FFFFFFF"))
        assert self.connection.read_int() == 2147483647

    def test_writeIntPositive(self):
        self.connection.write_int(2147483647)
        assert self.connection.flush() == bytearray.fromhex("7FFFFFFF")

    def test_readUIntPositive(self):
        self.connection.receive(bytearray.fromhex("80000000"))
        assert self.connection.read_uint() == 2147483648

    def test_writeUIntPositive(self):
        self.connection.write_uint(2147483648)
        assert self.connection.flush() == bytearray.fromhex("80000000")

    def test_readLongNegative(self):
        self.connection.receive(bytearray.fromhex("8000000000000000"))
        assert self.connection.read_long() == -9223372036854775808

    def test_writeLongNegative(self):
        self.connection.write_long(-9223372036854775808)
        assert self.connection.flush() == bytearray.fromhex("8000000000000000")

    def test_readLongPositive(self):
        self.connection.receive(bytearray.fromhex("7FFFFFFFFFFFFFFF"))
        assert self.connection.read_long() == 9223372036854775807

    def test_writeLongPositive(self):
        self.connection.write_long(9223372036854775807)
        assert self.connection.flush() == bytearray.fromhex("7FFFFFFFFFFFFFFF")

    def test_readULongPositive(self):
        self.connection.receive(bytearray.fromhex("8000000000000000"))
        assert self.connection.read_ulong() == 9223372036854775808

    def test_writeULongPositive(self):
        self.connection.write_ulong(9223372036854775808)
        assert self.connection.flush() == bytearray.fromhex("8000000000000000")

    def test_readBuffer(self):
        # A buffer is a varint-length-prefixed blob returned as a nested
        # Connection; reading it consumes the bytes from this connection.
        self.connection.receive(bytearray.fromhex("027FAA"))
        buffer = self.connection.read_buffer()
        assert buffer.received == bytearray.fromhex("7FAA")
        assert self.connection.flush() == bytearray()

    def test_writeBuffer(self):
        # Writing a buffer prepends its length as a varint.
        buffer = Connection()
        buffer.write(bytearray.fromhex("7FAA"))
        self.connection.write_buffer(buffer)
        assert self.connection.flush() == bytearray.fromhex("027FAA")
class TestTCPSocketConnection:
    """Tests for TCPSocketConnection with the underlying socket mocked out.

    NOTE: this class was previously named ``TCPSocketConnectionTest``.
    pytest only collects test classes whose names match ``Test*``, so under
    the old name these tests were silently never run.  The class is renamed
    so it is collected; the old name is kept below as an alias for any
    external references.
    """

    def setup_method(self):
        # Stand in for the real TCP socket so no network access happens.
        socket = Mock()
        socket.recv = Mock()
        socket.send = Mock()
        with patch("socket.create_connection") as create_connection:
            create_connection.return_value = socket
            self.connection = TCPSocketConnection(("localhost", 1234))

    def test_flush(self):
        # A socket-backed connection has no local write buffer to flush.
        with pytest.raises(TypeError):
            self.connection.flush()

    def test_receive(self):
        # Likewise, bytes cannot be injected into a socket-backed reader.
        with pytest.raises(TypeError):
            self.connection.receive("")

    def test_remaining(self):
        with pytest.raises(TypeError):
            self.connection.remaining()

    def test_read(self):
        self.connection.socket.recv.return_value = bytearray.fromhex("7FAA")
        assert self.connection.read(2) == bytearray.fromhex("7FAA")

    def test_read_empty(self):
        # An empty recv() means the peer closed the connection mid-read.
        self.connection.socket.recv.return_value = bytearray.fromhex("")
        with pytest.raises(IOError):
            self.connection.read(2)

    def test_write(self):
        self.connection.write(bytearray.fromhex("7FAA"))
        # pytype: disable=attribute-error
        self.connection.socket.send.assert_called_once_with(bytearray.fromhex("7FAA"))
        # pytype: enable=attribute-error


# Backwards-compatible alias for the old (uncollected) class name.
TCPSocketConnectionTest = TestTCPSocketConnection
class TestUDPSocketConnection:
    """Tests for UDPSocketConnection with the underlying socket mocked out.

    NOTE: this class was previously named ``UDPSocketConnectionTest``.
    pytest only collects test classes whose names match ``Test*``, so under
    the old name these tests were silently never run.  The class is renamed
    so it is collected; the old name is kept below as an alias for any
    external references.
    """

    def setup_method(self):
        # Stand in for the real UDP socket so no network access happens.
        socket = Mock()
        socket.recvfrom = Mock()
        socket.sendto = Mock()
        with patch("socket.socket") as create_socket:
            create_socket.return_value = socket
            self.connection = UDPSocketConnection(("localhost", 1234))

    def test_flush(self):
        # A socket-backed connection has no local write buffer to flush.
        with pytest.raises(TypeError):
            self.connection.flush()

    def test_receive(self):
        with pytest.raises(TypeError):
            self.connection.receive("")

    def test_remaining(self):
        # UDP reads use a fixed maximum-datagram-size buffer.
        assert self.connection.remaining() == 65535

    def test_read(self):
        self.connection.socket.recvfrom.return_value = [bytearray.fromhex("7FAA")]
        assert self.connection.read(2) == bytearray.fromhex("7FAA")

    def test_write(self):
        self.connection.write(bytearray.fromhex("7FAA"))
        # pytype: disable=attribute-error
        self.connection.socket.sendto.assert_called_once_with(bytearray.fromhex("7FAA"), ("localhost", 1234))
        # pytype: enable=attribute-error


# Backwards-compatible alias for the old (uncollected) class name.
UDPSocketConnectionTest = TestUDPSocketConnection
| apache-2.0 |
wayblink/Naive | spark/spark-sql-perf/dev/merge_pr.py | 9 | 18840 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for creating well-formed pull request merges and pushing them to Apache.
# usage: ./apache-pr-merge.py (see config env vars below)
#
# This utility assumes you already have local a Spark git folder and that you
# have added remotes corresponding to both (i) the github apache Spark
# mirror and (ii) the apache git repo.
import json
import os
import re
import subprocess
import sys
import urllib2
try:
import jira.client
JIRA_IMPORTED = True
except ImportError:
JIRA_IMPORTED = False
# Location of your Spark git development area
SPARK_HOME = os.environ.get("SPARK_SQL_PERF_HOME", os.getcwd())
# Remote name which points to the Gihub site
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "origin")
# Remote name which points to Apache git
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "origin")
# ASF JIRA username
JIRA_USERNAME = os.environ.get("JIRA_USERNAME", "")
# ASF JIRA password
JIRA_PASSWORD = os.environ.get("JIRA_PASSWORD", "")
# OAuth key used for issuing requests against the GitHub API. If this is not defined, then requests
# will be unauthenticated. You should only need to configure this if you find yourself regularly
# exceeding your IP's unauthenticated request rate limit. You can create an OAuth key at
# https://github.com/settings/tokens. This script only requires the "public_repo" scope.
GITHUB_OAUTH_KEY = os.environ.get("GITHUB_OAUTH_KEY")
GITHUB_BASE = "https://github.com/databricks/spark-sql-perf/pull"
GITHUB_API_BASE = "https://api.github.com/repos/databricks/spark-sql-perf"
JIRA_BASE = "https://issues.apache.org/jira/browse"
JIRA_API_BASE = "https://issues.apache.org/jira"
# Prefix added to temporary branches
BRANCH_PREFIX = "PR_TOOL"
def get_json(url):
    """GET `url` and return the decoded JSON payload.

    Adds a GitHub OAuth token header when GITHUB_OAUTH_KEY is set.
    Exits the whole process on any HTTP error, with a specific hint when
    the unauthenticated rate limit has been exhausted.
    """
    try:
        request = urllib2.Request(url)
        if GITHUB_OAUTH_KEY:
            request.add_header('Authorization', 'token %s' % GITHUB_OAUTH_KEY)
        return json.load(urllib2.urlopen(request))
    except urllib2.HTTPError as e:
        # The rate-limit header reaching 0 is GitHub's signal to authenticate.
        if "X-RateLimit-Remaining" in e.headers and e.headers["X-RateLimit-Remaining"] == '0':
            print "Exceeded the GitHub API rate limit; see the instructions in " + \
                "dev/merge_spark_pr.py to configure an OAuth token for making authenticated " + \
                "GitHub requests."
        else:
            print "Unable to fetch URL, exiting: %s" % url
        sys.exit(-1)
def fail(msg):
    """Print `msg`, restore the original git state, and exit with failure."""
    print msg
    clean_up()
    sys.exit(-1)
def run_cmd(cmd):
    """Echo and run a command, returning its captured stdout.

    `cmd` may be a pre-tokenized argument list or a whitespace-separated
    string (the latter cannot carry arguments containing spaces).
    """
    print cmd
    if isinstance(cmd, list):
        return subprocess.check_output(cmd)
    else:
        return subprocess.check_output(cmd.split(" "))
def continue_maybe(prompt):
    """Ask a y/n question; any answer other than 'y' aborts via fail()."""
    result = raw_input("\n%s (y/n): " % prompt)
    if result.lower() != "y":
        fail("Okay, exiting")
def clean_up():
    """Check out the original HEAD again and delete every temporary
    PR_TOOL_* branch created by this tool."""
    print "Restoring head pointer to %s" % original_head
    run_cmd("git checkout %s" % original_head)

    # `git branch` output is stripped of spaces (including the '*' marker column).
    branches = run_cmd("git branch").replace(" ", "").split("\n")

    for branch in filter(lambda x: x.startswith(BRANCH_PREFIX), branches):
        print "Deleting local branch %s" % branch
        run_cmd("git branch -D %s" % branch)
# merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref, title, body, pr_repo_desc):
    """Squash-merge pull request `pr_num` into `target_ref` and push.

    Fetches the PR head and the target branch into temporary PR_TOOL_*
    branches, squashes the PR, builds a commit message (title, body,
    author list, conflict-resolution note, 'Closes #N' trailer), then
    pushes the result. Returns the abbreviated merge commit hash.
    """
    pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
    target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX, pr_num, target_ref.upper())
    run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num, pr_branch_name))
    run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref, target_branch_name))
    run_cmd("git checkout %s" % target_branch_name)

    had_conflicts = False
    try:
        run_cmd(['git', 'merge', pr_branch_name, '--squash'])
    except Exception as e:
        # Let the operator fix conflicts by hand, then carry on.
        msg = "Error merging: %s\nWould you like to manually fix-up this merge?" % e
        continue_maybe(msg)
        msg = "Okay, please fix any conflicts and 'git add' conflicting files... Finished?"
        continue_maybe(msg)
        had_conflicts = True

    # Rank authors by commit count so the most active one is the default.
    commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
                             '--pretty=format:%an <%ae>']).split("\n")
    distinct_authors = sorted(set(commit_authors),
                              key=lambda x: commit_authors.count(x), reverse=True)
    primary_author = raw_input(
        "Enter primary author in the format of \"name <email>\" [%s]: " %
        distinct_authors[0])
    if primary_author == "":
        primary_author = distinct_authors[0]

    commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
                      '--pretty=format:%h [%an] %s']).split("\n\n")

    merge_message_flags = []

    merge_message_flags += ["-m", title]
    if body is not None:
        # We remove @ symbols from the body to avoid triggering e-mails
        # to people every time someone creates a public fork of Spark.
        merge_message_flags += ["-m", body.replace("@", "")]

    authors = "\n".join(["Author: %s" % a for a in distinct_authors])

    merge_message_flags += ["-m", authors]

    if had_conflicts:
        # Record who resolved the conflicts in the commit message.
        committer_name = run_cmd("git config --get user.name").strip()
        committer_email = run_cmd("git config --get user.email").strip()
        message = "This patch had conflicts when merged, resolved by\nCommitter: %s <%s>" % (
            committer_name, committer_email)
        merge_message_flags += ["-m", message]

    # The string "Closes #%s" string is required for GitHub to correctly close the PR
    merge_message_flags += ["-m", "Closes #%s from %s." % (pr_num, pr_repo_desc)]

    run_cmd(['git', 'commit', '--author="%s"' % primary_author] + merge_message_flags)

    continue_maybe("Merge complete (local ref %s). Push to %s?" % (
        target_branch_name, PUSH_REMOTE_NAME))

    try:
        run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name, target_ref))
    except Exception as e:
        clean_up()
        fail("Exception while pushing: %s" % e)

    merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
    clean_up()
    print("Pull request #%s merged!" % pr_num)
    print("Merge hash: %s" % merge_hash)
    return merge_hash
def cherry_pick(pr_num, merge_hash, default_branch):
    """Cherry-pick `merge_hash` onto a branch chosen interactively
    (defaulting to `default_branch`) and push it.

    Returns the name of the branch that was picked onto.
    """
    pick_ref = raw_input("Enter a branch name [%s]: " % default_branch)
    if pick_ref == "":
        pick_ref = default_branch

    pick_branch_name = "%s_PICK_PR_%s_%s" % (BRANCH_PREFIX, pr_num, pick_ref.upper())

    run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, pick_ref, pick_branch_name))
    run_cmd("git checkout %s" % pick_branch_name)

    try:
        # -s adds a Signed-off-by line; -x records the original commit hash.
        run_cmd("git cherry-pick -sx %s" % merge_hash)
    except Exception as e:
        msg = "Error cherry-picking: %s\nWould you like to manually fix-up this merge?" % e
        continue_maybe(msg)
        msg = "Okay, please fix any conflicts and finish the cherry-pick. Finished?"
        continue_maybe(msg)

    continue_maybe("Pick complete (local ref %s). Push to %s?" % (
        pick_branch_name, PUSH_REMOTE_NAME))

    try:
        run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, pick_branch_name, pick_ref))
    except Exception as e:
        clean_up()
        fail("Exception while pushing: %s" % e)

    pick_hash = run_cmd("git rev-parse %s" % pick_branch_name)[:8]
    clean_up()

    print("Pull request #%s picked into %s!" % (pr_num, pick_ref))
    print("Pick hash: %s" % pick_hash)
    return pick_ref
def fix_version_from_branch(branch, versions):
    """Pick the default fix version for `branch`.

    `versions` is assumed to be a sorted (newest->oldest) list of
    un-released versions: master takes the newest one, a release branch
    takes the oldest version whose name starts with its x.y prefix.
    """
    if branch != "master":
        branch_prefix = branch.replace("branch-", "")
        return filter(lambda v: v.name.startswith(branch_prefix), versions)[-1]
    return versions[0]
def resolve_jira_issue(merge_branches, comment, default_jira_id=""):
    """Resolve a single ASF JIRA issue as Fixed.

    Prompts for the issue id (defaulting to `default_jira_id`), shows the
    issue's current state, computes default fix versions from the branches
    the PR was merged into, prompts for the final fix-version list and
    transitions the issue with `comment`.
    """
    asf_jira = jira.client.JIRA({'server': JIRA_API_BASE},
                                basic_auth=(JIRA_USERNAME, JIRA_PASSWORD))

    jira_id = raw_input("Enter a JIRA id [%s]: " % default_jira_id)
    if jira_id == "":
        jira_id = default_jira_id

    try:
        issue = asf_jira.issue(jira_id)
    except Exception as e:
        fail("ASF JIRA could not find %s\n%s" % (jira_id, e))

    cur_status = issue.fields.status.name
    cur_summary = issue.fields.summary
    cur_assignee = issue.fields.assignee
    if cur_assignee is None:
        cur_assignee = "NOT ASSIGNED!!!"
    else:
        cur_assignee = cur_assignee.displayName

    # Refuse to double-resolve an already finished issue.
    if cur_status == "Resolved" or cur_status == "Closed":
        fail("JIRA issue %s already has status '%s'" % (jira_id, cur_status))
    print ("=== JIRA %s ===" % jira_id)
    print ("summary\t\t%s\nassignee\t%s\nstatus\t\t%s\nurl\t\t%s/%s\n" % (
        cur_summary, cur_assignee, cur_status, JIRA_BASE, jira_id))

    # Candidate fix versions: unreleased x.y.z versions, newest first.
    versions = asf_jira.project_versions("SPARK")
    versions = sorted(versions, key=lambda x: x.name, reverse=True)
    versions = filter(lambda x: x.raw['released'] is False, versions)
    # Consider only x.y.z versions
    versions = filter(lambda x: re.match('\d+\.\d+\.\d+', x.name), versions)

    default_fix_versions = map(lambda x: fix_version_from_branch(x, versions).name, merge_branches)
    for v in default_fix_versions:
        # Handles the case where we have forked a release branch but not yet made the release.
        # In this case, if the PR is committed to the master branch and the release branch, we
        # only consider the release branch to be the fix version. E.g. it is not valid to have
        # both 1.1.0 and 1.0.0 as fix versions.
        (major, minor, patch) = v.split(".")
        if patch == "0":
            previous = "%s.%s.%s" % (major, int(minor) - 1, 0)
            if previous in default_fix_versions:
                default_fix_versions = filter(lambda x: x != v, default_fix_versions)
    default_fix_versions = ",".join(default_fix_versions)

    fix_versions = raw_input("Enter comma-separated fix version(s) [%s]: " % default_fix_versions)
    if fix_versions == "":
        fix_versions = default_fix_versions
    fix_versions = fix_versions.replace(" ", "").split(",")

    def get_version_json(version_str):
        # Raw JIRA JSON for a version name, as required by transition_issue.
        return filter(lambda v: v.name == version_str, versions)[0].raw

    jira_fix_versions = map(lambda v: get_version_json(v), fix_versions)

    resolve = filter(lambda a: a['name'] == "Resolve Issue", asf_jira.transitions(jira_id))[0]
    resolution = filter(lambda r: r.raw['name'] == "Fixed", asf_jira.resolutions())[0]
    asf_jira.transition_issue(
        jira_id, resolve["id"], fixVersions = jira_fix_versions,
        comment = comment, resolution = {'id': resolution.raw['id']})

    print "Successfully resolved %s with fixVersions=%s!" % (jira_id, fix_versions)
def resolve_jira_issues(title, merge_branches, comment):
    """Resolve every SPARK-XXXX id found in `title`.

    When the title mentions no JIRA id, falls back to a single interactive
    prompt (via resolve_jira_issue with no default id).
    """
    found_ids = re.findall("SPARK-[0-9]{4,5}", title)
    if not found_ids:
        resolve_jira_issue(merge_branches, comment)
    else:
        for found_id in found_ids:
            resolve_jira_issue(merge_branches, comment, found_id)
def standardize_jira_ref(text):
    """Normalize a PR title to the '[SPARK-XXXXX] [MODULE] Issue' format.

    JIRA references like 'SPARK 5954' or 'SPARK-5954.' become '[SPARK-5954]',
    bracketed component tags are upper-cased, and both are moved to the front.

    >>> standardize_jira_ref("[MLlib] Spark 5954: Top by key")
    '[SPARK-5954] [MLLIB] Top by key'
    >>> standardize_jira_ref("Additional information for users building from source code")
    'Additional information for users building from source code'
    """
    # Titles already in canonical form are returned untouched.
    if (re.search(r'^\[SPARK-[0-9]{3,6}\] (\[[A-Z0-9_\s,]+\] )+\S+', text)):
        return text

    tickets = []
    modules = []

    # Pull out every JIRA reference, normalizing 'SPARK 123' -> '[SPARK-123]'.
    ticket_pattern = re.compile(r'(SPARK[-\s]*[0-9]{3,6})+', re.IGNORECASE)
    for ref in ticket_pattern.findall(text):
        tickets.append('[' + re.sub(r'\s+', '-', ref.upper()) + ']')
        text = text.replace(ref, '')

    # Pull out bracketed component tags such as '[MLlib]' and upper-case them.
    module_pattern = re.compile(r'(\[[\w\s,-\.]+\])', re.IGNORECASE)
    for tag in module_pattern.findall(text):
        modules.append(tag.upper())
        text = text.replace(tag, '')

    # Strip any leading punctuation left behind (e.g. ':' or '.').
    leftover = re.compile(r'^\W+(.*)', re.IGNORECASE).search(text)
    if leftover is not None:
        text = leftover.groups()[0]

    # Reassemble (tickets, modules, remaining text) and collapse whitespace runs.
    rebuilt = ' '.join(tickets).strip() + " " + ' '.join(modules).strip() + " " + text.strip()
    return re.sub(r'\s+', ' ', rebuilt.strip())
def main():
    """Interactive driver.

    Picks a PR, optionally normalizes its title, squash-merges it (or
    backports an already-merged one), offers cherry-picks to more branches,
    and finally offers to resolve the associated JIRA issue(s).
    """
    global original_head

    os.chdir(SPARK_HOME)
    original_head = run_cmd("git rev-parse HEAD")[:8]

    branches = get_json("%s/branches" % GITHUB_API_BASE)
    #branch_names = filter(lambda x: x.startswith("branch-"), [x['name'] for x in branches])
    # Assumes branch names can be sorted lexicographically
    latest_branch = "master"

    pr_num = raw_input("Which pull request would you like to merge? (e.g. 34): ")
    pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
    pr_events = get_json("%s/issues/%s/events" % (GITHUB_API_BASE, pr_num))

    url = pr["url"]

    # Decide whether to use the modified title or not
    modified_title = standardize_jira_ref(pr["title"])
    if modified_title != pr["title"]:
        print "I've re-written the title as follows to match the standard format:"
        print "Original: %s" % pr["title"]
        print "Modified: %s" % modified_title
        result = raw_input("Would you like to use the modified title? (y/n): ")
        if result.lower() == "y":
            title = modified_title
            print "Using modified title:"
        else:
            title = pr["title"]
            print "Using original title:"
        print title
    else:
        title = pr["title"]

    body = pr["body"]
    target_ref = pr["base"]["ref"]
    user_login = pr["user"]["login"]
    base_ref = pr["head"]["ref"]
    pr_repo_desc = "%s/%s" % (user_login, base_ref)

    # Merged pull requests don't appear as merged in the GitHub API;
    # Instead, they're closed by asfgit.
    merge_commits = \
        [e for e in pr_events if e["actor"]["login"] == "asfgit" and e["event"] == "closed"]

    if merge_commits:
        # Backport path: the PR is already merged, offer a cherry-pick only.
        merge_hash = merge_commits[0]["commit_id"]
        message = get_json("%s/commits/%s" % (GITHUB_API_BASE, merge_hash))["commit"]["message"]

        print "Pull request %s has already been merged, assuming you want to backport" % pr_num
        commit_is_downloaded = run_cmd(['git', 'rev-parse', '--quiet', '--verify',
                                        "%s^{commit}" % merge_hash]).strip() != ""
        if not commit_is_downloaded:
            fail("Couldn't find any merge commit for #%s, you may need to update HEAD." % pr_num)

        print "Found commit %s:\n%s" % (merge_hash, message)
        cherry_pick(pr_num, merge_hash, latest_branch)
        sys.exit(0)

    if not bool(pr["mergeable"]):
        msg = "Pull request %s is not mergeable in its current form.\n" % pr_num + \
            "Continue? (experts only!)"
        continue_maybe(msg)

    print ("\n=== Pull Request #%s ===" % pr_num)
    print ("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s" % (
        title, pr_repo_desc, target_ref, url))
    continue_maybe("Proceed with merging pull request #%s?" % pr_num)

    merged_refs = [target_ref]

    merge_hash = merge_pr(pr_num, target_ref, title, body, pr_repo_desc)

    # Offer cherry-picks until the operator declines.
    pick_prompt = "Would you like to pick %s into another branch?" % merge_hash
    while raw_input("\n%s (y/n): " % pick_prompt).lower() == "y":
        merged_refs = merged_refs + [cherry_pick(pr_num, merge_hash, latest_branch)]

    if JIRA_IMPORTED:
        if JIRA_USERNAME and JIRA_PASSWORD:
            continue_maybe("Would you like to update an associated JIRA?")
            jira_comment = "Issue resolved by pull request %s\n[%s/%s]" % (pr_num, GITHUB_BASE, pr_num)
            resolve_jira_issues(title, merged_refs, jira_comment)
        else:
            print "JIRA_USERNAME and JIRA_PASSWORD not set"
            print "Exiting without trying to close the associated JIRA."
    else:
        print "Could not find jira-python library. Run 'sudo pip install jira' to install."
        print "Exiting without trying to close the associated JIRA."
if __name__ == "__main__":
    import doctest
    # Run the module doctests (standardize_jira_ref examples) before merging;
    # abort if any of them fail.
    (failure_count, test_count) = doctest.testmod()
    if failure_count:
        exit(-1)

    main()
| mit |
imnutz/support-tools | googlecode-issues-exporter/bitbucket_issue_converter_test.py | 51 | 6257 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the BitBucket Services."""
# pylint: disable=missing-docstring,protected-access
import unittest
import bitbucket_issue_converter
import issues
from issues_test import DEFAULT_USERNAME
from issues_test import SINGLE_COMMENT
from issues_test import SINGLE_ISSUE
from issues_test import COMMENT_ONE
from issues_test import COMMENT_TWO
from issues_test import COMMENT_THREE
from issues_test import COMMENTS_DATA
from issues_test import NO_ISSUE_DATA
from issues_test import USER_MAP
# The BitBucket username.
BITBUCKET_USERNAME = DEFAULT_USERNAME
# The BitBucket repo name.
BITBUCKET_REPO = "repo"
class TestUserService(unittest.TestCase):
    """Tests for the UserService."""

    def setUp(self):
        self._bitbucket_user_service = bitbucket_issue_converter.UserService()

    def testIsUser123(self):
        # The BitBucket user service treats any name as an existing user.
        self.assertTrue(self._bitbucket_user_service.IsUser("username123"))

    def testIsUser321(self):
        self.assertTrue(self._bitbucket_user_service.IsUser("username321"))
class TestIssueService(unittest.TestCase):
    """Tests for the IssueService."""

    def setUp(self):
        self._bitbucket_issue_service = bitbucket_issue_converter.IssueService()
        # Show full diffs when large dict comparisons fail.
        self.maxDiff = None

    def testCreateIssue(self):
        # Expected BitBucket issue payload derived from SINGLE_ISSUE.
        issue_body = {
            "assignee": "a_uthor",
            "content": ("Original [issue 1](https://code.google.com/p/repo/issues" +
                        "/detail?id=1) created by a_uthor on last year:\n\none"),
            "content_updated_on": "last month",
            "created_on": "last year",
            "id": 1,
            "kind": "bug",
            "priority": "minor",
            "reporter": None,
            "status": "resolved",
            "title": "issue_title",
            "updated_on": "last year",
        }
        issue_number = self._bitbucket_issue_service.CreateIssue(SINGLE_ISSUE)
        self.assertEqual(1, issue_number)
        actual = self._bitbucket_issue_service._bitbucket_issues[0]
        self.assertEqual(issue_body, actual)

    def testCloseIssue(self):
        # no-op
        self._bitbucket_issue_service.CloseIssue(123)

    def testCreateComment(self):
        # Expected BitBucket comment payload derived from SINGLE_COMMENT.
        comment_body = {
            "content": (
                "Comment [#1](https://code.google.com/p/repo/issues/detail" +
                "?id=1#c1) originally posted by a_uthor on last year:\n\none"),
            "created_on": "last year",
            "id": 1,
            "issue": 1,
            "updated_on": "last year",
            "user": "a_uthor",
        }
        self._bitbucket_issue_service.CreateComment(
            1, "1", SINGLE_COMMENT, BITBUCKET_REPO)
        actual = self._bitbucket_issue_service._bitbucket_comments[0]
        self.assertEqual(comment_body, actual)
class TestIssueExporter(unittest.TestCase):
    """Tests for the IssueExporter driving the BitBucket services."""

    def setUp(self):
        self._bitbucket_user_service = bitbucket_issue_converter.UserService()
        self._bitbucket_issue_service = bitbucket_issue_converter.IssueService()
        self.issue_exporter = issues.IssueExporter(
            self._bitbucket_issue_service, self._bitbucket_user_service,
            NO_ISSUE_DATA, BITBUCKET_REPO, USER_MAP)
        self.issue_exporter.Init()

    def testGetAllPreviousIssues(self):
        # A fresh exporter has no previously created issues, before or after scanning.
        self.assertEqual(0, len(self.issue_exporter._previously_created_issues))
        self.issue_exporter._GetAllPreviousIssues()
        self.assertEqual(0, len(self.issue_exporter._previously_created_issues))

    def testCreateIssue(self):
        issue_number = self.issue_exporter._CreateIssue(SINGLE_ISSUE)
        self.assertEqual(1, issue_number)

    def testCreateComments(self):
        self.assertEqual(0, self.issue_exporter._comment_number)
        self.issue_exporter._CreateComments(COMMENTS_DATA, 1234, SINGLE_ISSUE)
        self.assertEqual(4, self.issue_exporter._comment_number)

    def testStart(self):
        # Three fixture issues covering open/closed states, different label
        # sets and different comment counts.
        self.issue_exporter._issue_json_data = [
            {
                "id": "1",
                "title": "Title1",
                "state": "open",
                "status": "New",
                "comments": {
                    "items": [COMMENT_ONE, COMMENT_TWO, COMMENT_THREE],
                },
                "labels": ["Type-Issue", "Priority-High"],
                "owner": {"kind": "projecthosting#issuePerson",
                          "name": "User1"
                          },
                "published": "last year",
                "updated": "last month",
            },
            {
                "id": "2",
                "title": "Title2",
                "state": "closed",
                "status": "Fixed",
                "owner": {"kind": "projecthosting#issuePerson",
                          "name": "User2"
                          },
                "labels": [],
                "comments": {
                    "items": [COMMENT_ONE],
                },
                "published": "last month",
                "updated": "last week",
            },
            {
                "id": "3",
                "title": "Title3",
                "state": "closed",
                "status": "WontFix",
                "comments": {
                    "items": [COMMENT_ONE, COMMENT_TWO],
                },
                "labels": ["Type-Defect"],
                "owner": {"kind": "projecthosting#issuePerson",
                          "name": "User3"
                          },
                "published": "last week",
                "updated": "yesterday",
            }]

        self.issue_exporter.Start()

        self.assertEqual(3, self.issue_exporter._issue_total)
        self.assertEqual(3, self.issue_exporter._issue_number)
        # Comment counts are per issue and should match the numbers from the last
        # issue created, minus one for the first comment, which is really
        # the issue description.
        self.assertEqual(1, self.issue_exporter._comment_number)
        self.assertEqual(1, self.issue_exporter._comment_total)
if __name__ == "__main__":
    # buffer=True suppresses stdout/stderr of passing tests.
    unittest.main(buffer=True)
| apache-2.0 |
auto-mat/klub | apps/aklub/migrations/0040_auto_20170117_1325.py | 1 | 1245 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-17 13:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration.

    Links TaxConfirmation to UserProfile and enforces one confirmation per
    (user_profile, year); also re-declares two unrelated field definitions.
    """

    dependencies = [
        ('aklub', '0039_auto_20161221_1256'),
    ]

    operations = [
        # New nullable FK so existing TaxConfirmation rows stay valid.
        migrations.AddField(
            model_name='taxconfirmation',
            name='user_profile',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='aklub.UserProfile'),
        ),
        migrations.AlterField(
            model_name='campaign',
            name='result',
            field=models.ManyToManyField(blank=True, to='aklub.Result', verbose_name='Acceptable results of communication'),
        ),
        migrations.AlterField(
            model_name='masscommunication',
            name='template',
            field=models.TextField(help_text='Template can contain variable substitutions like addressment, name, variable symbol etc.', max_length=50000, null=True, verbose_name='Template'),
        ),
        # At most one tax confirmation per user profile and year.
        migrations.AlterUniqueTogether(
            name='taxconfirmation',
            unique_together=set([('user_profile', 'year')]),
        ),
    ]
| gpl-3.0 |
tejal29/pants | src/python/pants/goal/aggregated_timings.py | 1 | 1733 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from collections import defaultdict
from pants.util.dirutil import safe_mkdir_for
class AggregatedTimings(object):
  """Aggregates timings over multiple invocations of 'similar' work.

  If a path is given, the current aggregate is rewritten to that file after
  every update. Useful for finding bottlenecks."""

  def __init__(self, path=None):
    # Map label -> accumulated elapsed time in seconds (a float).
    self._timings_by_path = defaultdict(float)
    # Labels that represent tool invocations.
    self._tool_labels = set()
    self._path = path
    safe_mkdir_for(self._path)

  def add_timing(self, label, secs, is_tool=False):
    """Aggregate timings by label.

    secs - a double, so fractional seconds are allowed.
    is_tool - whether this label represents a tool invocation.
    """
    self._timings_by_path[label] += secs
    if is_tool:
      self._tool_labels.add(label)
    # Skip writing when the output dir is gone (e.g. during a clean-all);
    # we don't want to write anything in that case.
    if not self._path or not os.path.exists(os.path.dirname(self._path)):
      return
    with open(self._path, 'w') as f:
      f.writelines('%(label)s: %(timing)s\n' % entry for entry in self.get_all())

  def get_all(self):
    """Returns all the timings, sorted in decreasing order.

    Each value is a dict: { label: <label>, timing: <seconds>, is_tool: <bool> }
    """
    ranked = sorted(self._timings_by_path.items(), key=lambda item: item[1], reverse=True)
    return [{'label': label, 'timing': secs, 'is_tool': label in self._tool_labels}
            for label, secs in ranked]
| apache-2.0 |
wutitoudi/p2pool | p2pool/bitcoin/script.py | 282 | 2589 | from p2pool.util import math, pack
def reads_nothing(f):
    """Argument reader for opcodes with no payload: leaves the stream untouched."""
    stream = f
    return None, stream
def protoPUSH(length):
    """Build a reader that consumes exactly `length` bytes as the push payload."""
    def _read(f):
        return pack.read(f, length)
    return _read
def protoPUSHDATA(size_len):
    """Build a reader for OP_PUSHDATA1/2/4: a `size_len`-byte little-endian
    length prefix followed by that many bytes of payload."""
    def _read(f):
        raw_len, f = pack.read(f, size_len)
        # The prefix is little-endian: reverse, then drop leading NUL bytes.
        payload_len = math.string_to_natural(raw_len[::-1].lstrip(chr(0)))
        return pack.read(f, payload_len)
    return _read
# Opcode dispatch table: opcode byte -> (name, argument-reader function).
opcodes = {}
# Default every opcode to an unknown, argument-less entry.
for i in xrange(256):
    opcodes[i] = 'UNK_' + str(i), reads_nothing

# 0x00 pushes an empty string.
opcodes[0] = 'PUSH', lambda f: ('', f)
# 0x01-0x4b push the next 1..75 bytes literally.
for i in xrange(1, 76):
    opcodes[i] = 'PUSH', protoPUSH(i)
# OP_PUSHDATA1/2/4: explicit 1/2/4-byte length prefix.
opcodes[76] = 'PUSH', protoPUSHDATA(1)
opcodes[77] = 'PUSH', protoPUSHDATA(2)
opcodes[78] = 'PUSH', protoPUSHDATA(4)
# OP_1NEGATE pushes -1 (0x81 in the script number encoding).
opcodes[79] = 'PUSH', lambda f: ('\x81', f)
# OP_1..OP_16 push the small integers 1..16 (note the _i=i default binding
# to avoid the late-binding-closure pitfall).
for i in xrange(81, 97):
    opcodes[i] = 'PUSH', lambda f, _i=i: (chr(_i - 80), f)

# Signature-checking opcodes, used by get_sigop_count below.
opcodes[172] = 'CHECKSIG', reads_nothing
opcodes[173] = 'CHECKSIGVERIFY', reads_nothing
opcodes[174] = 'CHECKMULTISIG', reads_nothing
opcodes[175] = 'CHECKMULTISIGVERIFY', reads_nothing
def parse(script):
    """Lazily decode `script` into (opcode_name, argument) pairs."""
    stream = script, 0
    while pack.size(stream):
        byte, stream = pack.read(stream, 1)
        name, read_arg = opcodes[ord(byte)]
        arg, stream = read_arg(stream)
        yield name, arg
def get_sigop_count(script):
    """Count signature operations in `script`, weighting the multisig
    opcodes at 20 (the legacy worst-case key count)."""
    weights = {
        'CHECKSIG': 1,
        'CHECKSIGVERIFY': 1,
        'CHECKMULTISIG': 20,
        'CHECKMULTISIGVERIFY': 20,
    }
    total = 0
    for opcode_name, _arg in parse(script):
        total += weights.get(opcode_name, 0)
    return total
def create_push_script(datums): # datums can be ints or strs
    """Serialize `datums` (ints or byte strings) into a script of pushes.

    -1 and 1..16 use the dedicated one-byte opcodes; other ints are encoded
    as minimal little-endian script numbers (sign carried in the high bit of
    the last byte). Strings use a direct push (<76 bytes) or OP_PUSHDATA1/2/4.
    """
    res = []
    for datum in datums:
        if isinstance(datum, (int, long)):
            if datum == -1 or 1 <= datum <= 16:
                res.append(chr(datum + 80))
                continue
            negative = datum < 0
            datum = math.natural_to_string(abs(datum))
            # Keep the high bit free for the sign flag.
            if datum and ord(datum[0]) & 128:
                datum = '\x00' + datum
            if negative:
                datum = chr(ord(datum[0]) + 128) + datum[1:]
            datum = datum[::-1]
        if len(datum) < 76:
            res.append(chr(len(datum)))
        elif len(datum) <= 0xff:
            # BUG FIX: the opcode bytes were appended as bare ints (76/77/78),
            # which made ''.join(res) below raise TypeError whenever a
            # PUSHDATA opcode was needed; they must be single characters.
            res.append(chr(76))  # OP_PUSHDATA1
            res.append(chr(len(datum)))
        elif len(datum) <= 0xffff:
            res.append(chr(77))  # OP_PUSHDATA2
            res.append(pack.IntType(16).pack(len(datum)))
        elif len(datum) <= 0xffffffff:
            res.append(chr(78))  # OP_PUSHDATA4
            res.append(pack.IntType(32).pack(len(datum)))
        else:
            raise ValueError('string too long')
        res.append(datum)
    return ''.join(res)
| gpl-3.0 |
sthalik/git-cola | extras/qtpy/qtpy/QtWebEngineWidgets.py | 3 | 1337 | # -*- coding: utf-8 -*-
#
# Copyright © 2014-2015 Colin Duquesnoy
# Copyright © 2009- The Spyder development Team
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
"""
Provides QtWebEngineWidgets classes and functions.
"""
from qtpy import PYQT5, PYQT4, PYSIDE, PythonQtError

# To test if we are using WebEngine or WebKit:
# True when the real QtWebEngine classes were imported, False when the
# legacy QtWebKit classes are re-exported under the QWebEngine* names.
WEBENGINE = True


if PYQT5:
    # QtWebEngine is an optional PyQt5 add-on; fall back to QtWebKit when
    # it is not installed.
    try:
        from PyQt5.QtWebEngineWidgets import QWebEnginePage
        from PyQt5.QtWebEngineWidgets import QWebEngineView
        from PyQt5.QtWebEngineWidgets import QWebEngineSettings
    except ImportError:
        from PyQt5.QtWebKitWidgets import QWebPage as QWebEnginePage
        from PyQt5.QtWebKitWidgets import QWebView as QWebEngineView
        from PyQt5.QtWebKit import QWebSettings as QWebEngineSettings
        WEBENGINE = False
elif PYQT4:
    # Qt4 bindings only ever shipped QtWebKit.
    from PyQt4.QtWebKit import QWebPage as QWebEnginePage
    from PyQt4.QtWebKit import QWebView as QWebEngineView
    from PyQt4.QtWebKit import QWebSettings as QWebEngineSettings
    WEBENGINE = False
elif PYSIDE:
    from PySide.QtWebKit import QWebPage as QWebEnginePage
    from PySide.QtWebKit import QWebView as QWebEngineView
    from PySide.QtWebKit import QWebSettings as QWebEngineSettings
    WEBENGINE = False
else:
    raise PythonQtError('No Qt bindings could be found')
| gpl-2.0 |
SciLifeLab/standalone_scripts | project_status_extended.py | 4 | 30984 | import sys, os, glob
import argparse
from operator import itemgetter
import subprocess
uppmax_id = 'ngi2016003'
def init_sample_hash_emtry():
    """Return a fresh per-sample stats record with every counter zeroed."""
    return {
        '#Archived_runs': 0,    # fastq run-lanes still present in the archive
        '#Data_runs': 0,        # run-lanes transferred to the DATA folder
        '#Analysis_runs': 0,    # run-lanes seen in the ANALYSIS folder
        '#Reads': 0,
        'RowCov': 0,
        '#AlignedReads': 0,
        '%AlignedReads': 0,
        'AlignCov': 0,
        '%Dup': 0,
        'MedianInsertSize': 0,
        'GCpercentage': 0,
        'Delivered': False,     # True once seen in the DELIVERY folder
    }
def find_samples_from_archive(roots, project, samples, stockholm=True):
    """given a project (e.g. P1775 or OB-0726) finds all samples sequenced for that specific project
    it assumes that we never delete the folder structure, but only fastq files
    updates `samples` in place: one entry per sample name, counting the
    archived seq runs (lanes) that still contain fastq files for it
    """
    for root in roots:
        for dir in os.listdir(root):
            if "_ST-" in dir:
                #must be an X FC
                run_dir = os.path.join(root, dir)
                sample_dirs = glob.glob("{}/Demultiplexing/*/Sample_*".format(run_dir))
                for sample in sample_dirs:
                    if stockholm:
                        # Stockholm: sample dirs are named after the sample itself.
                        sample_name = sample.split("/")[-1].replace("Sample_", "")
                        if not sample_name.startswith(project):
                            continue
                    else: #uppsala case
                        # Uppsala: the parent dir is the project name.
                        current_project = sample.split("/")[-2]
                        if project != current_project:
                            continue
                        sample_name = sample.split("/")[-1].replace("Sample_", "")
                    if not sample_name in samples:
                        samples[sample_name] = init_sample_hash_emtry()
                    archived_runs = len(glob.glob("{}/{}*L0*R1*fastq.gz".format(sample, sample_name)))
                    if archived_runs == 0: #stockholm case
                        # Stockholm fastq names may use '-' where the sample id has '_'.
                        sampe_name_hyphen = sample_name.replace("_", "-")
                        archived_runs = len(glob.glob("{}/{}*L00*R1*fastq.gz".format(sample, sampe_name_hyphen)))
                    samples[sample_name]["#Archived_runs"] += archived_runs
def find_sample_from_DATA(root, project, samples):
    """given a project (e.g. P1775) finds all samples transferred to the DATA folder
    updates `samples` in place with the number of seq runs (or lane runs) per sample
    """
    # Nothing transferred yet for this project.
    if not os.path.exists(os.path.join(root, project)):
        return samples
    for sample in os.listdir(os.path.join(root, project)):
        #DATA/SAMPLE/LIB_PREPS/RUNS
        if sample.startswith("."):
            continue
        sample_data_dir = os.path.join(root, project, sample)
        sample_runs = glob.glob("{}/*/*/{}*L0*_R1*fastq.gz".format(sample_data_dir, sample)) #if sample splitted in multiple lanes there will be an entry per lane
        if not sample in samples:
            samples[sample] = init_sample_hash_emtry()
        samples[sample]['#Data_runs'] = len(sample_runs)
def find_sample_from_ANALYSIS(root, project, samples):
    """given a project (e.g. P1775) finds all samples in the ANALYSIS folder
    and updates `samples` in place with various stats per sample.
    It does this by looking at the bam.out files present in the 01_raw_alignments folder;
    a sample is counted here if it is found in 01_raw_alignments.
    """
    raw_alignments_dir = os.path.join(root, project, "piper_ngi", "01_raw_alignments")
    for sample_run in glob.glob("{}/*.out".format(raw_alignments_dir)):
        sample_run_algn = sample_run.split("/")[-1] # this looks like P1775_102.AH2T7GCCXX.P1775_102.1.bam.out
        sample_name = sample_run_algn.split(".")[0]
        sample_lane = int(sample_run_algn.split(".")[3])
        if not sample_name in samples:
            samples[sample_name] = init_sample_hash_emtry()
        samples[sample_name]['#Analysis_runs'] += 1
    # now check if I can retrieve other information about this sample
    for sample, sample_entry in samples.items():
        genome_results_file = os.path.join(root, project, "piper_ngi", "06_final_alignment_qc",
                                           "{}.clean.dedup.qc".format(sample),
                                           "genome_results.txt")
        # Qualimap output may exist even when no raw-alignment .out file was found.
        if os.path.isfile(genome_results_file) and sample_entry['#Analysis_runs'] == 0:
            sample_entry['#Analysis_runs'] = 1 # at least one is present
        if sample_entry['#Analysis_runs'] > 0:
            #if i have run some analysis on this sample fetch info about sequenced reads and coverage
            picard_duplication_metrics = os.path.join(root, project, "piper_ngi", "05_processed_alignments",
                                                      "{}.metrics".format(sample))
            if os.path.isfile(genome_results_file):
                #store informations
                parse_qualimap(genome_results_file, sample_entry)
            if os.path.isfile(picard_duplication_metrics) and sample_entry['#Reads'] > 0:
                # if picard file exists and bamqc has been parsed with success
                parse_bamtools_markdup(picard_duplication_metrics, sample_entry)
def find_sample_from_DELIVERY(root, project, samples):
    """given a project (e.g. P1775) finds all samples in the DELIVERED folder
    and flags each of them as Delivered=True in `samples` (updated in place)
    """
    project_delivery_dir = os.path.join(root, project)
    if not os.path.exists(project_delivery_dir):
        return None
    for sample in os.listdir(project_delivery_dir):
        # 00-Reports holds project-level reports, not a sample.
        if os.path.isdir(os.path.join(project_delivery_dir, sample)) and sample != "00-Reports":
            if not sample in samples:
                samples[sample] = init_sample_hash_emtry()
            samples[sample]['Delivered'] = True
def parse_bamtools_markdup(picard_duplication_metrics, sample):
    """Parse a Picard MarkDuplicates metrics file and record the duplication rate.

    Looks for the "## METRICS CLASS" marker, skips the column-header row that
    follows it, and reads PERCENT_DUPLICATION from the metrics row, storing it
    (as a float) under ``sample['%Dup']``.

    :param picard_duplication_metrics: path to the Picard ``.metrics`` file
    :param sample: per-sample stats dict, updated in place
    """
    with open(picard_duplication_metrics, 'r') as f:
        for line in f:
            if line.startswith("## METRICS CLASS"):
                next(f)  # skip the column-header row
                metrics_row = next(f).strip().split()
                # NOTE(review): column index 7 is assumed to hold
                # PERCENT_DUPLICATION for the metrics layout produced by this
                # pipeline -- confirm against the Picard version in use.
                # Some locales print the percentage with a decimal comma.
                sample['%Dup'] = float(metrics_row[7].replace(",", "."))
def parse_qualimap(genome_results_file, sample):
    """Parse a qualimap ``genome_results.txt`` report into the sample dict.

    Walks through the report's ">>>>>>>" sections (Reference, Globals,
    Insert size, Coverage, Coverage per contig) and fills in read counts,
    coverage and insert-size statistics in ``sample`` (updated in place).

    Keys written: '#Reads', 'RowCov', '#AlignedReads', '%AlignedReads',
    'AlignCov', 'MedianInsertSize', 'AutosomalCoverage'.

    NOTE(review): raises ZeroDivisionError when the report contains no reads
    or no autosomal contigs -- callers only invoke this for samples with at
    least one analysis run, so that is assumed not to happen in practice.
    """
    reference_size = 0
    number_of_reads = 0
    number_of_mapped_reads = 0
    coverage_mapped = 0
    MedianInsertSize = 0
    autosomal_cov_length = 0
    autosomal_cov_bases = 0
    # Section flags: at most one is active at a time while the
    # corresponding part of the report is being read.
    reference_section = False
    global_section = False
    coverage_section = False
    coverage_per_contig_section = False
    insertSize_section = False
    with open(genome_results_file, 'r') as f:
        for line in f:
            # Section headers.  ">>>>>>> Coverage per contig" must be tested
            # before ">>>>>>> Coverage" because the latter is a prefix of the
            # former.
            if line.startswith('>>>>>>> Reference'):
                reference_section = True
                continue
            if line.startswith('>>>>>>> Globals'):
                reference_section = False
                global_section = True
                continue
            if line.startswith('>>>>>>> Insert'):
                global_section = False
                insertSize_section = True
                continue
            if line.startswith('>>>>>>> Coverage per contig'):
                coverage_section = False
                coverage_per_contig_section = True
                continue
            if line.startswith('>>>>>>> Coverage'):
                coverage_section = True
                insertSize_section = False
                continue
            if reference_section:
                line = line.strip()
                if "number of bases" in line:
                    reference_size = int(line.split()[4].replace(",", ""))
                    reference_section = False
            if global_section:
                line = line.strip()
                if "number of reads" in line:
                    number_of_reads = int(line.split()[4].replace(",", ""))
                if "number of mapped reads" in line:
                    number_of_mapped_reads = int(line.split()[5].replace(",", ""))
            if insertSize_section:
                line = line.strip()
                if "median insert size" in line:
                    MedianInsertSize = int(line.split()[4])
            if coverage_section:
                line = line.strip()
                if "mean coverageData" in line:
                    coverage_mapped = float(line.split()[3].replace("X", ""))
            if coverage_per_contig_section:
                line = line.strip()
                if line:
                    sections = line.split()
                    # Only autosomes (contigs named 1..22) contribute to the
                    # autosomal coverage estimate.
                    if sections[0].isdigit() and int(sections[0]) <= 22:
                        autosomal_cov_length += float(sections[1])
                        autosomal_cov_bases += float(sections[2])
    sample['#Reads'] = number_of_reads
    # Raw coverage estimate assumes 150 bp reads.
    sample['RowCov'] = (number_of_reads * 150) / float(reference_size)
    sample['#AlignedReads'] = number_of_mapped_reads
    sample['%AlignedReads'] = (float(number_of_mapped_reads) / number_of_reads) * 100
    sample['AlignCov'] = coverage_mapped
    sample['MedianInsertSize'] = MedianInsertSize
    sample['AutosomalCoverage'] = autosomal_cov_bases / autosomal_cov_length
def find_results_from_francesco(uppmax_project, project):
    """Collect per-sample statistics for *project* across all pipeline stages.

    Builds the standard UPPMAX directory layout for *uppmax_project* and lets
    each stage-specific scanner (archive/incoming, DATA, ANALYSIS, DELIVERY)
    fill in its part of the per-sample stats.

    :returns: dict mapping sample name -> stats dict
    """
    base = "/proj/{}".format(uppmax_project)
    samples = {}
    find_samples_from_archive(
        ("{}/archive/".format(base), "{}/incoming/".format(base)),
        project, samples)
    find_sample_from_DATA("{}/nobackup/NGI/DATA/".format(base), project, samples)
    find_sample_from_ANALYSIS("{}/nobackup/NGI/ANALYSIS/".format(base), project, samples)
    find_sample_from_DELIVERY("{}/nobackup/NGI/DELIVERY/".format(base), project, samples)
    return samples
def get_low_coverage(project, results_francesco):
    """Return samples whose autosomal coverage is below the 28.5X threshold.

    :param project: project id (kept for interface symmetry; not used here)
    :param results_francesco: dict of sample -> stats as produced by
        ``find_results_from_francesco()``
    :returns: dict of sample -> coverage for every sample below 28.5X, plus
        samples whose coverage value is missing or malformed (e.g. a
        '29,1111'-style string) so that oddities are never silently hidden.
    """
    result = {}
    for sample, sample_data in results_francesco.items():
        coverage = sample_data.get('AutosomalCoverage')
        try:
            # Was a bare ``except:`` -- narrowed to the two failures float()
            # can actually raise here (None -> TypeError, bad string -> ValueError).
            if float(coverage) < 28.5:
                result[sample] = float(coverage)
        except (TypeError, ValueError):
            # Missing or unparsable coverage: report the raw value as-is so
            # the operator can investigate.
            result[sample] = coverage
    return result
def get_samples_with_undetermined(data_dir, project):
    """Find organized fastq files named 'Undetermined' and group them.

    Globs every fastq under ``<data_dir>/<project>/*/*/*/`` and, for each
    file whose name contains 'Undetermined', records the sample name (the
    part of the filename before '_Undetermined') together with the flowcell
    directory it sits in.

    :returns: dict of sample -> list of flowcell ids (no duplicates)
    """
    undetermined = {}
    pattern = os.path.join(data_dir, project, '*/*/*/*.fastq*')
    for fastq_path in glob.glob(pattern):
        fastq_name = os.path.basename(fastq_path)
        if 'Undetermined' not in fastq_name:
            continue
        sample = fastq_name.split('_Undetermined')[0]
        flowcell = fastq_path.split('/')[-2]
        flowcells = undetermined.setdefault(sample, [])
        if flowcell not in flowcells:
            flowcells.append(flowcell)
    return undetermined
def get_samples_under_analysis(project):
    """List sample names that currently have a piper analysis job queued/running.

    Greps the SLURM ``jobinfo`` output for job names of the form
    ``piper_<project>-<sample>-...`` and extracts the sample part.

    :returns: list of unique sample names, in first-seen order
    """
    result = []
    command = "jobinfo | grep piper_{}".format(project)
    try:
        # universal_newlines=True keeps the output as text on Python 3 and
        # matches how get_sequenced() invokes subprocess.
        p = subprocess.Popen(command, shell=True, universal_newlines=True,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output = p.communicate()
    except Exception as e:
        print("Cannot execute command: {}".format(command))
        raise e
    for line in output[0].split('\n'):
        # skip empty lines
        if line.strip() != '':
            sample = line.split('piper_{}-'.format(project))[-1].split('-')[0]
            if sample not in result:
                result.append(sample)
    return result
def get_samples_under_qc(project):
    """List sample names that currently have a QC job queued/running.

    Mirrors get_samples_under_analysis() but matches ``qc_<project>-<sample>``
    job names in the SLURM ``jobinfo`` output.

    :returns: list of unique sample names, in first-seen order
    """
    result = []
    command = "jobinfo | grep qc_{}".format(project)
    try:
        # universal_newlines=True keeps the output as text on Python 3 and
        # matches how get_sequenced() invokes subprocess.
        p = subprocess.Popen(command, shell=True, universal_newlines=True,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output = p.communicate()
    except Exception as e:
        print("Cannot execute command {}".format(command))
        raise e
    for line in output[0].split('\n'):
        # skip empty lines
        if line.strip() != '':
            sample = line.split('qc_{}-'.format(project))[-1]
            if sample not in result:
                result.append(sample)
    return result
def get_samples_with_failed_analysis(project, analysis_dir):
    """Collect samples whose piper analysis appears to have failed.

    Scans the ``*.exit`` files written by piper under the project's log
    directory.  A sample is reported when its exit file holds a non-zero
    exit code, or when the exit file is empty while the sample is not in
    the current SLURM queue (i.e. it died without recording a code).

    :returns: dict of sample -> failure description

    NOTE(review): when the same sample has three or more exit files, the
    accumulation below wraps an already-wrapped list again
    (``result[sample] = [result[sample]]``), producing nested lists --
    presumably samples rarely have more than two exit files; confirm
    before relying on the shape of the values.
    """
    log_path = os.path.join(analysis_dir, '{}/piper_ngi/logs/{}-*.exit'.format(project, project))
    under_analysis = get_samples_under_analysis(project)
    exit_files = glob.glob(log_path)
    result = {}
    for path in exit_files:
        with open(path, 'r') as exit_file:
            exit_code = exit_file.read().strip()
            # exit files are named like: P4603-P4603_189-merge_process_variantcall.exit
            sample = os.path.basename(path).replace('{}-'.format(project), '').split('-')[0]
            if exit_code == '' and sample not in under_analysis:
                if sample in result:
                    result[sample] = [result[sample]]
                    result[sample].append('Empty exit code, but sample is not under analysis')
                else:
                    result[sample] = 'Empty exit code, but sample is not under analysis'
            elif exit_code.strip() != '0' and exit_code != '':
                if sample in result:
                    result[sample] = [result[sample]]
                    result[sample].append('Exit code: {}'.format(exit_code))
                else:
                    result[sample] = 'Exit code: {}'.format(exit_code)
    return result
def get_incoherent_samples(results_francesco):
    """Return samples whose sequenced/organized/analyzed run counts disagree.

    For each sample the number of archived runs, organized (DATA) runs and
    analysis runs should be equal; anything else points at a stalled or
    broken pipeline step.  Samples with missing or non-integer counts are
    also reported so that oddities are never hidden.

    :returns: dict of sample -> {'sequenced': .., 'organized': .., 'analyzed': ..}
    """
    result = {}
    for sample_id, sample in results_francesco.items():
        sequenced = sample.get('#Archived_runs', '')
        organized = sample.get('#Data_runs', '')
        analyzed = sample.get('#Analysis_runs', '')
        try:
            sequenced = int(sequenced)
            organized = int(organized)
            analyzed = int(analyzed)
        # Was the Python-2-only form ``except ValueError, e``; TypeError is
        # also caught so a stray None is reported instead of crashing.
        except (TypeError, ValueError):
            # If not an int or something strange in the results, report it too.
            result[sample_id] = {'sequenced': sequenced, 'organized': organized, 'analyzed': analyzed}
        else:
            if not (sequenced == organized == analyzed):
                result[sample_id] = {'sequenced': sequenced, 'organized': organized, 'analyzed': analyzed}
    return result
def get_sequenced(project):
    """Map each sample of *project* to the flowcells it was sequenced on.

    Greps the SampleSheet.csv of every flowcell under the (hard-coded)
    ngi2016003 incoming directory for the project id; the sample name is
    taken from the third comma-separated column of each matching line.

    :returns: dict of sample -> list of flowcell directory names

    NOTE(review): the same flowcell is appended once per matching
    sample-sheet line, so duplicates are possible in the lists; the
    visible downstream consumers tolerate this.
    """
    incoming = "/proj/ngi2016003/incoming"
    project_flowcells = {}
    for fc in os.listdir(incoming):
        sample_sheet = os.path.join(incoming, fc, 'SampleSheet.csv')
        command = 'grep {} {}'.format(project, sample_sheet)
        try:
            p = subprocess.Popen(command, shell=True, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except Exception as e:
            print('Command failed: {}'.format(command))
            raise e
        output = p.communicate()[0]
        if not output:
            continue
        for line in output.split('\n'):
            # skip empty lines
            if not line:
                continue
            try:
                sample = line.split(',')[2]
            # Narrowed from a blanket except: only a short line can fail here.
            except IndexError:
                print('Skipping line: {} from sample sheet: {}'.format(line, sample_sheet))
                continue
            if sample not in project_flowcells:
                project_flowcells[sample] = [fc]
            else:
                project_flowcells[sample].append(fc)
    return project_flowcells
def get_organized(project):
    """Return, per sample, the flowcells that have been organized into DATA.

    A flowcell counts as organized when a
    ``DATA/<project>/<sample>/<libprep>/<flowcell>`` directory exists.

    :returns: dict of sample -> list of organized flowcell ids (deduplicated)
    """
    organized = {}
    data_root = os.path.join('/proj/ngi2016003/nobackup/NGI/DATA', project)
    for sample, flowcells in get_sequenced(project).items():
        for fc in flowcells:
            # '*' matches the libprep directory ('A', 'B', ...)
            if not glob.glob(os.path.join(data_root, sample, '*', fc)):
                continue
            bucket = organized.setdefault(sample, [])
            if fc not in bucket:
                bucket.append(fc)
    return organized
def get_reprepped(project):
    # Placeholder: re-prep detection has not been implemented yet.
    pass
def get_not_organized(project):
    """Return the sequenced flowcells that are NOT yet organized into DATA.

    The inverse of get_organized(): a flowcell is listed when no
    ``DATA/<project>/<sample>/<libprep>/<flowcell>`` directory exists for it.

    :returns: dict of sample -> list of flowcell ids still to organize
    """
    missing = {}
    data_root = os.path.join('/proj/ngi2016003/nobackup/NGI/DATA', project)
    for sample, flowcells in get_sequenced(project).items():
        for fc in flowcells:
            # '*' matches the libprep directory ('A', 'B', ...)
            if glob.glob(os.path.join(data_root, sample, '*', fc)):
                continue
            missing.setdefault(sample, []).append(fc)
    return missing
if __name__ == '__main__':
    # Command-line entry point: parse the report flags, then dispatch to one
    # of the mutually exclusive report branches below.
    parser = argparse.ArgumentParser("""Process one or more project and report basic statistiscs for it """)
    parser.add_argument('projects', metavar='project', type=str, nargs='+', help='Projects we want to have statistics for (P1111)')
    parser.add_argument('--project-status', help="reports number of samples, of samples-runs, analysed samples and delivered samples (work only if a single project is specified)", action='store_true')
    parser.add_argument('--skip-header', help="skip header", action='store_true')
    # added by Kate
    parser.add_argument('--incoherent', help="Project-status but only for samples which have incoherent number of sequenced/organized/analyzed", action="store_true")
    parser.add_argument('--undetermined', help="List of the samples which use undetermined", action="store_true")
    parser.add_argument('--sequenced', help="List of all the sequenced samples", action="store_true")
    parser.add_argument('--resequenced', help="List of samples that have been sequenced more than once, and the flowcells", action="store_true")
    parser.add_argument('--organized', help="List of all the organized samples and the flowcells", action="store_true")
    parser.add_argument('--to-organize', help="List of all the not-organized samples and flowcells", action="store_true")
    parser.add_argument('--analyzed', help="List of all the analysed samples", action="store_true")
    parser.add_argument('--to-analyze', help="List of samples that are ready to be analyzed", action="store_true")
    parser.add_argument('--analysis-failed', help="List of all the samples with failed analysis (with exit code != 0 or empty exit code for samples not under analysis", action="store_true")
    parser.add_argument('--under-analysis', help="List of the samples under analysis", action="store_true")
    parser.add_argument('--under-qc', help="List of samples under qc. Use for projects without BP", action="store_true")
    parser.add_argument('--low-coverage', help="List of analyzed samples with coverage below 28.5X", action="store_true")
    parser.add_argument('--low-mapping', help="List of all the samples with mapping below 97 percent", action="store_true")
    parser.add_argument('--flowcells', help="List of flowcells where each sample has been sequenced", action="store_true")
    # todo
    parser.add_argument('--high-duplicates', help="List of the samples with high percentage of duplicates (more than 15 percent)", action="store_true")
    parser.add_argument('--to-sequence', help="List of the samples that are not sequenced AT ALL on ANY flowcells or lanes. Not implemented yet", action="store_true")
    parser.add_argument('--qc-done', help="List of samples with completed QC. Not implemented yet", action="store_true")
    parser.add_argument('--sample', '-s', type=str, help="Statistics for the specified sample. Not implemented yet")
    args = parser.parse_args()
    if not args.projects:
        print "ERROR: project must be specified"
        sys.exit()
    # parse arguments
    # NOTE(review): despite nargs='+', only the first listed project is
    # actually processed.
    project = args.projects[0]
    # NOTE(review): uppmax_id is not defined anywhere in this part of the
    # file -- presumably a module-level constant set earlier; verify, as a
    # NameError here would kill every report branch.
    data_dir = "/proj/{}/nobackup/NGI/DATA/".format(uppmax_id)
    analysis_dir = "/proj/{}/nobackup/NGI/ANALYSIS/".format(uppmax_id)
    # output the result
    # Report branches: each flag below produces one report; the chain makes
    # them mutually exclusive (first matching flag wins).
    if args.low_coverage:
        all_results = find_results_from_francesco(uppmax_id, project)
        samples = get_low_coverage(project, all_results)
        if samples:
            if not args.skip_header:
                print "Coverage below 28.5X:"
            for sample in sorted(samples.keys()):
                print "{} {}".format(sample, samples[sample])
        else:
            print 'All samples are above 28.5X'
    elif args.sequenced:
        flowcells_samples = get_sequenced(project) # from incoming
        if flowcells_samples:
            if not args.skip_header:
                print 'Sequenced samples'
            for sample, flowcells in flowcells_samples.items():
                print "{}: {}".format(sample, ' '.join(sorted(flowcells)))
        else:
            print 'No samples sequenced'
    elif args.resequenced:
        sequenced = get_sequenced(project)
        resequenced = {}
        for sample, flowcells in sequenced.items():
            if len(flowcells) > 1:
                resequenced[sample] = flowcells
        # NOTE(review): unlike the other branches, nothing is printed when
        # there are no resequenced samples.
        if resequenced:
            if not args.skip_header:
                print 'Resequenced samples'
            for sample, flowcells in sorted(resequenced.items(), key=lambda x:x[0]):
                print "{}: {}".format(sample, ' '.join(sorted(flowcells)))
    elif args.organized:
        # todo: print by flowcell, not by sample
        organized = get_organized(project)
        if organized:
            if not args.skip_header:
                print 'Organized flowcells/samples:'
            for sample, flowcells in sorted(organized.items(), key=lambda x:x[0]):
                print "{}: {}".format(sample, ' '.join(sorted(flowcells)))
        else:
            print 'No organized samples'
    elif args.to_organize:
        result = get_not_organized(project)
        if result:
            if not args.skip_header:
                print 'Samples to be organized:'
            for sample, flowcells in result.items():
                print "{}: {}".format(sample, ' '.join(flowcells))
        else:
            print 'All samples organized'
    elif args.analyzed:
        samples = find_results_from_francesco(uppmax_id, project)
        analyzed_samples = []
        sequenced_samples = []
        for sample_id, sample in samples.items():
            sequenced = sample.get('#Archived_runs', '')
            organized = sample.get('#Data_runs', '')
            analyzed = sample.get('#Analysis_runs', '')
            # A sample counts as analyzed only when all three run counts are
            # non-zero and equal.
            if sequenced and organized and analyzed:
                if sequenced == organized == analyzed:
                    analyzed_samples.append(sample_id)
                if sample_id not in sequenced_samples:
                    sequenced_samples.append(sample_id)
        if set(analyzed_samples) == set(sequenced_samples) != set([]):
            print 'All {} samples analyzed'.format(len(analyzed_samples))
        elif analyzed_samples:
            if not args.skip_header:
                print 'Analyzed samples:'
            for sample in sorted(analyzed_samples):
                print sample
            if not args.skip_header:
                print '{}/{} (analyzed/sequenced) samples have been analyzed.'.format(len(analyzed_samples), len(sequenced_samples))
                print 'Check --to-analyze, --to-organize, --analysis-failed'
        else:
            print 'No analyzed samples'
    elif args.undetermined:
        result = get_samples_with_undetermined(data_dir, project)
        if result:
            if not args.skip_header:
                print 'Organized with undetermined:'
            for sample in sorted(result.keys()):
                print "{}: {}".format(sample, ", ".join(fc for fc in result[sample]))
        else:
            print 'No undetermined used'
    elif args.under_analysis:
        result = get_samples_under_analysis(project)
        if result:
            if not args.skip_header:
                print 'Samples under analysis:'
            for sample in sorted(result):
                print sample
        else:
            print 'No samples are being analyzed'
    elif args.to_analyze:
        samples = find_results_from_francesco(uppmax_id, project)
        samples_to_analyze = []
        for sample_id, sample in samples.items():
            organized = sample.get('#Data_runs', '')
            analyzed = sample.get('#Analysis_runs', '')
            # NOTE(review): this comparison may mix ints and '' defaults --
            # it relies on Python 2 mixed-type ordering; confirm the counts
            # are always ints here.
            if organized > analyzed:
                samples_to_analyze.append(sample_id)
        if samples_to_analyze:
            if not args.skip_header:
                print 'Samples ready to be analyzed:'
            for sample in sorted(samples_to_analyze):
                print sample
        else:
            print 'No samples ready to be analyzed. Check --to-organize or --analyzed'
    elif args.under_qc:
        result = get_samples_under_qc(project)
        if result:
            if not args.skip_header:
                print 'Samples under QC:'
            for sample in sorted(result):
                print sample
        else:
            print 'No samples under QC'
    elif args.analysis_failed:
        result = get_samples_with_failed_analysis(project, analysis_dir)
        if result:
            if not args.skip_header:
                print 'Samples with failed analysis:'
            for sample in sorted(result):
                print sample, result[sample]
        else:
            print 'No analysis failed'
    elif args.incoherent:
        results_francesco = find_results_from_francesco(uppmax_id, project)
        result = get_incoherent_samples(results_francesco)
        if result:
            if not args.skip_header:
                print "Samples with incoherent runs:"
            for sample in sorted(result.keys()):
                numbers = result[sample]
                print "{}\t{}\t{}\t{}".format(sample, numbers['sequenced'], numbers['organized'], numbers['analyzed'])
        else:
            print "All samples should be fine."
    elif args.low_mapping:
        result = find_results_from_francesco(uppmax_id, project)
        low_mapping = {}
        for sample_id, sample in result.items():
            mapping = sample.get('%AlignedReads', '')
            try:
                mapping = float(mapping)
            # add strange values as well (if something is wrong, we can see it)
            except ValueError, e:
                low_mapping[sample_id] = mapping
            else:
                if mapping < 97.0:
                    low_mapping[sample_id] = mapping
        if low_mapping:
            if not args.skip_header:
                print "Samples with low mapping (<97%):"
            for sample, mapping in sorted(low_mapping.items(), key=lambda x:x[1], reverse=True):
                print sample, low_mapping[sample]
        else:
            print 'All samples mapped more than 97%'
    elif args.flowcells:
        result = get_sequenced(project)
        if result:
            for sample in sorted(result.keys()):
                print '{} {}'.format(sample, ' '.join(result[sample]))
        else:
            print 'Something was wrong? No flowcells in the result'
elif args.to_sequence:
print "--to-sequence has not been implemented yet"
elif args.sample:
# todo sequenced on flowcells, organized on flowcells, undetermined, coverage, duplicates, mapping,
# todo: under analysis, analysis failed
# stats from Francesco's script - done
# + sequenced on flowcells
# + organized on flowcells - done
# sequenced, but not organized - done
# undetermined
result = find_results_from_francesco(uppmax_id, project)
sample = args.sample
sample_entry = result.get(sample, {})
if sample_entry:
if not args.skip_header:
print "sample_name\t#Reads\tRaw_coverage\t#Aligned_reads\t%Aligned_reads\tAlign_cov\tAutosomalCoverage\t%Dup\tMedianInsertSize"
print "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format(
sample,
sample_entry.get('#Reads'),
sample_entry.get('RowCov'),
sample_entry.get('#AlignedReads'),
sample_entry.get('%AlignedReads'),
sample_entry.get('AlignCov'),
sample_entry.get('AutosomalCoverage'),
sample_entry.get('%Dup'),
sample_entry.get('MedianInsertSize')
)
else:
'No stats for sample {}'.format(sample)
sequenced = get_sequenced(project)
flowcells = sequenced.get(sample, {})
if flowcells:
print 'Sequenced on flowcells:'
for flowcell in sorted(flowcells):
print ' {}'.format(flowcell)
else:
print 'Nothing sequenced'
organized = get_organized(project)
flowcells = organized.get(sample, {})
if flowcells:
print 'Organized on flowcells:'
for flowcell in sorted(flowcells):
print ' {}'.format(flowcell)
else:
print 'Nothing organized'
else:
result = find_results_from_francesco(uppmax_id, project)
if not args.skip_header:
print "sample_name\t#Reads\tRaw_coverage\t#Aligned_reads\t%Aligned_reads\tAlign_cov\tAutosomalCoverage\t%Dup\tMedianInsertSize"
for sample, sample_entry in result.items():
print "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format(
sample,
sample_entry.get('#Reads'),
sample_entry.get('RowCov'),
sample_entry.get('#AlignedReads'),
sample_entry.get('%AlignedReads'),
sample_entry.get('AlignCov'),
sample_entry.get('AutosomalCoverage'),
sample_entry.get('%Dup'),
sample_entry.get('MedianInsertSize')
) | mit |
JimCircadian/ansible | lib/ansible/modules/network/f5/bigip_pool_member.py | 8 | 29017 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# Copyright (c) 2013 Matt Hite <mhite@hotmail.com>
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_pool_member
short_description: Manages F5 BIG-IP LTM pool members
description:
- Manages F5 BIG-IP LTM pool members via iControl SOAP API.
version_added: 1.4
options:
name:
description:
- Name of the node to create, or re-use, when creating a new pool member.
- This parameter is optional and, if not specified, a node name will be
created automatically from either the specified C(address) or C(fqdn).
version_added: 2.6
state:
description:
- Pool member state.
required: True
default: present
choices:
- present
- absent
- enabled
- disabled
- forced_offline
pool:
description:
- Pool name. This pool must exist.
required: True
partition:
description:
- Partition
default: Common
address:
description:
- IP address of the pool member. This can be either IPv4 or IPv6. When creating a
new pool member, one of either C(address) or C(fqdn) must be provided. This
parameter cannot be updated after it is set.
aliases:
- ip
- host
version_added: 2.2
fqdn:
description:
- FQDN name of the pool member. This can be any name that is a valid RFC 1123 DNS
name. Therefore, the only characters that can be used are "A" to "Z",
"a" to "z", "0" to "9", the hyphen ("-") and the period (".").
      - FQDN names must include at least one period; delineating the host from
the domain. ex. C(host.domain).
- FQDN names must end with a letter or a number.
- When creating a new pool member, one of either C(address) or C(fqdn) must be
provided. This parameter cannot be updated after it is set.
aliases:
- hostname
version_added: 2.6
port:
description:
- Pool member port.
- This value cannot be changed after it has been set.
required: True
connection_limit:
description:
- Pool member connection limit. Setting this to 0 disables the limit.
description:
description:
- Pool member description.
rate_limit:
description:
- Pool member rate limit (connections-per-second). Setting this to 0
disables the limit.
ratio:
description:
- Pool member ratio weight. Valid values range from 1 through 100.
New pool members -- unless overridden with this value -- default
to 1.
preserve_node:
description:
- When state is C(absent) attempts to remove the node that the pool
member references.
- The node will not be removed if it is still referenced by other pool
members. If this happens, the module will not raise an error.
- Setting this to C(yes) disables this behavior.
type: bool
version_added: 2.1
priority_group:
description:
- Specifies a number representing the priority group for the pool member.
- When adding a new member, the default is 0, meaning that the member has no priority.
- To specify a priority, you must activate priority group usage when you
create a new pool or when adding or removing pool members. When activated,
the system load balances traffic according to the priority group number
assigned to the pool member.
- The higher the number, the higher the priority, so a member with a priority
of 3 has higher priority than a member with a priority of 1.
version_added: 2.5
fqdn_auto_populate:
description:
- Specifies whether the system automatically creates ephemeral nodes using
the IP addresses returned by the resolution of a DNS query for a node
defined by an FQDN.
- When C(enabled), the system generates an ephemeral node for each IP address
returned in response to a DNS query for the FQDN of the node. Additionally,
when a DNS response indicates the IP address of an ephemeral node no longer
exists, the system deletes the ephemeral node.
- When C(disabled), the system resolves a DNS query for the FQDN of the node
with the single IP address associated with the FQDN.
- When creating a new pool member, the default for this parameter is C(yes).
- This parameter is ignored when C(reuse_nodes) is C(yes).
type: bool
version_added: 2.6
reuse_nodes:
description:
- Reuses node definitions if requested.
default: yes
type: bool
version_added: 2.6
session_state:
description:
- Set new session availability status for pool member.
- This parameter is deprecated and will be removed in Ansible 2.7. Use C(state)
C(enabled) or C(disabled).
version_added: 2.0
choices:
- enabled
- disabled
monitor_state:
description:
- Set monitor availability status for pool member.
- This parameter is deprecated and will be removed in Ansible 2.7. Use C(state)
C(enabled) or C(disabled).
version_added: 2.0
choices:
- enabled
- disabled
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Add pool member
bigip_pool_member:
server: lb.mydomain.com
user: admin
password: secret
state: present
pool: my-pool
partition: Common
host: "{{ ansible_default_ipv4['address'] }}"
port: 80
description: web server
connection_limit: 100
rate_limit: 50
ratio: 2
delegate_to: localhost
- name: Modify pool member ratio and description
bigip_pool_member:
server: lb.mydomain.com
user: admin
password: secret
state: present
pool: my-pool
partition: Common
host: "{{ ansible_default_ipv4['address'] }}"
port: 80
ratio: 1
description: nginx server
delegate_to: localhost
- name: Remove pool member from pool
bigip_pool_member:
server: lb.mydomain.com
user: admin
password: secret
state: absent
pool: my-pool
partition: Common
host: "{{ ansible_default_ipv4['address'] }}"
port: 80
delegate_to: localhost
- name: Force pool member offline
bigip_pool_member:
server: lb.mydomain.com
user: admin
password: secret
state: forced_offline
pool: my-pool
partition: Common
host: "{{ ansible_default_ipv4['address'] }}"
port: 80
delegate_to: localhost
- name: Create members with priority groups
bigip_pool_member:
server: lb.mydomain.com
user: admin
password: secret
pool: my-pool
partition: Common
    host: "{{ item.host }}"
name: "{{ item.name }}"
priority_group: "{{ item.priority_group }}"
port: 80
delegate_to: localhost
loop:
- host: 1.1.1.1
name: web1
priority_group: 4
- host: 2.2.2.2
name: web2
priority_group: 3
- host: 3.3.3.3
name: web3
priority_group: 2
- host: 4.4.4.4
name: web4
priority_group: 1
'''
RETURN = '''
rate_limit:
description: The new rate limit, in connections per second, of the pool member.
returned: changed
type: int
sample: 100
connection_limit:
description: The new connection limit of the pool member
returned: changed
type: int
sample: 1000
description:
description: The new description of pool member.
returned: changed
type: string
sample: My pool member
ratio:
description: The new pool member ratio weight.
returned: changed
type: int
sample: 50
priority_group:
description: The new priority group.
returned: changed
type: int
sample: 3
fqdn_auto_populate:
description: Whether FQDN auto population was set on the member or not.
returned: changed
type: bool
sample: True
fqdn:
description: The FQDN of the pool member.
returned: changed
type: string
sample: foo.bar.com
address:
description: The address of the pool member.
returned: changed
type: string
sample: 1.2.3.4
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import is_valid_hostname
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import is_valid_hostname
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
try:
import netaddr
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
class Parameters(AnsibleF5Parameters):
    # Maps REST API (camelCase) attribute names to the snake_case names
    # used by this module.
    api_map = {
        'rateLimit': 'rate_limit',
        'connectionLimit': 'connection_limit',
        'priorityGroup': 'priority_group',
    }
    # Attributes sent to the BIG-IP REST API on create/update.
    api_attributes = [
        'rateLimit', 'connectionLimit', 'description', 'ratio', 'priorityGroup',
        'address', 'fqdn', 'session', 'state'
    ]
    # Attributes reported back to the user in the module result.
    returnables = [
        'rate_limit', 'connection_limit', 'description', 'ratio', 'priority_group',
        'fqdn_auto_populate', 'session', 'state', 'fqdn', 'address'
    ]
    # Attributes compared between desired and current state to detect changes.
    updatables = [
        'rate_limit', 'connection_limit', 'description', 'ratio', 'priority_group',
        'fqdn_auto_populate', 'state'
    ]
class ModuleParameters(Parameters):
    """User-supplied (desired-state) parameters, adapted for the REST API."""
    @property
    def full_name(self):
        """Pool-member name in BIG-IP's ``<node><sep><port>`` form.

        IPv6 node addresses use '.' as the port separator (':' would be
        ambiguous inside the address itself); everything else uses ':'.
        """
        delimiter = ':'
        try:
            addr = netaddr.IPAddress(self.full_name_dict['name'])
            if addr.version == 6:
                delimiter = '.'
        except netaddr.AddrFormatError:
            # Not an IP literal (e.g. an FQDN-derived name); keep ':'.
            pass
        return '{0}{1}{2}'.format(self.full_name_dict['name'], delimiter, self.port)
    @property
    def full_name_dict(self):
        # Fall back to the address (or FQDN) when no explicit node name was given.
        if self._values['name'] is None:
            name = self._values['address'] if self._values['address'] else self._values['fqdn']
        else:
            name = self._values['name']
        return dict(
            name=name,
            port=self.port
        )
    @property
    def node_name(self):
        return self.full_name_dict['name']
    @property
    def fqdn_name(self):
        return self._values['fqdn']
    @property
    def fqdn(self):
        """FQDN sub-document for the API, including the autopopulate flag."""
        result = {}
        if self.fqdn_auto_populate:
            result['autopopulate'] = 'enabled'
        else:
            result['autopopulate'] = 'disabled'
        if self._values['fqdn'] is None:
            return result
        if not is_valid_hostname(self._values['fqdn']):
            raise F5ModuleError(
                "The specified 'fqdn' is not a valid hostname."
            )
        result['tmName'] = self._values['fqdn']
        return result
    @property
    def pool(self):
        # BUG FIX: this previously read ``self.want.partition``, but
        # ModuleParameters has no ``want`` attribute (that lives on the
        # Difference/manager objects) -- use this object's own partition.
        return fq_name(self.partition, self._values['pool'])
    @property
    def port(self):
        if 0 > int(self._values['port']) or int(self._values['port']) > 65535:
            raise F5ModuleError(
                "Valid ports must be in range 0 - 65535"
            )
        return int(self._values['port'])
    @property
    def state(self):
        # TODO(Remove all of this state craziness in 2.7)
        # Legacy handling: 'session_state'/'monitor_state' are deprecated in
        # favor of 'state'; warn and map the old pair onto the new values.
        if self.session_state is not None or self.monitor_state is not None:
            if self._values['state'] in ['enabled', 'disabled', 'forced_offline']:
                self._values['__warnings'].append([{
                    'msg': "'session_state' is deprecated and will be ignored in favor of 'state'.",
                    'version': '2.7'
                }])
                return self._values['state']
            else:
                if self.session_state is not None:
                    self._values['__warnings'].append([{
                        'msg': "'session_state' is deprecated and will be removed in the future. Use 'state'.",
                        'version': '2.7'
                    }])
                elif self.monitor_state is not None:
                    self._values['__warnings'].append([{
                        'msg': "'monitor_state' is deprecated and will be removed in the future. Use 'state'.",
                        'version': '2.7'
                    }])
            if self.session_state == 'enabled' and self.monitor_state == 'enabled':
                return 'enabled'
            elif self.session_state == 'disabled' and self.monitor_state == 'enabled':
                return 'disabled'
            else:
                return 'forced_offline'
        return self._values['state']
    @property
    def address(self):
        if self._values['address'] is None:
            return None
        elif self._values['address'] == 'any6':
            # 'any6' is a valid BIG-IP wildcard address, not an IP literal.
            return 'any6'
        try:
            addr = netaddr.IPAddress(self._values['address'])
            return str(addr)
        except netaddr.AddrFormatError:
            raise F5ModuleError(
                "The specified 'address' value is not a valid IP address."
            )
class ApiParameters(Parameters):
    # Parameters as read back from the BIG-IP REST API (current state).
    @property
    def allow(self):
        # NOTE(review): pool members have no 'allow' attribute; this looks
        # like a leftover copied from another F5 module -- confirm before
        # removing.
        if self._values['allow'] is None:
            return ''
        if self._values['allow'][0] == 'All':
            return 'all'
        allow = self._values['allow']
        result = list(set([str(x) for x in allow]))
        result = sorted(result)
        return result
    @property
    def rate_limit(self):
        # The API reports the string 'disabled' instead of 0 for "no limit".
        if self._values['rate_limit'] is None:
            return None
        if self._values['rate_limit'] == 'disabled':
            return 0
        return int(self._values['rate_limit'])
    @property
    def state(self):
        # Collapse the API's state/session value pair into this module's
        # present/disabled/forced_offline states.
        if self._values['state'] in ['user-up', 'unchecked', 'fqdn-up-no-addr'] and self._values['session'] in ['user-enabled']:
            return 'present'
        elif self._values['state'] in ['down', 'up'] and self._values['session'] == 'monitor-enabled':
            return 'present'
        elif self._values['state'] in ['user-down'] and self._values['session'] in ['user-disabled']:
            return 'forced_offline'
        else:
            return 'disabled'
# Marker subclass: node resources reuse the generic Parameters behaviour,
# but a distinct type keeps node reads distinguishable from member reads.
class NodeApiParameters(Parameters):
    pass
class Changes(Parameters):
    def to_return(self):
        """Collect the returnable attributes into a filtered dict.

        NOTE(review): the broad ``except Exception: pass`` means that if any
        property raises mid-iteration, whatever was gathered so far is
        returned unfiltered. Presumably deliberate best-effort reporting --
        confirm before tightening.
        """
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            pass
        return result
# Marker subclass: changes in the shape expected by the device API.
class UsableChanges(Changes):
    pass
class ReportableChanges(Changes):
    """Changes reshaped from API vocabulary back into user-facing values."""
    @property
    def ssl_cipher_suite(self):
        # Collapse the expanded default cipher list back to the literal
        # 'default' so output matches what the user supplied.
        # NOTE(review): relies on Parameters._ciphers, defined earlier in
        # this module (not visible here) -- confirm it is a ':'-separated
        # cipher string.
        default = ':'.join(sorted(Parameters._ciphers.split(':')))
        if self._values['ssl_cipher_suite'] == default:
            return 'default'
        else:
            return self._values['ssl_cipher_suite']
    @property
    def fqdn_auto_populate(self):
        # Map the API's 'enabled'/'disabled' flag back to a boolean.
        # Returns None implicitly when 'autopopulate' is absent.
        if self._values['fqdn'] is None:
            return None
        if 'autopopulate' in self._values['fqdn']:
            if self._values['fqdn']['autopopulate'] == 'enabled':
                return True
            return False
    @property
    def fqdn(self):
        # The API nests the FQDN under the 'tmName' key.
        if self._values['fqdn'] is None:
            return None
        if 'tmName' in self._values['fqdn']:
            return self._values['fqdn']['tmName']
    @property
    def state(self):
        # Fold the API's state/session pair into the module vocabulary.
        if self._values['state'] in ['user-up', 'unchecked', 'fqdn-up-no-addr'] and self._values['session'] in ['user-enabled']:
            return 'present'
        elif self._values['state'] in ['down', 'up'] and self._values['session'] == 'monitor-enabled':
            return 'present'
        elif self._values['state'] in ['user-down'] and self._values['session'] in ['user-disabled']:
            return 'forced_offline'
        else:
            return 'disabled'
class Difference(object):
    """Computes per-parameter deltas between desired and current config."""

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the changed value for *param*, or None when unchanged.

        A property on this class (e.g. ``state``) overrides the generic
        attribute comparison for parameters needing custom diff logic.
        """
        try:
            return getattr(self, param)
        except AttributeError:
            return self._generic_compare(param)

    def _generic_compare(self, param):
        # Fallback diff: report the desired value when it differs from the
        # current one, or when the current config lacks the attribute.
        desired = getattr(self.want, param)
        try:
            current = getattr(self.have, param)
            if desired != current:
                return desired
        except AttributeError:
            return desired

    @property
    def state(self):
        """Translate a state change into the API's state/session pair."""
        desired = self.want.state
        if desired == self.have.state:
            return None
        transitions = {
            'forced_offline': {'state': 'user-down', 'session': 'user-disabled'},
            'disabled': {'state': 'user-up', 'session': 'user-disabled'},
            'present': {'state': 'user-up', 'session': 'user-enabled'},
            'enabled': {'state': 'user-up', 'session': 'user-enabled'},
        }
        return transitions.get(desired)
class ModuleManager(object):
    """Orchestrates pool-member changes.

    Reads the current state from the BIG-IP, diffs it against the requested
    parameters, and performs the minimal create/update/delete API calls.
    """

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = kwargs.get('client', None)
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def _set_changed_options(self):
        # On create, every user-supplied parameter counts as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        # On update, only true differences from the device count as changes.
        # Returns True when anything differs.
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            if isinstance(change, dict):
                # Custom diffs (e.g. 'state') expand into several API keys.
                changed.update(change)
            else:
                changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def should_update(self):
        """Whether the device config differs from the desired config."""
        return self._update_changed_options()

    def exec_module(self):
        """Module entry point; returns the Ansible result dict."""
        changed = False
        result = dict()
        state = self.want.state
        try:
            # Bug fix: 'present' was duplicated in this membership list.
            if state in ['present', 'enabled', 'disabled', 'forced_offline']:
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def _announce_deprecations(self, result):
        # Surface any deprecation notes queued by the parameter classes.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def present(self):
        """Ensure the member exists (creating or updating as needed)."""
        if self.exists():
            return self.update()
        return self.create()

    def exists(self):
        """Whether the member exists in the pool.

        Raises F5ModuleError if the pool itself cannot be loaded.
        """
        try:
            pool = self.client.api.tm.ltm.pools.pool.load(
                name=self.want.pool,
                partition=self.want.partition
            )
        except Exception:
            # Any failure to load the pool is reported as a missing pool.
            raise F5ModuleError('The specified pool does not exist')
        result = pool.members_s.members.exists(
            name=self.want.full_name,
            partition=self.want.partition
        )
        return result

    def node_exists(self):
        """Whether the backing node object exists on the device."""
        resource = self.client.api.tm.ltm.nodes.node.exists(
            name=self.want.node_name,
            partition=self.want.partition
        )
        return resource

    def update(self):
        """Apply any differences to an existing member; True if changed."""
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True

    def remove(self):
        """Delete the member (and optionally its node); True if removed."""
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if not self.want.preserve_node:
            self.remove_node_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True

    def _set_host_by_name(self):
        # 'name' may be either an IP address or an FQDN; route it to the
        # matching parameter.
        try:
            netaddr.IPAddress(self.want.name)
            self.want.update({
                'fqdn': None,
                'address': self.want.name
            })
        except netaddr.AddrFormatError:
            if not is_valid_hostname(self.want.name):
                raise F5ModuleError(
                    "'name' is neither a valid IP address or FQDN name."
                )
            self.want.update({
                'fqdn': self.want.name,
                'address': None
            })

    def _update_api_state_attributes(self):
        # Translate module-level state vocabulary into the API's
        # state/session pair, clearing the deprecated inputs.
        if self.want.state == 'forced_offline':
            self.want.update({
                'state': 'user-down',
                'session': 'user-disabled',
                # TODO(Remove in 2.7)
                'session_state': None,
                'monitor_state': None
            })
        elif self.want.state == 'disabled':
            self.want.update({
                'state': 'user-up',
                'session': 'user-disabled',
                # TODO(Remove in 2.7)
                'session_state': None,
                'monitor_state': None
            })
        elif self.want.state in ['present', 'enabled']:
            self.want.update({
                'state': 'user-up',
                'session': 'user-enabled',
                # TODO(Remove in 2.7)
                'session_state': None,
                'monitor_state': None
            })

    def _update_address_with_existing_nodes(self):
        # Best effort: when re-using an existing node, adopt its
        # address/FQDN settings instead of the user-supplied ones.
        try:
            have = self.read_current_node_from_device(self.want.node_name)
            if self.want.fqdn_auto_populate and self.want.reuse_nodes:
                self.module.warn("'fqdn_auto_populate' is discarded in favor of the re-used node's auto-populate setting.")
            self.want.update({
                'fqdn_auto_populate': True if have.fqdn['autopopulate'] == 'enabled' else False
            })
            if 'tmName' in have.fqdn:
                self.want.update({
                    'fqdn': have.fqdn['tmName'],
                    'address': 'any6'
                })
            else:
                self.want.update({
                    'address': have.address
                })
        except Exception:
            # No matching node (or it is unreadable); fall through to the
            # user-supplied values.
            return None

    def create(self):
        """Create the member (and implicitly its node); True if created."""
        if self.want.reuse_nodes:
            self._update_address_with_existing_nodes()
        if self.want.name and not any(x for x in [self.want.address, self.want.fqdn_name]):
            self._set_host_by_name()
        self._update_api_state_attributes()
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True

    def create_on_device(self):
        params = self.changes.api_params()
        pool = self.client.api.tm.ltm.pools.pool.load(
            name=self.want.pool,
            partition=self.want.partition
        )
        pool.members_s.members.create(
            name=self.want.full_name,
            partition=self.want.partition,
            **params
        )

    def update_on_device(self):
        params = self.changes.api_params()
        pool = self.client.api.tm.ltm.pools.pool.load(
            name=self.want.pool,
            partition=self.want.partition
        )
        resource = pool.members_s.members.load(
            name=self.want.full_name,
            partition=self.want.partition
        )
        resource.modify(**params)

    def absent(self):
        """Ensure the member (and optionally its node) is gone."""
        if self.exists():
            return self.remove()
        if not self.want.preserve_node and self.node_exists():
            # Bug fix: remove_node_from_device() returns None, which
            # previously made 'changed' report as None rather than True.
            self.remove_node_from_device()
            return True
        return False

    def remove_from_device(self):
        pool = self.client.api.tm.ltm.pools.pool.load(
            name=self.want.pool,
            partition=self.want.partition
        )
        resource = pool.members_s.members.load(
            name=self.want.full_name,
            partition=self.want.partition
        )
        if resource:
            resource.delete()

    def remove_node_from_device(self):
        resource = self.client.api.tm.ltm.nodes.node.load(
            name=self.want.node_name,
            partition=self.want.partition
        )
        if resource:
            resource.delete()

    def read_current_from_device(self):
        """Load the member's attributes from the device as ApiParameters."""
        pool = self.client.api.tm.ltm.pools.pool.load(
            name=self.want.pool,
            partition=self.want.partition
        )
        resource = pool.members_s.members.load(
            name=self.want.full_name,
            partition=self.want.partition
        )
        return ApiParameters(params=resource.attrs)

    def read_current_node_from_device(self, node):
        """Load a node object's attributes as NodeApiParameters."""
        resource = self.client.api.tm.ltm.nodes.node.load(
            name=node,
            partition=self.want.partition
        )
        return NodeApiParameters(params=resource.attrs)
class ArgumentSpec(object):
    """Declarative argument spec consumed by main() when constructing the
    AnsibleModule."""
    def __init__(self):
        self.supports_check_mode = True
        argument_spec = dict(
            pool=dict(required=True),
            address=dict(aliases=['host', 'ip']),
            fqdn=dict(
                aliases=['hostname']
            ),
            name=dict(),
            port=dict(type='int', required=True),
            connection_limit=dict(type='int'),
            description=dict(),
            rate_limit=dict(type='int'),
            ratio=dict(type='int'),
            preserve_node=dict(type='bool'),
            priority_group=dict(type='int'),
            state=dict(
                default='present',
                choices=['absent', 'present', 'enabled', 'disabled', 'forced_offline']
            ),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            ),
            fqdn_auto_populate=dict(type='bool'),
            reuse_nodes=dict(type='bool', default=True),
            # Deprecated params
            # TODO(Remove in 2.7)
            session_state=dict(
                choices=['enabled', 'disabled'],
                removed_in_version=2.7,
            ),
            monitor_state=dict(
                choices=['enabled', 'disabled'],
                removed_in_version=2.7,
            ),
        )
        # Layer the module-specific options over the shared F5 options.
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
        # 'address' and 'fqdn' are alternative spellings of the member host.
        self.mutually_exclusive = [
            ['address', 'fqdn']
        ]
        self.required_one_of = [
            ['name', 'address', 'fqdn'],
        ]
def main():
    """Script entry point: build the AnsibleModule and run the manager."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        # Bug fix: these constraints were declared on ArgumentSpec but
        # never passed to AnsibleModule, so they were silently unenforced.
        mutually_exclusive=spec.mutually_exclusive,
        required_one_of=spec.required_one_of
    )
    if not HAS_F5SDK:
        module.fail_json(msg="The python f5-sdk module is required")
    if not HAS_NETADDR:
        module.fail_json(msg="The python netaddr module is required")
    # Bug fix: initialize before the try block. Previously, if F5Client()
    # itself raised, the except clause referenced an unbound 'client' and
    # an UnboundLocalError masked the real failure.
    client = None
    try:
        client = F5Client(**module.params)
        mm = ModuleManager(module=module, client=client)
        results = mm.exec_module()
        cleanup_tokens(client)
        module.exit_json(**results)
    except F5ModuleError as ex:
        if client is not None:
            cleanup_tokens(client)
        module.fail_json(msg=str(ex))


if __name__ == '__main__':
    main()
| gpl-3.0 |
jocave/snapcraft | snapcraft/internal/libraries.py | 6 | 3011 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import glob
import logging
import os
import platform
import subprocess
from snapcraft.internal import common
logger = logging.getLogger(__name__)
def determine_ld_library_path(root):
    """Return library search paths found under *root*.

    Scans the ld.so.conf drop-in files we know about and prefixes every
    path they mention with *root*.
    """
    # If more ld.so.conf files need to be supported, add them here.
    conf_globs = {
        '{}/usr/lib/*/mesa*/ld.so.conf'.format(root)
    }

    collected = [
        path
        for pattern in conf_globs
        for conf_file in glob.glob(pattern)
        for path in _extract_ld_library_paths(conf_file)
    ]
    return [root + path for path in collected]
def _extract_ld_library_paths(ld_conf_file):
    """Parse one ld.so.conf-style file into a list of path strings.

    Per the ldconfig manpage, entries may be separated by colons, commas,
    or any whitespace; '#' starts a comment running to end-of-line.
    """
    delimiter_pattern = re.compile(r'[:\s,]')
    comment_pattern = re.compile(r'#.*$')

    with open(ld_conf_file, 'r') as conf:
        stripped = (comment_pattern.sub('', raw).strip() for raw in conf)
        return [
            entry
            for line in stripped if line
            for entry in delimiter_pattern.split(line)
        ]
# Lazily-populated cache for _get_system_libs(); None until the first call.
_libraries = None
def _get_system_libs():
    """Return the frozenset of library names shipped by this OS release.

    The result is cached in the module-level ``_libraries`` global.
    NOTE(review): when the per-release list file is missing, the empty
    result is *not* cached, so the warning repeats on every call --
    presumably intentional; confirm before changing.
    """
    global _libraries
    if _libraries:
        return _libraries

    # NOTE(review): platform.linux_distribution() is deprecated and was
    # removed in Python 3.8; fine for the interpreters this targeted.
    release = platform.linux_distribution()[1]
    lib_path = os.path.join(common.get_librariesdir(), release)

    if not os.path.exists(lib_path):
        logger.warning('No libraries to exclude from this release')
        return frozenset()

    with open(lib_path) as fn:
        _libraries = frozenset(fn.read().split())

    return _libraries
def get_dependencies(elf):
    """Return a list of libraries that are needed to satisfy elf's runtime.

    This may include libraries contained within the project.
    """
    logger.debug('Getting dependencies for {!r}'.format(elf))
    try:
        ldd_lines = common.run_output(['ldd', elf]).split('\n')
    except subprocess.CalledProcessError:
        logger.warning(
            'Unable to determine library dependencies for {!r}'.format(elf))
        return []

    # ldd output looks like 'libfoo.so => /path/libfoo.so (0x...)'; keep the
    # resolved path (third field) when it exists on disk.
    candidates = []
    for line in ldd_lines:
        fields = line.split()
        if len(fields) > 2 and os.path.exists(fields[2]):
            candidates.append(fields[2])

    # Filter out libraries the base system already provides.
    system_libs = _get_system_libs()
    return [lib for lib in candidates
            if os.path.basename(lib) not in system_libs]
| gpl-3.0 |
funson/rt-xen | tools/python/xen/web/httpserver.py | 49 | 10765 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2005 Mike Wray <mike.wray@hp.com>
# Copyright (C) 2006 XenSource Ltd.
#============================================================================
import threading
import string
import socket
import types
from urllib import quote, unquote
import os
import os.path
import fcntl
from xen.xend import sxp
from xen.xend.Args import ArgError
from xen.xend.XendError import XendError
import http
import unix
from resource import Resource, ErrorPage
from SrvDir import SrvDir
class ThreadRequest:
    """A request to complete processing using a thread.
    """

    def __init__(self, processor, req, fn, args, kwds):
        self.processor = processor
        self.req = req
        self.fn = fn
        self.args = args
        self.kwds = kwds

    def run(self):
        # Mark the processor as threaded, then continue on a daemon thread
        # so a hung request cannot block interpreter exit.
        self.processor.setInThread()
        thread = threading.Thread(target=self.main)
        thread.setDaemon(True)
        thread.start()

    def call(self):
        # Invoke the deferred function; on failure, report the error and
        # finish the request. On success the caller resumes processing.
        try:
            self.fn(*self.args, **self.kwds)
        except SystemExit:
            raise
        except Exception, ex:
            self.req.resultErr(ex)
            self.req.finish()

    def main(self):
        # Thread body: run the deferred work, then keep serving further
        # requests on the same connection.
        self.call()
        self.processor.process()
class RequestProcessor:
    """Processor for requests on a connection to an http server.

    Requests are executed synchonously unless they ask for a thread by
    returning a ThreadRequest.
    """

    # Class-level defaults; set per-instance once processing starts.
    done = False
    inThread = False

    def __init__(self, server, sock, addr):
        self.server = server
        self.sock = sock
        # Separate buffered read and write file objects over the socket.
        self.srd = sock.makefile('rb')
        self.srw = sock.makefile('wb')
        self.srvaddr = server.getServerAddr()

    def isInThread(self):
        return self.inThread

    def setInThread(self):
        self.inThread = True

    def getServer(self):
        return self.server

    def getRequest(self):
        return HttpServerRequest(self, self.srvaddr, self.srd, self.srw)

    def close(self):
        try:
            self.sock.close()
        except:
            pass

    def finish(self):
        self.done = True
        self.close()

    def process(self):
        # Serve requests on this connection until finished. A ThreadRequest
        # hands the remainder of processing over to a thread and terminates
        # this (synchronous) loop.
        while not self.done:
            req = self.getRequest()
            res = req.process()
            if isinstance(res, ThreadRequest):
                if self.isInThread():
                    res.call()
                else:
                    res.run()
                break
            else:
                req.finish()
class HttpServerRequest(http.HttpRequest):
    """A single request to an http server.
    """

    def __init__(self, processor, addr, srd, srw):
        self.processor = processor
        self.prepath = ''
        http.HttpRequest.__init__(self, addr, srd, srw)

    def getServer(self):
        return self.processor.getServer()

    def process(self):
        """Process the request. If the return value is a ThreadRequest
        it is evaluated in a thread.
        """
        try:
            self.prepath = []
            self.postpath = map(unquote, string.split(self.request_path[1:], '/'))
            resource = self.getResource()
            return self.render(resource)
        except SystemExit:
            raise
        except Exception, ex:
            self.processError(ex)

    def processError(self, ex):
        # Last-resort error handler: dump the traceback server-side and
        # return a 500 to the client, closing the connection afterwards.
        import traceback; traceback.print_exc()
        self.sendError(http.INTERNAL_SERVER_ERROR, msg=str(ex))
        self.setCloseConnection('close')

    def finish(self):
        self.sendResponse()
        if self.close_connection:
            self.processor.finish()

    def prePathURL(self):
        # Reconstruct the absolute URL of the path consumed so far,
        # omitting the port when it is the scheme's default.
        url_host = self.getRequestHostname()
        port = self.getPort()
        if self.isSecure():
            url_proto = "https"
            default_port = 443
        else:
            url_proto = "http"
            default_port = 80
        if port != default_port:
            url_host += (':%d' % port)
        url_path = quote(string.join(self.prepath, '/'))
        return ('%s://%s/%s' % (url_proto, url_host, url_path))

    def getResource(self):
        return self.getServer().getResource(self)

    def render(self, resource):
        # Keep rendering while a Resource is returned (internal redirect),
        # then convert the final value into a response.
        val = None
        if resource is None:
            self.sendError(http.NOT_FOUND)
        else:
            try:
                while True:
                    val = resource.render(self)
                    if not isinstance(val, Resource):
                        break
                val = self.result(val)
            except SystemExit:
                raise
            except Exception, ex:
                self.resultErr(ex)
        return val

    def threadRequest(self, _fn, *_args, **_kwds):
        """Create a request to finish request processing in a thread.

        Use this to create a ThreadRequest to return from rendering a
        resource if you need a thread to complete processing.
        """
        return ThreadRequest(self.processor, self, _fn, _args, _kwds)

    def result(self, val):
        if isinstance(val, Exception):
            return self.resultErr(val)
        else:
            return self.resultVal(val)

    def resultVal(self, val):
        """Callback to complete the request.

        @param val: the value
        """
        if val is None:
            return val
        elif isinstance(val, ThreadRequest):
            return val
        elif self.useSxp():
            self.setHeader("Content-Type", sxp.mime_type)
            sxp.show(val, out=self)
        else:
            self.write('<html><head></head><body>')
            self.printPath()
            if isinstance(val, types.ListType):
                self.write('<code><pre>')
                # NOTE(review): PrettyPrint is not imported in this module;
                # this branch would raise NameError for list values --
                # presumably 'from xen.xend import PrettyPrint' is missing.
                PrettyPrint.prettyprint(val, out=self)
                self.write('</pre></code>')
            else:
                self.write(str(val))
            self.write('</body></html>')
        return None

    def resultErr(self, err):
        """Error callback to complete a request.

        @param err: the error
        """
        # Unexpected error types are re-raised to the generic handler.
        if not isinstance(err, (ArgError, sxp.ParseError, XendError)):
            raise
        #log.exception("op=%s: %s", op, str(err))
        if self.useSxp():
            self.setHeader("Content-Type", sxp.mime_type)
            sxp.show(['xend.err', str(err)], out=self)
        else:
            self.setHeader("Content-Type", "text/plain")
            self.write('Error ')
            self.write(': ')
            self.write(str(err))
        return None

    def useSxp(self):
        """Determine whether to send an SXP response to a request.

        Uses SXP if there is no User-Agent, no Accept, or application/sxp is in Accept.
        returns 1 for SXP, 0 otherwise
        """
        ok = 0
        user_agent = self.getHeader('User-Agent')
        accept = self.getHeader('Accept')
        if (not user_agent) or (not accept) or (accept.find(sxp.mime_type) >= 0):
            ok = 1
        return ok

    def printPath(self):
        # Emit a clickable breadcrumb trail for the consumed path segments.
        pathlist = [x for x in self.prepath if x != '' ]
        s = "/"
        self.write('<h1><a href="/">/</a>')
        for x in pathlist:
            s += x + "/"
            self.write(' <a href="%s">%s</a>/' % (s, x))
        self.write("</h1>")
class HttpServerClient:
    """Handles one accepted connection on its own daemon thread."""

    def __init__(self, server, sock, addr):
        self.server = server
        self.sock = sock
        self.addr = addr

    def process(self):
        # Daemon thread so an in-flight connection never blocks shutdown.
        thread = threading.Thread(target=self.doProcess)
        thread.setDaemon(True)
        thread.start()

    def doProcess(self):
        # Thread body: run the request loop; on any unexpected error, log
        # it and make sure the socket is closed.
        try:
            rp = RequestProcessor(self.server, self.sock, self.addr)
            rp.process()
        except SystemExit:
            raise
        except Exception, ex:
            print 'HttpServer>processRequest> exception: ', ex
            try:
                self.sock.close()
            except:
                pass
class HttpServer:
    """Threaded HTTP server dispatching requests to a Resource tree."""

    # Maximum number of queued (not yet accepted) connections.
    backlog = 5

    def __init__(self, root, interface, port=8080):
        self.root = root
        self.interface = interface
        self.port = port
        # ready indicates when we are ready to begin accept connections
        # it should be set after a successful bind
        self.ready = False
        self.closed = False

    def run(self):
        # Accept loop: one HttpServerClient (and thread) per connection.
        self.bind()
        self.listen()
        self.ready = True
        while not self.closed:
            (sock, addr) = self.accept()
            cl = HttpServerClient(self, sock, addr)
            cl.process()

    def stop(self):
        self.close()

    def bind(self):
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Mark the listening socket close-on-exec so child processes do
        # not inherit it.
        flags = fcntl.fcntl(self.socket.fileno(), fcntl.F_GETFD)
        flags |= fcntl.FD_CLOEXEC
        fcntl.fcntl(self.socket.fileno(), fcntl.F_SETFD, flags)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind((self.interface, self.port))

    def listen(self):
        self.socket.listen(self.backlog)

    def accept(self):
        return self.socket.accept()

    def close(self):
        self.closed = True
        self.ready = False
        # shutdown socket explicitly to allow reuse
        try:
            self.socket.shutdown(2)
        except socket.error:
            pass
        try:
            self.socket.close()
        except socket.error:
            pass

    def getServerAddr(self):
        return (socket.gethostname(), self.port)

    def getResource(self, req):
        return self.root.getRequestResource(req)

    def shutdown(self):
        self.close()
class UnixHttpServer(HttpServer):
    """HttpServer variant that listens on a unix-domain socket path."""

    def __init__(self, root, path):
        HttpServer.__init__(self, root, 'localhost')
        self.path = path

    def bind(self):
        self.socket = unix.bind(self.path)
        # Same close-on-exec treatment as the TCP variant.
        flags = fcntl.fcntl(self.socket.fileno(), fcntl.F_GETFD)
        flags |= fcntl.FD_CLOEXEC
        fcntl.fcntl(self.socket.fileno(), fcntl.F_SETFD, flags)
| gpl-2.0 |
ytjiang/django | tests/auth_tests/test_decorators.py | 46 | 3774 | from django.conf import settings
from django.contrib.auth import models
from django.contrib.auth.decorators import login_required, permission_required
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from .test_views import AuthViewsTestCase
@override_settings(ROOT_URLCONF='auth_tests.urls')
class LoginRequiredTestCase(AuthViewsTestCase):
    """
    Exercise the login_required decorator.
    """
    def testCallable(self):
        """
        login_required is assignable to callable objects, not just functions.
        """
        class ViewObject(object):
            def __call__(self, *args, **kwargs):
                pass
        login_required(ViewObject())

    def testView(self):
        """
        login_required is assignable to plain view functions.
        """
        def plain_view(request):
            pass
        login_required(plain_view)

    def testLoginRequired(self, view_url='/login_required/', login_url=None):
        """
        A login_required view redirects anonymous users to the login URL and
        serves authenticated users normally.
        """
        login_url = settings.LOGIN_URL if login_url is None else login_url
        response = self.client.get(view_url)
        self.assertEqual(response.status_code, 302)
        self.assertIn(login_url, response.url)
        self.login()
        response = self.client.get(view_url)
        self.assertEqual(response.status_code, 200)

    def testLoginRequiredNextUrl(self):
        """
        login_required honors a custom login_url.
        """
        self.testLoginRequired(
            view_url='/login_required_login_url/', login_url='/somewhere/')
class PermissionsRequiredDecoratorTest(TestCase):
    """
    Exercise the permission_required decorator.
    """
    def setUp(self):
        self.user = models.User.objects.create(username='joe', password='qwerty')
        self.factory = RequestFactory()
        # Grant auth.add_customuser and auth.change_customuser.
        perms = models.Permission.objects.filter(
            codename__in=('add_customuser', 'change_customuser'))
        self.user.user_permissions.add(*perms)

    def test_many_permissions_pass(self):
        @permission_required(['auth.add_customuser', 'auth.change_customuser'])
        def protected_view(request):
            return HttpResponse()

        request = self.factory.get('/rand')
        request.user = self.user
        response = protected_view(request)
        self.assertEqual(response.status_code, 200)

    def test_single_permission_pass(self):
        @permission_required('auth.add_customuser')
        def protected_view(request):
            return HttpResponse()

        request = self.factory.get('/rand')
        request.user = self.user
        response = protected_view(request)
        self.assertEqual(response.status_code, 200)

    def test_permissioned_denied_redirect(self):
        @permission_required(['auth.add_customuser', 'auth.change_customuser', 'non-existent-permission'])
        def protected_view(request):
            return HttpResponse()

        request = self.factory.get('/rand')
        request.user = self.user
        response = protected_view(request)
        self.assertEqual(response.status_code, 302)

    def test_permissioned_denied_exception_raised(self):
        @permission_required([
            'auth.add_customuser', 'auth.change_customuser', 'non-existent-permission'
        ], raise_exception=True)
        def protected_view(request):
            return HttpResponse()

        request = self.factory.get('/rand')
        request.user = self.user
        with self.assertRaises(PermissionDenied):
            protected_view(request)
| bsd-3-clause |
technologiescollege/Blockly-rduino-communication | scripts/Lib/site-packages/setuptools/command/upload.py | 210 | 1077 | import getpass
from distutils.command import upload as orig
class upload(orig.upload):
    """
    Override default upload behavior to obtain password
    in a variety of different ways.
    """

    def finalize_options(self):
        orig.upload.finalize_options(self)
        # Try each password source in turn, stopping at the first that
        # yields a truthy value: existing option, keyring, interactive
        # prompt.
        if not self.password:
            self.password = self._load_password_from_keyring()
        if not self.password:
            self.password = self._prompt_for_password()

    def _load_password_from_keyring(self):
        """
        Attempt to load password from keyring. Suppress Exceptions.
        """
        try:
            keyring = __import__('keyring')
        except Exception:
            return None
        try:
            return keyring.get_password(self.repository, self.username)
        except Exception:
            return None

    def _prompt_for_password(self):
        """
        Prompt for a password on the tty. Suppress Exceptions.
        """
        try:
            return getpass.getpass()
        except (Exception, KeyboardInterrupt):
            return None
| gpl-3.0 |
coinkeeper/2015-06-22_18-39_feathercoin | contrib/linearize/linearize.py | 145 | 3351 | #!/usr/bin/python
#
# linearize.py: Construct a linear, no-fork, best version of the blockchain.
#
#
# Copyright (c) 2013 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import json
import struct
import re
import base64
import httplib
import sys
ERR_SLEEP = 15  # retry delay (seconds); not referenced in this script
MAX_NONCE = 1000000L  # nonce cap; not referenced in this script

# Populated from the key=value config file in the __main__ block below.
settings = {}
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind-compatible daemon."""

    # Monotonically increasing JSON-RPC request id (shared across instances).
    OBJID = 1

    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # 30-second timeout; keep one persistent connection for all calls.
        self.conn = httplib.HTTPConnection(host, port, False, 30)
    def rpc(self, method, params=None):
        # Issue a single JSON-RPC call; returns the 'result' member, the
        # 'error' object on RPC failure, or None on transport/decode errors.
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']
    def getblock(self, hash, verbose=True):
        # verbose=False returns the raw block as a hex string.
        return self.rpc('getblock', [hash, verbose])
    def getblockhash(self, index):
        return self.rpc('getblockhash', [index])
def getblock(rpc, settings, n):
    """Fetch the block at height *n* and return its raw serialized bytes."""
    hash = rpc.getblockhash(n)
    hexdata = rpc.getblock(hash, False)
    data = hexdata.decode('hex')

    return data
def get_blocks(settings):
    """Append blocks [min_height, max_height] to the output file.

    Each block is framed as <netmagic><little-endian int32 length><raw
    block>, the layout bitcoind expects for bootstrap.dat.
    """
    rpc = BitcoinRPC(settings['host'], settings['port'],
                     settings['rpcuser'], settings['rpcpassword'])

    # Open in append mode so an interrupted run can be resumed.
    outf = open(settings['output'], 'ab')

    for height in xrange(settings['min_height'], settings['max_height']+1):
        data = getblock(rpc, settings, height)

        outhdr = settings['netmagic']
        outhdr += struct.pack("<i", len(data))
        outf.write(outhdr)
        outf.write(data)

        if (height % 1000) == 0:
            sys.stdout.write("Wrote block " + str(height) + "\n")
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: linearize.py CONFIG-FILE"
        sys.exit(1)

    # Read the config file: '#' comment lines are skipped, everything else
    # must be key=value.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Fill in defaults for anything the config file omitted.
    if 'netmagic' not in settings:
        settings['netmagic'] = 'f9beb4d9'
    if 'output' not in settings:
        settings['output'] = 'bootstrap.dat'
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8332
    if 'min_height' not in settings:
        settings['min_height'] = 0
    if 'max_height' not in settings:
        settings['max_height'] = 279000
    if 'rpcuser' not in settings or 'rpcpassword' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # Normalize string settings into their binary/int forms.
    settings['netmagic'] = settings['netmagic'].decode('hex')
    settings['port'] = int(settings['port'])
    settings['min_height'] = int(settings['min_height'])
    settings['max_height'] = int(settings['max_height'])

    get_blocks(settings)
| mit |
suiyuan2009/tensorflow | tensorflow/contrib/data/python/kernel_tests/iterator_ops_test.py | 5 | 15518 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class IteratorTest(test.TestCase):
  def testAttemptingGradientsRaiseExceptions(self):
    """Taking gradients through iterator ops must fail with LookupError."""
    component = constant_op.constant([1])
    side = constant_op.constant(0)
    add = lambda x: x + side
    dataset = dataset_ops.Dataset.from_tensor_slices(component).map(add)
    value = dataset.make_one_shot_iterator().get_next()
    # No gradient is registered for iterator ops, with respect to either
    # the dataset input or a captured tensor (or both at once).
    with self.assertRaisesRegexp(LookupError, "No gradient defined"):
      gradients_impl.gradients(value, component)
    with self.assertRaisesRegexp(LookupError, "No gradient defined"):
      gradients_impl.gradients(value, side)
    with self.assertRaisesRegexp(LookupError, "No gradient defined"):
      gradients_impl.gradients(value, [component, side])
  def testOneShotIterator(self):
    """A one-shot iterator yields every mapped element for each repeat."""
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))

    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)

    iterator = (dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
                .repeat(14).make_one_shot_iterator())
    get_next = iterator.get_next()

    # Static shapes: slicing drops the leading (slice) dimension.
    self.assertEqual([c.shape[1:] for c in components],
                     [t.shape for t in get_next])

    with self.test_session() as sess:
      # 14 repeats of 7 squared slices, then exhaustion.
      for _ in range(14):
        for i in range(7):
          result = sess.run(get_next)
          for component, result_component in zip(components, result):
            self.assertAllEqual(component[i]**2, result_component)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  def testOneShotIteratorCaptureByValue(self):
    """Same as testOneShotIterator, but with pre-built constant tensors,
    exercising capture-by-value of the dataset's inputs."""
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))
    tensor_components = tuple([ops.convert_to_tensor(c) for c in components])

    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)

    iterator = (dataset_ops.Dataset.from_tensor_slices(tensor_components)
                .map(_map_fn).repeat(14).make_one_shot_iterator())
    get_next = iterator.get_next()

    self.assertEqual([c.shape[1:] for c in components],
                     [t.shape for t in get_next])

    with self.test_session() as sess:
      for _ in range(14):
        for i in range(7):
          result = sess.run(get_next)
          for component, result_component in zip(components, result):
            self.assertAllEqual(component[i]**2, result_component)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
def testOneShotIteratorInsideContainer(self):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
def within_container():
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = (dataset_ops.Dataset.from_tensor_slices(components)
.map(_map_fn).repeat(14).make_one_shot_iterator())
return iterator.get_next()
server = server_lib.Server.create_local_server()
# Create two iterators within unique containers, and run them to
# make sure that the resources aren't shared.
#
# The test below would fail if cname were the same across both
# sessions.
for i in range(2):
with session.Session(server.target) as sess:
cname = "iteration%d" % i
with ops.container(cname):
get_next = within_container()
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testOneShotIteratorNonBlocking(self):
dataset = dataset_ops.Dataset.from_tensors([1, 2, 3]).map(lambda x: x * x)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
# Create a session with a single thread to ensure that the
# one-shot iterator initializer does not deadlock.
config = config_pb2.ConfigProto(inter_op_parallelism_threads=1,
use_per_session_threads=True)
with session.Session(config=config) as sess:
self.assertAllEqual([1, 4, 9], sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
# Test with multiple threads invoking the one-shot iterator concurrently.
with session.Session(config=config) as sess:
results = []
def consumer_thread():
try:
results.append(sess.run(next_element))
except errors.OutOfRangeError:
results.append(None)
num_threads = 8
threads = [
self.checkedThread(consumer_thread) for _ in range(num_threads)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(num_threads, len(results))
self.assertEqual(num_threads - 1,
len([None for r in results if r is None]))
self.assertAllEqual([[1, 4, 9]], [r for r in results if r is not None])
def testOneShotIteratorInitializerFails(self):
# Define a dataset whose initialization will always fail.
dataset = dataset_ops.Dataset.from_tensors(
array_ops.check_numerics(
constant_op.constant(1.0) / constant_op.constant(0.0), "oops"))
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
with self.test_session() as sess:
with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
sess.run(next_element)
# Test that subsequent attempts to use the iterator also fail.
with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
sess.run(next_element)
with self.test_session() as sess:
def consumer_thread():
with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
sess.run(next_element)
num_threads = 8
threads = [
self.checkedThread(consumer_thread) for _ in range(num_threads)]
for t in threads:
t.start()
for t in threads:
t.join()
def testSimpleSharedResource(self):
components = (
np.array(1, dtype=np.int64),
np.array([1, 2, 3], dtype=np.int64),
np.array(37.0, dtype=np.float64)
)
server = server_lib.Server.create_local_server()
# Create two non-overlapping sessions that share the same iterator
# resource on the same server, and verify that an action of the
# first session (initializing the iterator) is visible in the
# second session.
with ops.Graph().as_default():
iterator = (dataset_ops.Dataset.from_tensors(components)
.map(lambda x, y, z: (x, y, z)).make_initializable_iterator(
shared_name="shared_iterator"))
init_op = iterator.initializer
get_next = iterator.get_next()
with session.Session(server.target) as sess:
sess.run(init_op)
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Re-initialize the iterator in the first session.
sess.run(init_op)
with ops.Graph().as_default():
# Re-define the iterator manually, without defining any of the
# functions in this graph, to ensure that we are not
# accidentally redefining functions with the same names in the
# new graph.
iterator = dataset_ops.Iterator.from_structure(
shared_name="shared_iterator",
output_types=(dtypes.int64, dtypes.int64, dtypes.float64),
output_shapes=([], [3], []))
get_next = iterator.get_next()
with session.Session(server.target) as sess:
# Use the iterator without re-initializing in the second session.
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testNotInitializedError(self):
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
iterator = (dataset_ops.Dataset.from_tensors(components)
.make_initializable_iterator())
get_next = iterator.get_next()
with self.test_session() as sess:
with self.assertRaisesRegexp(errors.FailedPreconditionError,
"iterator has not been initialized"):
sess.run(get_next)
def testReinitializableIterator(self):
dataset_3 = dataset_ops.Dataset.from_tensors(
constant_op.constant([1, 2, 3]))
dataset_4 = dataset_ops.Dataset.from_tensors(
constant_op.constant([4, 5, 6, 7]))
iterator = dataset_ops.Iterator.from_structure(dataset_3.output_types,
[None])
dataset_3_init_op = iterator.make_initializer(dataset_3)
dataset_4_init_op = iterator.make_initializer(dataset_4)
get_next = iterator.get_next()
self.assertEqual(dataset_3.output_types, iterator.output_types)
self.assertEqual(dataset_4.output_types, iterator.output_types)
self.assertEqual([None], iterator.output_shapes.as_list())
with self.test_session() as sess:
# The iterator is initially uninitialized.
with self.assertRaises(errors.FailedPreconditionError):
sess.run(get_next)
# Initialize with one dataset.
sess.run(dataset_3_init_op)
self.assertAllEqual([1, 2, 3], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Initialize with a different dataset.
sess.run(dataset_4_init_op)
self.assertAllEqual([4, 5, 6, 7], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Reinitialize with the first dataset.
sess.run(dataset_3_init_op)
self.assertAllEqual([1, 2, 3], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testReinitializableIteratorStaticErrors(self):
# Non-matching structure for types and shapes.
with self.assertRaises(TypeError):
iterator = dataset_ops.Iterator.from_structure((dtypes.int64,
dtypes.float64), [None])
# Test validation of dataset argument.
iterator = dataset_ops.Iterator.from_structure((dtypes.int64,
dtypes.float64))
# Incompatible structure.
with self.assertRaises(ValueError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors(((constant_op.constant(
[1, 2, 3], dtype=dtypes.int64),), (constant_op.constant(
[4., 5., 6., 7.], dtype=dtypes.float64),))))
# Incompatible types.
with self.assertRaises(TypeError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors((constant_op.constant(
[1, 2, 3], dtype=dtypes.int32), constant_op.constant(
[4., 5., 6., 7.], dtype=dtypes.float32))))
# Incompatible shapes.
iterator = dataset_ops.Iterator.from_structure(
(dtypes.int64, dtypes.float64), ([None], []))
with self.assertRaises(TypeError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors((constant_op.constant(
[1, 2, 3], dtype=dtypes.int64), constant_op.constant(
[4., 5., 6., 7.], dtype=dtypes.float64))))
def testIteratorStringHandle(self):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
dataset_4 = dataset_ops.Dataset.from_tensor_slices([10, 20, 30, 40])
iterator_3 = dataset_3.make_one_shot_iterator()
iterator_4 = dataset_4.make_one_shot_iterator()
handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
feedable_iterator = dataset_ops.Iterator.from_string_handle(
handle_placeholder, dataset_3.output_types, dataset_3.output_shapes)
next_element = feedable_iterator.get_next()
self.assertEqual(dataset_3.output_types, feedable_iterator.output_types)
self.assertEqual(dataset_4.output_types, feedable_iterator.output_types)
self.assertEqual([], feedable_iterator.output_shapes)
with self.test_session() as sess:
iterator_3_handle = sess.run(iterator_3.string_handle())
iterator_4_handle = sess.run(iterator_4.string_handle())
self.assertEqual(
10, sess.run(next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(
1, sess.run(next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(
20, sess.run(next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(
2, sess.run(next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(
30, sess.run(next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(
3, sess.run(next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(
40, sess.run(next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element,
feed_dict={handle_placeholder: iterator_3_handle})
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element,
feed_dict={handle_placeholder: iterator_4_handle})
if __name__ == "__main__":
  # Dispatch to the TensorFlow test runner, which discovers and runs the
  # test methods of IteratorTest defined above.
  test.main()
| apache-2.0 |
vene/marseille | experiments/exp_rnn.py | 1 | 5162 | import os
import dill
import numpy as np
from sklearn.model_selection import KFold
from marseille.custom_logging import logging
from marseille.datasets import get_dataset_loader, load_embeds
from marseille.io import cache_fname
from marseille.argrnn import ArgumentLSTM
def argrnn_cv_score(dataset, dynet_weight_decay, mlp_dropout,
                    rnn_dropout, prop_layers, class_weight, constraints,
                    compat_features, second_order):
    """Cross-validate an ArgumentLSTM on the training split of a dataset.

    Results are memoized on disk via dill under a filename derived from
    every argument, so a repeated call with an identical configuration
    returns the cached result instead of retraining.

    Returns:
        (scores, score_at_iter, all_Y_pred): the per-fold score records,
        the iteration numbers at which scores were taken, and the
        concatenated validation predictions over all folds.
    """
    cache_path = cache_fname("argrnn_cv_score",
                             (dataset, dynet_weight_decay, mlp_dropout,
                              rnn_dropout, prop_layers, class_weight,
                              constraints, compat_features, second_order))

    if os.path.exists(cache_path):
        logging.info("Cached file already exists.")
        with open(cache_path, "rb") as f:
            return dill.load(f)

    load, ids = get_dataset_loader(dataset, split="train")
    embeds = load_embeds(dataset)

    # Second-order factors are dataset-specific: grandparent factors are
    # enabled only for 'ukp', sibling factors only for 'cdcp'; coparent
    # factors are enabled whenever second_order is requested.
    if second_order:
        coparent_layers = 1
        grandparent_layers = 1 if dataset == 'ukp' else 0
        sibling_layers = 1 if dataset == 'cdcp' else 0
    else:
        coparent_layers = grandparent_layers = sibling_layers = 0

    score_at_iter = [10, 25, 50, 75, 100]
    n_folds = 5 if dataset == 'ukp' else 3

    fold_scores = []
    val_predictions = []

    for tr_idx, val_idx in KFold(n_folds).split(ids):
        docs_train = list(load(ids[tr_idx]))
        docs_val = list(load(ids[val_idx]))

        y_train = [doc.label for doc in docs_train]
        y_val = [doc.label for doc in docs_val]

        model = ArgumentLSTM(lstm_dropout=rnn_dropout,
                             mlp_dropout=mlp_dropout,
                             compat_features=compat_features,
                             constraints=constraints,
                             prop_mlp_layers=prop_layers,
                             coparent_layers=coparent_layers,
                             grandparent_layers=grandparent_layers,
                             sibling_layers=sibling_layers,
                             class_weight=class_weight,
                             second_order_multilinear=True,
                             max_iter=100,
                             score_at_iter=score_at_iter,
                             n_mlp=128,
                             n_lstm=128,
                             lstm_layers=2,
                             link_mlp_layers=1,
                             embeds=embeds,
                             exact_inference=False,
                             link_bilinear=True)

        model.fit(docs_train, y_train, docs_val, y_val)
        val_predictions.extend(model.predict(docs_val))
        fold_scores.append(model.scores_)

    with open(cache_path, "wb") as f:
        dill.dump((fold_scores, score_at_iter, val_predictions), f)

    return fold_scores, score_at_iter, val_predictions
if __name__ == '__main__':
    from docopt import docopt

    # Command-line interface; the options map one-to-one onto the
    # arguments of argrnn_cv_score above.  NOTE: dynet-specific options
    # (--dynet-seed, --dynet-mem) are consumed by the dynet library
    # itself at import time; --dynet-weight-decay is only folded into the
    # cache key here.
    usage = """
    Usage:
        exp_rnn (cdcp|ukp) [\
--dynet-seed N --dynet-weight-decay N --dynet-mem N --prop-layers=N \
--rnn-dropout=N --mlp-dropout=N --balanced --constraints --strict \
--compat-features --second-order]

    Options:
        --dynet-seed=N          random number generator seed for dynet library
        --dynet-weight-decay=N  global weight decay amount for dynet library
        --dynet-mem=N           memory pool size for dynet
        --prop-layers=N         number of prop classifier layers. [default: 2]
        --rnn-dropout=N         dropout ratio in lstm. [default: 0.0]
        --mlp-dropout=N         dropout ratio in mlp. [default: 0.1]
        --balanced              whether to reweight class costs by freq
        --constraints           whether to constrain the decoding
        --strict                whether to use strict domain constraints
        --compat-features       whether to use features for compat factors
        --second-order          whether to use coparent / grandpa / siblings
    """

    args = docopt(usage)

    # Positional dataset choice: exactly one of (cdcp|ukp) is true.
    dataset = 'cdcp' if args['cdcp'] else 'ukp'
    prop_layers = int(args['--prop-layers'])
    rnn_dropout = float(args['--rnn-dropout'])
    mlp_dropout = float(args['--mlp-dropout'])
    # class_weight argument: 'balanced' reweights classes by frequency.
    cw = 'balanced' if args['--balanced'] else None

    # The constraint set is named after the dataset, optionally with a
    # '+strict' suffix; the empty string disables constrained decoding.
    if args['--constraints']:
        constraints = dataset
        if args['--strict']:
            constraints += '+strict'
    else:
        constraints = ""

    scores, score_at_iter, _ = argrnn_cv_score(dataset,
                                               args['--dynet-weight-decay'],
                                               mlp_dropout,
                                               rnn_dropout,
                                               prop_layers,
                                               cw,
                                               constraints,
                                               args['--compat-features'],
                                               args['--second-order'])

    # Report cross-validation averages (over folds) at each checkpointed
    # training iteration.
    for iter, score in zip(score_at_iter, np.mean(scores, axis=0)):
        print("iter={} "
              "Link: {:.3f}/{:.3f} "
              "Node: {:.3f}/{:.3f} "
              "accuracy {:.3f}".format(iter, *score),
              )
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.