repo_name
stringlengths 5
104
| path
stringlengths 4
248
| content
stringlengths 102
99.9k
|
|---|---|---|
Giangblackk/hanoi_road_map_analysis
|
preprocess/from_road_to_graph.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 30 17:32:10 2017
@author: giangblackk
"""
from osgeo import ogr, osr
import networkx as nx
import numpy as np
def calculateGeometryLength(pointList, sourceSRS, destSRS):
    """Return the length of the polyline through pointList, measured after
    reprojecting it from sourceSRS into destSRS (e.g. metres for a UTM SRS).
    """
    polyline = ogr.Geometry(ogr.wkbLineString)
    for vertex in pointList:
        polyline.AddPoint(vertex[0], vertex[1])
    # Reproject the assembled geometry once, then measure it.
    polyline.Transform(osr.CoordinateTransformation(sourceSRS, destSRS))
    return polyline.Length()
# target srs for road length computation
# NOTE: UTM zone 48N covers Hanoi; lengths computed in this SRS are in metres.
target_srs = osr.SpatialReference()
target_srs.ImportFromProj4('+proj=utm +zone=48 +ellps=WGS84 +datum=WGS84 +units=m +no_defs ')
# read source dataset
highwayFileName = './roaddata/highway_line_singlepart.shp'
dataSource = ogr.Open(highwayFileName)
layer = dataSource.GetLayer(0)
source_srs = layer.GetSpatialRef()
featureCount = layer.GetFeatureCount()
print('featureCount: ', featureCount)
# layer.SetAttributeFilter("ONEWAY NOT IN ('yes', 'no','-1')")
# layer.SetAttributeFilter("ONEWAY IN ('-1','yes','no')")
# get attribute list
# Collect shapefile field names to copy onto graph edges; a few fields are
# dropped because they are not wanted as edge attributes.
attributeList = []
layerDefinition = layer.GetLayerDefn()
for i in range(layerDefinition.GetFieldCount()):
fieldName = layerDefinition.GetFieldDefn(i).GetName()
attributeList.append(fieldName)
attributeList.remove('TOLL')
attributeList.remove('TRACKTYPE')
attributeList.remove('DISUSED')
# create graph
# NOTE(review): nodes_iter()/G.node/edges_iter()/attr_dict below are
# networkx 1.x APIs; this script will not run on networkx >= 2.0.
G = nx.DiGraph()
nodeList = []
i = 0
for feature in layer:
geometry = feature.geometry()
geometry_projected = geometry.Clone()
geometry_projected.TransformTo(target_srs)
feature_length = geometry_projected.Length()
pointCount = geometry.GetPointCount()
pointList = geometry.GetPoints()
### first point ###########################################################
# Reuse an existing node if this endpoint was already seen; the linear
# scans over nodeList/G.nodes make this O(n^2) overall for large layers.
firstPoint = pointList[0]
if not firstPoint in nodeList:
nodeList.append(firstPoint)
G.add_node(i, lng=firstPoint[0], lat=firstPoint[1])
firstNodeID = i
i = i + 1
else:
for nodeidx in G.nodes_iter():
if G.node[nodeidx]['lng'] == firstPoint[0] and G.node[nodeidx]['lat'] == firstPoint[1]:
firstNodeID = nodeidx
### last point ############################################################
lastPoint = pointList[-1]
if not lastPoint in nodeList:
nodeList.append(lastPoint)
G.add_node(i, lng=lastPoint[0], lat=lastPoint[1])
lastNodeID = i
i = i + 1
else:
for nodeidx in G.nodes_iter():
if G.node[nodeidx]['lng'] == lastPoint[0] and G.node[nodeidx]['lat'] == lastPoint[1]:
lastNodeID = nodeidx
### if first point is same as last point, remove due to loop ##############
if firstNodeID == lastNodeID or firstPoint == lastPoint:
G.remove_node(firstNodeID)
nodeList.remove(firstPoint)
continue
### add edges between nodes ###############################################
middlePointList = pointList[1:-1]
# NOTE(review): firstNodeID/lastNodeID are integer node ids, while
# middlePointList holds coordinate tuples, so this membership test looks
# like it can never be true — confirm the intent (possibly firstPoint/
# lastPoint were meant).
if firstNodeID in middlePointList or lastNodeID in middlePointList:
# G.remove_node(firstNodeID)
# nodeList.remove(firstPoint)
# G.remove_node(lastNodeID)
# nodeList.remove(lastPoint)
continue
### create link ###########################################################
# ONEWAY '-1' runs opposite to the digitised direction, 'yes' follows it,
# anything else is treated as bidirectional (edges both ways).
if feature.GetField('ONEWAY') == '-1':
G.add_edge(lastNodeID, firstNodeID)
for attribute in attributeList:
G[lastNodeID][firstNodeID][attribute] = feature.GetField(attribute) if feature.GetField(attribute) is not None else ''
G[lastNodeID][firstNodeID]['middle'] = middlePointList[::-1]
G[lastNodeID][firstNodeID]['length'] = feature_length
elif feature.GetField('ONEWAY') == 'yes':
G.add_edge(firstNodeID, lastNodeID)
for attribute in attributeList:
G[firstNodeID][lastNodeID][attribute] = feature.GetField(attribute) if feature.GetField(attribute) is not None else ''
G[firstNodeID][lastNodeID]['middle'] = middlePointList
G[firstNodeID][lastNodeID]['length'] = feature_length
else:
G.add_edge(firstNodeID, lastNodeID)
G.add_edge(lastNodeID, firstNodeID)
for attribute in attributeList:
G[firstNodeID][lastNodeID][attribute] = feature.GetField(attribute) if feature.GetField(attribute) is not None else ''
G[lastNodeID][firstNodeID][attribute] = feature.GetField(attribute) if feature.GetField(attribute) is not None else ''
G[firstNodeID][lastNodeID]['middle'] = middlePointList
G[lastNodeID][firstNodeID]['middle'] = middlePointList[::-1]
G[firstNodeID][lastNodeID]['length'] = feature_length
G[lastNodeID][firstNodeID]['length'] = feature_length
### intersect processing ##################################################
# If a newly added endpoint lies in the interior of an existing edge,
# split that edge at the endpoint and recompute the two halves' lengths.
for edge in G.edges():
headID = edge[0]
tailID = edge[1]
attributeDict = G[headID][tailID]
middle = attributeDict['middle']
if firstPoint in middle:
if headID == firstNodeID or firstNodeID == tailID:
continue
indexFirstPoint = middle.index(firstPoint)
# copy attributes
attributeDictPart1 = attributeDict.copy()
attributeDictPart2 = attributeDict.copy()
# recalculate middle
attributeDictPart1['middle'] = middle[0:indexFirstPoint]
attributeDictPart2['middle'] = middle[indexFirstPoint+1:]
# recalucate length
roadPart1 = [(G.node[headID]['lng'],G.node[headID]['lat'])]
roadPart1.extend(middle[0:indexFirstPoint+1])
roadPart2 = middle[indexFirstPoint:]
roadPart2.append((G.node[tailID]['lng'],G.node[tailID]['lat']))
attributeDictPart1['length'] = calculateGeometryLength(roadPart1,source_srs,target_srs)
attributeDictPart2['length'] = calculateGeometryLength(roadPart2,source_srs,target_srs)
G.remove_edge(headID, tailID)
G.add_edge(headID, firstNodeID, attr_dict=attributeDictPart1)
G.add_edge(firstNodeID, tailID, attr_dict=attributeDictPart2)
elif lastPoint in middle:
if headID == lastNodeID or lastNodeID == tailID:
continue
indexLastPoint = middle.index(lastPoint)
# copy attributes
attributeDictPart1 = attributeDict.copy()
attributeDictPart2 = attributeDict.copy()
# recalculate middle
attributeDictPart1['middle'] = middle[0:indexLastPoint]
attributeDictPart2['middle'] = middle[indexLastPoint+1:]
# recalculate length
roadPart1 = [(G.node[headID]['lng'],G.node[headID]['lat'])]
roadPart1.extend(middle[0:indexLastPoint+1])
roadPart2 = middle[indexLastPoint:]
roadPart2.append((G.node[tailID]['lng'],G.node[tailID]['lat']))
attributeDictPart1['length'] = calculateGeometryLength(roadPart1,source_srs,target_srs)
attributeDictPart2['length'] = calculateGeometryLength(roadPart2,source_srs,target_srs)
G.remove_edge(headID, tailID)
G.add_edge(headID, lastNodeID, attr_dict=attributeDictPart1)
G.add_edge(lastNodeID, tailID, attr_dict=attributeDictPart2)
### remove middle properties ##################################################
for edge in G.edges_iter():
G[edge[0]][edge[1]].pop('middle')
### remove zeros neighbor nodes ###############################################
for node in G.nodes():
if G.in_degree()[node] == 0 and G.out_degree()[node] == 0:
print(node)
G.remove_node(node)
### check if 2 node same lat long #############################################
# Sanity check: count consecutive nodes that share identical coordinates.
lat = G.node[0]['lat']
lng = G.node[0]['lng']
sameCount = -1
for i in G.nodes_iter():
if G.node[i]['lat'] == lat and G.node[i]['lng'] == lng:
sameCount += 1
else:
lat = G.node[i]['lat']
lng = G.node[i]['lng']
print('same location Count: ',sameCount)
### check for self loop in result graph #######################################
self_loop_count = 0
for node in G.nodes_iter():
if node in G.neighbors(node):
self_loop_count += 1
print(node, G.neighbors(node))
print('self_loop_count: ', self_loop_count)
# nx.write_gexf(G,'./highway_line_singlepart.gexf')
# nx.write_gexf(G,'./highway_line_singlepart_new_length.gexf')
# nx.write_gexf(G,'./highway_line_singlepart_new_123.gexf')
nx.write_gexf(G,'./graphdata/highway_line_singlepart_new_length.gexf')
# create links between nodes
# add metadata of links
# save graph
# release dataset
layer = None
dataSource = None
|
tensorflow/similarity
|
tensorflow_similarity/retrieval_metrics/map_at_k.py
|
# Copyright 2021 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Mapping
import tensorflow as tf
from .retrieval_metric import RetrievalMetric
from tensorflow_similarity.types import FloatTensor, IntTensor, BoolTensor
class MapAtK(RetrievalMetric):
    # NOTE: raw docstring (r"""), so the LaTeX escapes \frac and \times are
    # kept verbatim; in the original non-raw string "\f" and "\t" were
    # interpreted as form-feed/tab characters, garbling the rendered formula.
    r"""Mean Average precision - mAP@K is computed as.

    $$
    mAP_i@K = \frac{\sum_{j = 1}^{K} {rel_i_j}\times{P_i@j}}{R}
    $$

    Where: K is the number of neighbors in the i_th query result set.
           P is the rolling precision over the i_th query result set.
           R is the cardinality of the target class.
           rel is the relevance mask (indicator function) for the i_th query.
           i represents the i_th query.
           j represents the j_th ranked query result.

    AP@K is biased towards the top ranked results and is a function of the rank
    (K), the relevancy mask (rel), and the number of indexed examples for the
    class (R). The denominator for the i_th query is set to the number of
    indexed examples (R) for the class associated with the i_th query.

    For example, if the index has has 100 embedded examples (R) of class 'a',
    and our query returns 50 results (K) where the top 10 results are all TPs,
    then the AP@50 will be 0.10; however, if instead the bottom 10 ranked
    results are all TPs, then the AP@50 will be much lower (0.012) because we
    apply a penalty for the 40 FPs that come before the relevant query results.

    This metric is useful when we want to ensure that the top ranked results
    are relevant to the query; however, it requires that we pass a mapping from
    the class id to the number of indexed examples for that class.

    Args:
        r: A mapping from class id to the number of examples in the index,
        e.g., r[4] = 10 represents 10 indexed examples from class 4.

        name: Name associated with the metric object, e.g., avg_precision@5

        canonical_name: The canonical name associated with metric, e.g.,
        avg_precision@K

        k: The number of nearest neighbors over which the metric is computed.

        distance_threshold: The max distance below which a nearest neighbor is
        considered a valid match.

        average: {'micro'} Determines the type of averaging performed over the
        queries.

        * 'micro': Calculates metrics globally over all queries.
    """

    def __init__(
        self,
        r: Mapping[int, int],
        name: str = "map",
        k: int = 5,
        average: str = "micro",
        **kwargs,
    ) -> None:
        # Only micro averaging is well defined for this metric.
        if average == "macro":
            raise ValueError(
                "Mean Average Precision only supports micro averaging."
            )
        if "canonical_name" not in kwargs:
            kwargs["canonical_name"] = "map@k"
        super().__init__(name=name, k=k, average=average, **kwargs)
        self.r = r

    def get_config(self):
        """Return the serializable config, including the class-count mapping."""
        config = {
            "r": self.r,
        }
        base_config = super().get_config()
        return {**base_config, **config}

    def compute(
        self,
        *,  # keyword only arguments see PEP-570
        query_labels: IntTensor,
        match_mask: BoolTensor,
        **kwargs,
    ) -> FloatTensor:
        """Compute the metric

        Args:
            query_labels: A 1D array of the labels associated with the
            embedding queries.

            match_mask: A 2D mask where a 1 indicates a match between the
            jth query and the kth neighbor and a 0 indicates a mismatch.

            **kwargs: Additional compute args

        Returns:
            A rank 0 tensor containing the metric.
        """
        self._check_shape(query_labels, match_mask)

        # Rolling precision over the top-k neighbors; multiplying by the
        # relevance mask keeps only the ranks where a true match occurred.
        k_slice = tf.cast(match_mask[:, : self.k], dtype="float")
        tp = tf.math.cumsum(k_slice, axis=1)
        p_at_k = tf.math.divide(tp, tf.range(1, self.k + 1, dtype="float"))
        p_at_k = tf.math.multiply(k_slice, p_at_k)

        if self.average == "micro":
            # Static table mapping each class id to its indexed example count
            # (the R denominator); classes missing from r map to -1.
            table = tf.lookup.StaticHashTable(
                tf.lookup.KeyValueTensorInitializer(
                    list(self.r.keys()),
                    list(self.r.values()),
                    key_dtype=tf.int32,
                    value_dtype=tf.int32,
                ),
                default_value=-1,
            )
            class_counts = table.lookup(query_labels)
            avg_p_at_k = tf.math.divide(
                tf.math.reduce_sum(p_at_k, axis=1),
                tf.cast(class_counts, dtype="float"),
            )
            avg_p_at_k = tf.math.reduce_mean(avg_p_at_k)
        else:
            raise ValueError(
                f"{self.average} is not a supported average option"
            )

        result: FloatTensor = avg_p_at_k
        return result
|
tylertian/Openstack
|
openstack F/glance/tools/migrate_image_owners.py
|
#!/usr/bin/python
import sys
import keystoneclient.v2_0.client
import glance.context
from glance.openstack.common import cfg
import glance.openstack.common.log as logging
import glance.registry.context
import glance.db.sqlalchemy.api as db_api
# Module logger for this one-off CLI tool; the explicit stream handler and
# DEBUG level make it verbose without requiring any logging configuration.
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.StreamHandler())
LOG.setLevel(logging.DEBUG)
def get_owner_map(ksclient, owner_is_tenant=True):
    """Map keystone tenant (or user) names to their ids.

    ksclient: an authenticated keystone v2 client.
    owner_is_tenant: when True map tenants, otherwise map users.
    """
    source = ksclient.tenants if owner_is_tenant else ksclient.users
    return {entity.name: entity.id for entity in source.list()}
def build_image_owner_map(owner_map, db, context):
    """Resolve each image's owner name to a keystone id via owner_map.

    Images with no owner, or whose owner is missing from owner_map, are
    logged and skipped rather than included in the result.
    Returns a dict of image id -> owner id.
    """
    resolved = {}
    for image in db.image_get_all(context):
        image_id = image['id']
        owner_name = image['owner']
        if not owner_name:
            LOG.info('Image %s has no owner. Skipping.' % image_id)
            continue
        if owner_name not in owner_map:
            msg = 'Image %s owner %s was not found. Skipping.'
            LOG.error(msg % (image_id, owner_name))
            continue
        owner_id = owner_map[owner_name]
        resolved[image_id] = owner_id
        msg = 'Image %s owner %s -> %s' % (image_id, owner_name, owner_id)
        LOG.info(msg)
    return resolved
def update_image_owners(image_owner_map, db, context):
    """Write the resolved owner ids back onto their image records."""
    for image_id, image_owner in image_owner_map.items():
        db.image_update(context, image_id, {'owner': image_owner})
        LOG.info('Image %s successfully updated.' % image_id)
if __name__ == "__main__":
    config = cfg.CONF
    # Extra CLI options this migration tool needs on top of the standard
    # glance-registry configuration.
    extra_cli_opts = [
        cfg.BoolOpt('dry-run',
                    help='Print output but do not make db changes.'),
        cfg.StrOpt('keystone-auth-uri',
                   help='Authentication endpoint'),
        cfg.StrOpt('keystone-admin-tenant-name',
                   help='Administrative user\'s tenant name'),
        cfg.StrOpt('keystone-admin-user',
                   help='Administrative user\'s id'),
        cfg.StrOpt('keystone-admin-password',
                   help='Administrative user\'s password'),
    ]
    config.register_cli_opts(extra_cli_opts)
    config(project='glance', prog='glance-registry')

    db_api.configure_db()

    # BUG FIX: the original read glance.common.context.RequestContext, but
    # glance.common is never imported in this module (only glance.context and
    # glance.registry.context are), so that line raised AttributeError.
    # Use the glance.context module that is imported at the top of the file.
    context = glance.context.RequestContext(is_admin=True)

    auth_uri = config.keystone_auth_uri
    admin_tenant_name = config.keystone_admin_tenant_name
    admin_user = config.keystone_admin_user
    admin_password = config.keystone_admin_password

    if not (auth_uri and admin_tenant_name and admin_user and admin_password):
        LOG.critical('Missing authentication arguments')
        sys.exit(1)

    ks = keystoneclient.v2_0.client.Client(username=admin_user,
                                           password=admin_password,
                                           tenant_name=admin_tenant_name,
                                           auth_url=auth_uri)

    # owner_is_tenant comes from glance's standard options; it decides whether
    # image owners are tenant names or user names.
    owner_map = get_owner_map(ks, config.owner_is_tenant)
    image_updates = build_image_owner_map(owner_map, db_api, context)
    if not config.dry_run:
        update_image_owners(image_updates, db_api, context)
|
IBMPredictiveAnalytics/GATHERMD
|
src/GATHERMD.py
|
#/***********************************************************************
# * Licensed Materials - Property of IBM
# *
# * IBM SPSS Products: Statistics Common
# *
# * (C) Copyright IBM Corp. 1989, 2020
# *
# * US Government Users Restricted Rights - Use, duplication or disclosure
# * restricted by GSA ADP Schedule Contract with IBM Corp.
# ************************************************************************/
# Construct a dataset listing the variables and selected properties for a collection of data files
# 05-23-2008 Original version - JKP
# 04-29-2009 Add file handle support
# 11-16-2009 Protect against UP converting escape sequences with "\" characters
#12-16-2009 Enable translation
__version__ = "1.2.1"
__author__ = "JKP, SPSS"
#try:
#import wingdbstub
#except:
#pass
import spss, os, re, locale
import spssaux
from extension import Template, Syntax
try:
from extension import processcmd
except:
print("""This command requires a newer version of extension.py. Please download it from
SPSS Developer Central and replace the existing file""")
raise
class DataStep(object):
    """Context manager that wraps an SPSS data step.

    Entering starts a data step; if pending transformations prevent that,
    they are flushed with EXECUTE and the data step is started again.
    Exiting always ends the data step and never suppresses exceptions.
    """

    def __enter__(self):
        try:
            spss.StartDataStep()
        except:
            # Pending transformations block StartDataStep; run them first.
            spss.Submit("EXECUTE")
            spss.StartDataStep()
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        spss.EndDataStep()
        return False
# The following block of code is for using the gather function as an Extension command.
def Run(args):
"""Execute the GATHERMD command"""
###print args #debug
# The extension framework passes a nested dict keyed by the command name;
# unwrap it to get the subcommand/keyword dictionary.
args = args[list(args.keys())[0]]
helptext=r"""GATHERMD
Create and activate a dataset whose cases are variable names and labels
and, optionally, selected attributes from one or more data files.
GATHERMD list-of-specifications
[/OPTIONS [FILETYPES=*spss sas stata]
[DSNAME=name]
[FILENAMEPATTERN="pattern expression"]]
[ATTRLENGTH=value]
[/ATTRIBUTES list-of-attribute-names]
[HELP].
list-of-specifications is a list of one or more filenames, optionally with paths, and/or directories.
For directories, all appropriate files in the directory and its subdirectories are searched. With version 18
or later, the file specifications can include PASW Statistics file handles.
FILETYPES defaults to SPSS files (.sav and .por).
sas files are .sas7bdat, .sd7, .sd2, .ssd01, and .xpt
stata files are .dta
Files with any of the specified types found in the directories specified are searched. Since
these files are opened in SPSS, if the same file is already open in SPSS, it will be reopened
without saving any changes that may have been made.
DSNAME optionally specifies a dataset name to be assigned to the output dataset.
FILENAMEPATTERN can be specified as a quoted literal containing a regular expression pattern
to be used as a filter on filenames. For example, FILENAMEPATTERN="car" would limit the
files searched to those whose name starts with "car". FILENAMEPATTERN=".*car" would accept
any filenames containing "car". These are not the same as filename wildcards found in many operating systems.
For example, "abc*" will match any name starting with ab: it means literally ab followed by zero or more c's.
The regular expression is not case sensitive, and it is applied to the name of the
file without the extension. For a full explanation of regular expressions, one good source is
http://www.amk.ca/python/howto/regex/
/ATTRIBUTES list-of-names
specifies a list of custom variable attributes to be included in the output dataset. The variable
names will be the attribute names except if they conflict with the built-in variables source,
VariableName, and VariableLabel. If the attribute is not present, the value will be blank.
If the attribute is an array, only the first value is included.
Attribute variables in the output dataset are truncated to the length specified in ATTRLENGTH,
which defaults to 256
/HELP displays this text and does nothing else.
Examples:
GATHERMD "c:/spss17/samples".
gathermd "c:/temp/firstlevel" "c:/spss16/samples/voter.sav" /options filetypes=spss sas
dsname=gathered.
"""
# define the command syntax and enable translation
oobj = Syntax([
Template("", subc="", var="files", ktype="literal", islist=True),
Template("FILETYPES", subc="OPTIONS", var="filetypes", ktype="str", islist=True),
Template("FILENAMEPATTERN", subc="OPTIONS", var="filenamepattern", ktype="literal"),
Template("DSNAME", subc="OPTIONS", var="dsname", ktype="varname"),
Template("ATTRLENGTH", subc="OPTIONS", var="attrlength", ktype="int", vallist=(1, 32767)),
Template("", subc="ATTRIBUTES", var="attrlist", ktype="varname", islist=True)])
global _
# Bind _ to a pass-through function when no translation machinery is
# installed, so the _("...") calls elsewhere never fail.
try:
_("---")
except:
def _(msg):
return msg
if "HELP" in args:
#print helptext
helper()
else:
processcmd(oobj, args, gather)
#oobj.parsecmd(args, vardict = spssaux.VariableDict())
#gather(**oobj.parsedparams)
def helper():
    """Open this command's HTML help in the default browser.

    The help file location is derived from this module's own path.
    """
    import webbrowser
    import os.path

    base = os.path.splitext(__file__)[0]
    helpspec = "file://" + base + os.path.sep + "markdown.html"
    # webbrowser.open seems not to work well
    if not webbrowser.get().open_new(helpspec):
        print(("Help file not found:" + helpspec))
# Prefer the shared helper() from the extension module when available
# (newer Statistics versions); silently fall back to the local helper above.
try: #override
from extension import helper
except:
pass
def gather(files, filetypes=["spss"], filenamepattern=None, dsname=None,attrlist=[], attrlength=256):
"""Create SPSS dataset listing variable names, variable labels, and source files for selected files. Return the name of the new dataset.
files is a list of files and/or directories. If an item is a file, it is processed; if it is a directory, the files and subdirectories
it contains are processed.
filetypes is a list of filetypes to process. It defaults to ["spss"] which covers sav and por. It can also include
"sas" for sas7bdat, sd7, sd2, ssd01, and xpt, and "stata" for dta
filenamepattern is an optional parameter that can contain a regular expression to be applied to the filenames to filter the
datasets that are processed. It is applied to the filename itself, omitting any directory path and file extension. The expression
is anchored to the start of the name and ignores case.
dsname is an optional name to be assigned to the new dataset. If not specified, a name will be automatically generated.
If dsname is specified, it will become the active dataset; otherwise, it need not be the active dataset.
attrlist is an optional list of custom attributes to be included in the output. For array attributes, only the first item is
recorded. The value is blank if the attribute is not present for the variable. Attribute variables are
strings of size attrlength bytes, truncated appropriately.
The output is just a dataset. It must be saved, if desired, after this function has completed.
Its name is the return value of this function.
Exception is raised if any files not found.
Examples:
gathermetadata.gather(["c:/temp/firstlevel", "c:/spss16/samples/voter.sav"], ["spss", "sas"])
searches spss and sas files in or under the temp/firstlevel directory plus the voter file.
gathermetadata.gather(["c:/temp/firstlevel"], filenamepattern="car")
searches the firstlevel directory for spss files whose names start with "car".
"""
# NOTE(review): mutable default arguments (filetypes=["spss"], attrlist=[])
# are shared across calls; safe only as long as they are never mutated here.
encoding = locale.getlocale()[1]
filetypes = [f.lower() for f in filetypes]
for ft in filetypes:
if not ft in ["spss", "sas", "stata"]:
raise ValueError(_("Filetypes must be one or more of spss, sas, and stata."))
# dsvars maps lower-cased (prefixed) keys to the output dataset's SPSS
# variable names; see addunique for how attribute names are added.
dsvars = {"source":"source", "variablename":"VariableName", "variablelabel":"variableLabel"}
with DataStep():
ds = spss.Dataset(name=None)
dsn = ds.name
varlist = ds.varlist
varlist.append("source",200)
varlist["source"].label=_("File containing the variable")
varlist.append("variableName", 64)
varlist["variableName"].label = _("Variable Name")
varlist.append("variableLabel", 256)
varlist["variableLabel"].label = _("Variable Label")
attrindexes = {}
for i, aname in enumerate(attrlist):
anamemod = addunique(dsvars, aname)
varlist.append(dsvars[anamemod], attrlength)
attrindexes[aname.lower()] = i
addvarinfo = makeaddinfo(dsn, filetypes, filenamepattern, dsvars, attrindexes, attrlength) #factory function
files = [fixescapes(f) for f in files] #UP is converting escape characters :-)
# walk the list of files and directories and open
try: # will fail if spssaux is prior to version 2.3
fh = spssaux.FileHandles()
except:
pass
notfound = []
for item in files:
try:
item = fh.resolve(item)
except:
pass
if os.path.isfile(item):
addvarinfo(item)
elif os.path.isdir(item):
for dirpath, dirnames, fnames in os.walk(item):
for f in fnames:
try:
addvarinfo(os.path.join(dirpath, f))
except EnvironmentError as e:
notfound.append(e.args[0])
else:
if not isinstance(item, str):
item = str(item, encoding)
notfound.append(_("Not found: %s") % item)
spss.Submit("DATASET ACTIVATE %s." % dsn)
if not dsname is None:
spss.Submit("DATASET NAME %s." % dsname)
dsn = dsname
if notfound:
raise ValueError("\n".join(notfound))
return dsn
def makeaddinfo(dsname, filetypes, filenamepattern, dsvars, attrindexes, attrlength):
"""create a function to add variable information to a dataset.
dsname is the dataset name to append to.
filetypes is the list of file types to include.
filenamepattern is a regular expression to filter filename roots.
dsvars is a special dictionary of variables and attributes. See function addunique.
attrindexes is a dictionary with keys of lower case attribute names and values as the dataset index starting with 0.
attrlength is the size of the attribute string variables"""
# Extensions recognized for each supported file type, and the SPSS GET
# command used to open each type.
ftdict = {"spss":[".sav", ".por"], "sas":[".sas7bdat",".sd7",".sd2",".ssd01",".xpt"], "stata":[".dta"]}
spsscmd = {"spss":"GET FILE='%s'.", "sas": "GET SAS DATA='%s'.", "stata": "GET STATA FILE='%s'."}
if filenamepattern:
try:
pat = re.compile(filenamepattern, re.IGNORECASE)
except:
raise ValueError(_("Invalid filenamepattern: %s") % filenamepattern)
else:
pat = None
ll = len(dsvars)
includeAttrs = ll > 3
blanks = (ll-3) * [" "]
def addinfo(filespec):
"""open the file if appropriate type, extract variable information, and add it to dataset dsname.
filespec is the file to open
dsname is the dataset name to append to
filetypes is the list of file types to include."""
fnsplit = os.path.split(filespec)[1]
fn, ext = os.path.splitext(fnsplit)
for ft in filetypes:
if ext in ftdict[ft]:
if pat is None or pat.match(fn):
try:
spss.Submit(spsscmd[ft] % filespec)
spss.Submit("DATASET NAME @__GATHERMD__.")
except:
# NOTE(review): `encoding` is a local of gather(), not visible in this
# closure — this branch would raise NameError if reached; confirm.
if not isinstance(filespec, str):
filespec = str(filespec, encoding)
raise EnvironmentError(_("File could not be opened, skipping: %s") % filespec)
break
else:
# NOTE(review): this for/else returns the addinfo function object itself
# when the extension matches no requested filetype; a plain `return`
# (returning None) looks like the intent — confirm before changing.
return addinfo
with DataStep():
ds = spss.Dataset(name=dsname) # not the active dataset
dssource = spss.Dataset(name="*") # The dataset to examine
numvars = spss.GetVariableCount() # active dataset
variables = dssource.varlist
for v in range(numvars):
lis = [filespec.replace("\\","/"), spss.GetVariableName(v), spss.GetVariableLabel(v)]
lis.extend(blanks)
lis = [item+ 256*" " for item in lis]
ds.cases.append(lis)
#ds.cases.append([filespec.replace("\\","/"), spss.GetVariableName(v), spss.GetVariableLabel(v), *blanks])
if includeAttrs:
attrs = variables[v].attributes.data
for a in attrs:
if a.lower() in attrindexes:
ds.cases[-1, attrindexes[a.lower()]+ 3] = attrs[a][0] + attrlength * " "# allow for standard variables
spss.Submit("DATASET CLOSE @__GATHERMD__.")
return addinfo
def addunique(dsdict, key):
    """Add a modified version of key to dictionary dsdict; return the generated key.

    dsdict keys are lower-case strings; values are unique SPSS variable names.
    Duplicate keys are ignored. Keys are prefixed with "*" so they can never
    collide with real variable names.
    """
    lookup_key = "*" + key.lower()
    if lookup_key in dsdict:
        return lookup_key
    # Derive a candidate name unique among the existing values and within
    # the legal SPSS variable-name length.
    taken = [v.lower() for v in list(dsdict.values())]
    candidate = spssaux.truncatestring(key, 64)
    suffix = 0
    while candidate.lower() in taken:
        candidate = spssaux.truncatestring(key, 59) + "_" + str(suffix)
        suffix += 1
    dsdict[lookup_key] = candidate
    return lookup_key
# Control characters paired with their backslash-escaped literal spellings.
escapelist = [('\a', r'\a'), ('\b', r'\b'), ('\f', r'\f'), ('\n', r'\n'), ('\r', r'\r'), ('\t', r'\t'), ('\v', r'\v')]


def fixescapes(item):
    """Replace control characters in item with their two-character escape
    text, undoing unwanted escape-sequence conversion done upstream."""
    for control_char, literal in escapelist:
        item = item.replace(control_char, literal)
    return item
# Example.
'''dsname = gather(["c:/temp/firstlevel"], filetypes=['spss','sas'], attrlist=['importance', 'relevance', 'VariableLabel'])
spss.Submit(r"""DATASET ACTIVATE %s.
SAVE OUTFILE='c:/temp2/gathered.sav'.""" % dsname)
dsname=gather(["c:/spss16/samples/employee data.sav"])'''
|
metamarkovic/jobChecker
|
jobChecker_lisa.py
|
from pexpect import pxssh
import ConfigParser
# NOTE: Python 2 code (old-style `except ..., e` syntax, print statements,
# ConfigParser module); this class only runs under Python 2.
class jobChecker():
# Path of the INI file holding credentials, commands, and alert messages.
config_path = './config.lisa.ini'
# NOTE(review): class-level attribute — a single pxssh session object is
# shared by every jobChecker instance; confirm that is intended.
s = pxssh.pxssh()
def readConfig(self):
# Load connection credentials, remote commands, and message texts from
# the INI file at config_path into instance attributes.
self.config.read(self.config_path)
self.hostname = self.config.get('Credentials', 'hostname')
self.username = self.config.get('Credentials', 'username')
self.password = self.config.get('Credentials', 'password')
self.email = self.config.get('Credentials', 'email')
self.command1 = self.config.get('Commands', 'command1')
self.command2 = self.config.get('Commands', 'command2')
self.experimentDown = self.config.get('Message', 'experimentDown')
self.checkerFailed = self.config.get('Message', 'checkerFailed')
def __init__(self):
self.config = ConfigParser.RawConfigParser()
def retrieveOutput(self):
"""Connects to ssh server and inputs commands specified in the config file
"""
self.readConfig()
try:
self.s.login(self.hostname, self.username, self.password)
# self.s.sendline(self.command1) # run a command
# self.s.prompt() # match the prompt
# self.matchIndex(self.experimentDown,4)
# print self.s.before
# print self.s.before
# outputEmpty1 = 'Total Jobs: 0 Active Jobs: 0 Idle Jobs: 0 Blocked Jobs: 0'
# if outputEmpty1 in output1:
# self.errorAlert()
self.s.sendline(self.command2) # run a command
self.s.prompt() # match the prompt
self.matchIndex(self.experimentDown,8)
# outputEmpty2 = ''
# if outputEmpty2 in output2:
# self.errorAlert()
except pxssh.ExceptionPxssh, e:
print "pxssh failed on login."
print str(e)
def matchIndex(self,emailSubject,indexMinLength):
# Inspect the captured command output (s.before) and send a status email
# either way; the emailSubject parameter is overwritten in both branches.
if "main-resub.sh" in self.s.before:
emailSubject = 'main script running'
self.errorAlert(emailSubject)
else:
emailSubject = 'main script NICHT running'
self.errorAlert(emailSubject)
#old:
# lines = self.s.before.split('\r\n') # \n is the linebreak character on unix, i.e. split by newline
# print lines
# if len(lines) < indexMinLength:
# self.errorAlert(emailSubject)
# else:
# pass
# except EOF:
# self.errorAlert(self.checkerFailed)
# except TIMEOUT:
# self.errorAlert(self.checkerFailed)
def errorAlert(self, emailSubject):
"""Sends an email if there are no jobs running
"""
# Uses the remote host's own mail command over the open ssh session.
self.s.sendline('date | mail -s "' + emailSubject + '" ' + self.email)
self.s.prompt()
def initialize(self):
# NOTE(review): readConfig() runs here and again inside retrieveOutput(),
# so the config file is read twice per run.
self.readConfig()
self.retrieveOutput()
# Script entry: build a checker and run one check pass immediately on import.
checker = jobChecker()
checker.initialize()
|
lliss/tr-55
|
tr55/model.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
"""
TR-55 Model Implementation
A mapping between variable/parameter names found in the TR-55 document
and variables used in this program are as follows:
* `precip` is referred to as P in the report
* `runoff` is Q
* `evaptrans` maps to ET, the evapotranspiration
* `inf` is the amount of water that infiltrates into the soil (in inches)
* `init_abs` is Ia, the initial abstraction, another form of infiltration
"""
import copy
from tr55.tablelookup import lookup_cn, lookup_bmp_infiltration, \
lookup_ki, is_bmp, is_built_type, make_precolumbian, get_pollutants
from tr55.water_quality import get_volume_of_runoff, get_pollutant_load
from tr55.operations import dict_plus
def runoff_pitt(precip, land_use):
    """
    The Pitt Small Storm Hydrology method. Returns a runoff value in
    inches, never exceeding the precipitation itself.
    """
    # Fitted polynomial coefficients: c1..c4 for the impervious curve,
    # c5..c9 for the urban-grass curve.
    c1 = +3.638858398e-2
    c2 = -1.243464039e-1
    c3 = +1.295682223e-1
    c4 = +9.375868043e-1
    c5 = -2.235170859e-2
    c6 = +0.170228067e+0
    c7 = -3.971810782e-1
    c8 = +3.887275538e-1
    c9 = -2.289321859e-2

    p2 = precip ** 2
    p3 = precip ** 3
    p4 = precip ** 4
    impervious = (c1 * p3) + (c2 * p2) + (c3 * precip) + c4
    urb_grass = (c5 * p4) + (c6 * p3) + (c7 * p2) + (c8 * precip) + c9

    # Each built land-use type is a blend of the two fitted curves.
    per_use_runoff = {
        'open_water': impervious,
        'developed_low': 0.20 * impervious + 0.80 * urb_grass,
        'cluster_housing': 0.20 * impervious + 0.80 * urb_grass,
        'developed_med': 0.65 * impervious + 0.35 * urb_grass,
        'developed_high': impervious,
        'developed_open': urb_grass
    }
    if land_use not in per_use_runoff:
        raise Exception('Land use %s not a built-type.' % land_use)
    # Runoff can never exceed the rainfall that produced it.
    return min(per_use_runoff[land_use], precip)
def nrcs_cutoff(precip, curve_number):
    """
    A function to find the cutoff between precipitation/curve number
    pairs that have zero runoff by definition, and those that do not.

    Returns True when the pair produces zero runoff (precip at or below
    the initial-abstraction threshold), False otherwise.
    """
    # Idiomatic form: return the comparison directly instead of an
    # if/else that returns True/False.
    return precip <= -1 * (2 * (curve_number - 100.0) / curve_number)
def runoff_nrcs(precip, evaptrans, soil_type, land_use):
    """
    The runoff equation from the TR-55 document. The output is a
    runoff value in inches, capped at precip minus evapotranspiration.
    """
    # Cluster housing shares developed_low's curve number.
    effective_use = 'developed_low' if land_use == 'cluster_housing' else land_use
    cn = lookup_cn(soil_type, effective_use)
    if nrcs_cutoff(precip, cn):
        return 0.0
    retention = (1000.0 / cn) - 10          # S, potential retention
    excess = precip - 0.2 * retention       # P - Ia (initial abstraction)
    runoff = (excess ** 2) / (excess + retention)
    return min(runoff, precip - evaptrans)
def simulate_cell_day(precip, evaptrans, cell, cell_count):
    """
    Simulate a bunch of cells of the same type during a one-day event.
    `precip` is the amount of precipitation in inches.
    `evaptrans` is evapotranspiration.
    `cell` is a string which contains a soil type and land use
    separated by a colon (an optional third field names a BMP).
    `cell_count` is the number of cells to simulate.
    The return value is a dictionary of runoff, evapotranspiration, and
    infiltration as volumes of water (per-cell depths times `cell_count`).
    """
    def clamp(runoff, et, inf, precip):
        """
        This function ensures that runoff + et + inf <= precip by
        scaling all three down proportionally when they exceed it.
        NOTE: infiltration is normally independent of the
        precipitation level, but this function introduces a slight
        dependency (that is, at very low levels of precipitation, this
        function can cause infiltration to be smaller than it
        ordinarily would be).
        """
        total = runoff + et + inf
        if (total > precip):
            scale = precip / total
            runoff *= scale
            et *= scale
            inf *= scale
        return (runoff, et, inf)
    # Negative precipitation makes no physical sense; treat it as zero.
    precip = max(0.0, precip)
    soil_type, land_use, bmp = cell.lower().split(':')
    # If there is no precipitation, then there is no runoff or
    # infiltration. There is evapotranspiration, however (it is
    # understood that over a period of time, this can lead to the sum
    # of the three values exceeding the total precipitation).
    if precip == 0.0:
        return {
            'runoff-vol': 0.0,
            # 'et-vol': cell_count * evaptrans,
            # NOTE: et-vol is deliberately reported as zero on dry days
            # (the line above shows the alternative that was rejected).
            'et-vol': 0.0,
            'inf-vol': 0.0,
        }
    # Deal with the Best Management Practices (BMPs). For most BMPs,
    # the infiltration is read from the table and the runoff is what
    # is left over after infiltration and evapotranspiration. Rain
    # gardens are treated differently.
    if bmp and is_bmp(bmp) and bmp != 'rain_garden':
        inf = lookup_bmp_infiltration(soil_type, bmp) # infiltration
        runoff = max(0.0, precip - (evaptrans + inf)) # runoff
        (runoff, evaptrans, inf) = clamp(runoff, evaptrans, inf, precip)
        return {
            'runoff-vol': cell_count * runoff,
            'et-vol': cell_count * evaptrans,
            'inf-vol': cell_count * inf
        }
    elif bmp and bmp == 'rain_garden':
        # Here, return a mixture of 20% ideal rain garden and 80%
        # high-intensity residential.
        # NOTE(review): the recursive call below actually simulates
        # 'developed_med' (medium intensity), not high intensity as this
        # comment suggests — confirm which is intended.
        inf = lookup_bmp_infiltration(soil_type, bmp)
        runoff = max(0.0, precip - (evaptrans + inf))
        hi_res_cell = soil_type + ':developed_med:'
        hi_res = simulate_cell_day(precip, evaptrans, hi_res_cell, 1)
        hir_run = hi_res['runoff-vol']
        hir_et = hi_res['et-vol']
        hir_inf = hi_res['inf-vol']
        final_runoff = (0.2 * runoff + 0.8 * hir_run)
        final_et = (0.2 * evaptrans + 0.8 * hir_et)
        final_inf = (0.2 * inf + 0.8 * hir_inf)
        # Re-clamp the blended values so they still sum to <= precip.
        final = clamp(final_runoff, final_et, final_inf, precip)
        (final_runoff, final_et, final_inf) = final
        return {
            'runoff-vol': cell_count * final_runoff,
            'et-vol': cell_count * final_et,
            'inf-vol': cell_count * final_inf
        }
    # At this point, if the `bmp` string has non-zero length, it is
    # equal to either 'no_till' or 'cluster_housing'.
    if bmp and bmp != 'no_till' and bmp != 'cluster_housing':
        raise KeyError('Unexpected BMP: %s' % bmp)
    # These two BMPs are modeled as land uses in their own right.
    land_use = bmp or land_use
    # When the land use is a built-type and the level of precipitation
    # is two inches or less, use the Pitt Small Storm Hydrology Model.
    # When the land use is a built-type but the level of precipitation
    # is higher, the runoff is the larger of that predicted by the
    # Pitt model and NRCS model. Otherwise, return the NRCS amount.
    if is_built_type(land_use) and precip <= 2.0:
        runoff = runoff_pitt(precip, land_use)
    elif is_built_type(land_use):
        # Pitt model evaluated at its 2-inch upper bound of validity.
        pitt_runoff = runoff_pitt(2.0, land_use)
        nrcs_runoff = runoff_nrcs(precip, evaptrans, soil_type, land_use)
        runoff = max(pitt_runoff, nrcs_runoff)
    else:
        runoff = runoff_nrcs(precip, evaptrans, soil_type, land_use)
    # Whatever neither runs off nor evaporates is taken to infiltrate.
    inf = max(0.0, precip - (evaptrans + runoff))
    (runoff, evaptrans, inf) = clamp(runoff, evaptrans, inf, precip)
    return {
        'runoff-vol': cell_count * runoff,
        'et-vol': cell_count * evaptrans,
        'inf-vol': cell_count * inf,
    }
def create_unmodified_census(census):
    """
    This creates a cell census, ignoring any modifications. The
    output is suitable for use with `simulate_water_quality`.
    """
    # Work on a deep copy so the caller's census is left untouched.
    pristine = copy.deepcopy(census)
    pristine.pop('modifications', None)
    return pristine
def create_modified_census(census):
    """
    This creates a cell census, with modifications, that is suitable
    for use with `simulate_water_quality`.
    For every type of cell that undergoes modification, the
    modifications are indicated with a sub-distribution under that
    cell type.
    """
    working = copy.deepcopy(census)
    working.pop('modifications', None)
    # Give every cell type a trivial sub-distribution pointing at itself,
    # so modified and unmodified cells can later be tallied uniformly.
    for (cell, subcensus) in working['distribution'].items():
        count = subcensus['cell_count']
        working = dict_plus(working, {
            'distribution': {
                cell: {
                    'distribution': {
                        cell: {'cell_count': count}
                    }
                }
            }
        })
    # Apply each modification by moving cells out of the original type and
    # into the changed type, within the original type's sub-distribution.
    for modification in (census.get('modifications') or []):
        for (orig_cell, subcensus) in modification['distribution'].items():
            count = subcensus['cell_count']
            soil1, land1 = orig_cell.split(':')
            soil2, land2, bmp = modification['change'].split(':')
            # Empty fields in the change string keep the original value.
            changed_cell = '%s:%s:%s' % (soil2 or soil1, land2 or land1, bmp)
            working = dict_plus(working, {
                'distribution': {
                    orig_cell: {
                        'distribution': {
                            orig_cell: {'cell_count': -count},
                            changed_cell: {'cell_count': count}
                        }
                    }
                }
            })
    return working
def simulate_water_quality(tree, cell_res, fn,
                           current_cell=None, precolumbian=False):
    """
    Perform a water quality simulation by doing simulations on each of
    the cell types (leaves), then adding them together by summing the
    values of a node's subtrees and storing them at that node.
    `tree` is the (sub)tree of cell distributions that is currently
    under consideration.
    `cell_res` is the size of each cell (used for turning inches of
    water into volumes of water).
    `fn` is a function that takes a cell type and a number of cells
    and returns a dictionary containing runoff, et, and inf as
    volumes.
    `current_cell` is the cell type for the present node.
    `precolumbian`, when True, maps developed land uses to their
    pre-Columbian equivalents at the leaves.
    NOTE: this function mutates `tree` in place and returns None.
    """
    # Internal node.
    if 'cell_count' in tree and 'distribution' in tree:
        n = tree['cell_count']
        # simulate subtrees
        if n != 0:
            tally = {}
            for cell, subtree in tree['distribution'].items():
                simulate_water_quality(subtree, cell_res, fn,
                                       cell, precolumbian)
                # Sum the subtree's results (minus its own distribution)
                # into the running tally for this node.
                subtree_ex_dist = subtree.copy()
                subtree_ex_dist.pop('distribution', None)
                tally = dict_plus(tally, subtree_ex_dist)
            tree.update(tally) # update this node
        # effectively a leaf
        elif n == 0:
            for pol in get_pollutants():
                tree[pol] = 0.0
    # Leaf node.
    elif 'cell_count' in tree and 'distribution' not in tree:
        # the number of cells covered by this leaf
        n = tree['cell_count']
        # canonicalize the current_cell string to soil:land_use:bmp form
        split = current_cell.split(':')
        if (len(split) == 2):
            split.append('')
        if precolumbian:
            split[1] = make_precolumbian(split[1])
        current_cell = '%s:%s:%s' % tuple(split)
        # run the runoff model on this leaf (runs even when n == 0;
        # only the pollutant-load step below is skipped in that case)
        result = fn(current_cell, n) # runoff, et, inf
        tree.update(result)
        # perform water quality calculation
        if n != 0:
            soil_type, land_use, bmp = split
            runoff_per_cell = result['runoff-vol'] / n
            liters = get_volume_of_runoff(runoff_per_cell, n, cell_res)
            for pol in get_pollutants():
                tree[pol] = get_pollutant_load(land_use, pol, liters)
def postpass(tree):
    """
    Remove volume units and replace them with inches.
    """
    if 'cell_count' in tree:
        count = tree['cell_count']
        if count > 0:
            # Convert aggregate volumes back into per-cell depths.
            tree['runoff'] = tree['runoff-vol'] / count
            tree['et'] = tree['et-vol'] / count
            tree['inf'] = tree['inf-vol'] / count
        else:
            tree['runoff'] = 0
            tree['et'] = 0
            tree['inf'] = 0
        # Drop the volume entries now that depths are recorded.
        for volume_key in ('runoff-vol', 'et-vol', 'inf-vol'):
            tree.pop(volume_key, None)
    # Recurse into any sub-distributions.
    if 'distribution' in tree:
        for subtree in tree['distribution'].values():
            postpass(subtree)
def simulate_modifications(census, fn, cell_res, precolumbian=False):
    """
    Simulate effects of modifications.
    `census` contains a distribution of cell-types in the area of interest.
    `fn` is as described in `simulate_water_quality`.
    `cell_res` is as described in `simulate_water_quality`.
    """
    def run_scenario(scenario):
        # Simulate one census variant, then convert volumes to inches.
        simulate_water_quality(scenario, cell_res, fn,
                               precolumbian=precolumbian)
        postpass(scenario)
        return scenario

    modified = run_scenario(create_modified_census(census))
    unmodified = run_scenario(create_unmodified_census(census))
    return {
        'unmodified': unmodified,
        'modified': modified
    }
def simulate_day(census, precip, cell_res=10, precolumbian=False):
    """
    Simulate a day, including water quality effects of modifications.
    `census` contains a distribution of cell-types in the area of interest.
    `cell_res` is as described in `simulate_water_quality`.
    `precolumbian` indicates that artificial types should be turned
    into forest.
    """
    et_max = 0.207

    if 'modifications' in census:
        verify_census(census)

    def day_fn(cell, cell_count):
        # Scale maximum evapotranspiration by the cell's crop coefficient
        # (the BMP's coefficient takes precedence when one is present),
        # then run the single-day cell simulation.
        parts = cell.split(':')
        if len(parts) == 2:
            (land_use, bmp) = parts
        else:
            (_, land_use, bmp) = parts
        evapotranspiration = et_max * lookup_ki(bmp or land_use)
        return simulate_cell_day(precip, evapotranspiration, cell, cell_count)

    return simulate_modifications(census, day_fn, cell_res, precolumbian)
def verify_census(census):
    """
    Assures that there is no soil type/land cover pair
    in a modification census that isn't in the AoI census.
    """
    known_pairs = census['distribution']
    for modification in census['modifications']:
        for land_cover in modification['distribution']:
            if land_cover not in known_pairs:
                raise ValueError("Invalid modification census")
|
EricssonResearch/calvin-base
|
calvin/runtime/south/calvinlib/mathlib/Arithmetic.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.runtime.south.calvinlib import base_calvinlib_object
from calvin.utilities.calvinlogger import get_logger
import operator
_log = get_logger(__name__)
class Arithmetic(base_calvinlib_object.BaseCalvinlibObject):
    """
    Operations on numbers
    """

    init_schema = {
        "description": "Initialize module",
    }

    relation_schema = {
        "description": "Get corresponding relation: >, <, =, !=, >=, <= (with obvious interpretation.)",
        "type": "object",
        "properties": {
            "rel": { "type": "string" }
        }
    }

    operator_schema = {
        "description": "Get corresponding operator: +, -, /, *, div, mod (with obvious interpretation.)",
        "type": "object",
        "properties": {
            "op": { "type": "string" }
        }
    }

    eval_schema = {
        "description": "Evaluate expression, returning result. Bindings should be a dictionary of variable mappings to use in evaluation",
        "type": "object",
        "properties": {
            "expr": { "type": "string" },
            "bindings": { "type": "object" }
        }
    }

    def init(self):
        pass

    def relation(self, rel):
        """Return the binary comparison callable named by `rel`.

        An unknown name is not an error: a warning is logged and a
        predicate that always returns False is handed back instead.
        """
        try:
            return {
                '<': operator.lt,
                '<=': operator.le,
                '=': operator.eq,
                '!=': operator.ne,
                '>=': operator.ge,
                '>': operator.gt,
            }[rel]
        except KeyError:
            _log.warning("Invalid operator '{}', will always return 'false'".format(rel))
            return lambda x, y: False

    def operator(self, op):
        """Return the binary arithmetic callable named by `op`.

        An unknown name is not an error: a warning is logged and a
        function that always returns None is handed back instead.
        """
        try:
            return {
                '+': operator.add,
                '-': operator.sub,
                '*': operator.mul,
                # operator.div only exists on Python 2; fall back to
                # operator.truediv so '/' also works on Python 3 (where
                # accessing operator.div would raise AttributeError on
                # every call to this method).
                '/': getattr(operator, 'div', operator.truediv),
                'div': operator.floordiv,
                'mod': operator.mod,
            }[op]
        except KeyError:
            _log.warning("Invalid operator '{}', will always produce 'null'".format(op))
            return lambda x, y: None

    def eval(self, expr, bindings):
        """Evaluate `expr` with `bindings` as its local variables.

        On any evaluation error the exception text is returned as a
        string instead of being raised.
        """
        # SECURITY NOTE: eval() executes arbitrary Python expressions.
        # `expr` must only ever come from trusted application/actor
        # code, never from untrusted external input.
        try:
            return eval(expr, {}, bindings)
        except Exception as e:
            return str(e)
|
googleapis/python-channel
|
samples/generated_samples/cloudchannel_v1_generated_cloud_channel_service_get_channel_partner_link_sync.py
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetChannelPartnerLink
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-channel
# [START cloudchannel_v1_generated_CloudChannelService_GetChannelPartnerLink_sync]
from google.cloud import channel_v1
def sample_get_channel_partner_link():
    """Fetch a single ChannelPartnerLink and print it.

    Generated synchronous snippet: builds a CloudChannelService client,
    issues a GetChannelPartnerLink request for a placeholder resource
    name, and prints the response.
    """
    # Create a client
    client = channel_v1.CloudChannelServiceClient()
    # Initialize request argument(s)
    # "name_value" is a placeholder; replace with a real resource name.
    request = channel_v1.GetChannelPartnerLinkRequest(
        name="name_value",
    )
    # Make the request
    response = client.get_channel_partner_link(request=request)
    # Handle the response
    print(response)
# [END cloudchannel_v1_generated_CloudChannelService_GetChannelPartnerLink_sync]
|
stweil/letsencrypt
|
certbot/certbot/compat/filesystem.py
|
"""Compat module to handle files security on Windows and Linux"""
from __future__ import absolute_import
import errno
import os # pylint: disable=os-module-forbidden
import stat
import sys
from typing import List
try:
import ntsecuritycon
import win32security
import win32con
import win32api
import win32file
import pywintypes
import winerror
except ImportError:
POSIX_MODE = True
else:
POSIX_MODE = False
# Windows umask implementation, since Windows does not have a concept of umask by default.
# We choose 022 as initial value since it is the default one on most Linux distributions, and
# it is a decent choice to not have write permissions for group owner and everybody by default.
# We use a class here to avoid needing to define a global variable, and the potential mistakes
# that could happen with this kind of pattern.
class _WindowsUmask:
    """Store the current umask to apply on Windows"""
    def __init__(self):
        # 0o022 matches the default umask on most Linux distributions:
        # it removes write permission for group and world.
        self.mask = 0o022
# Module-level singleton holding the Windows-side umask state.
_WINDOWS_UMASK = _WindowsUmask()
def chmod(file_path: str, mode: int) -> None:
    """
    Apply a POSIX mode on given file_path:

      * for Linux, the POSIX mode will be directly applied using chmod,
      * for Windows, the POSIX mode will be translated into a Windows DACL
        that makes sense for Certbot context, and applied to the file using
        kernel calls.

    The definition of the Windows DACL that corresponds to a POSIX mode, in
    the context of Certbot, is explained at
    https://github.com/certbot/certbot/issues/6356 and is implemented by the
    method `_generate_windows_flags()`.

    :param str file_path: Path of the file
    :param int mode: POSIX mode to apply
    """
    if not POSIX_MODE:
        _apply_win_mode(file_path, mode)
        return
    os.chmod(file_path, mode)
def umask(mask: int) -> int:
    """
    Set the current numeric umask and return the previous umask. On Linux, the
    built-in umask method is used. On Windows, our Certbot-side implementation
    is used.

    :param int mask: The user file-creation mode mask to apply.
    :rtype: int
    :return: The previous umask value.
    """
    if not POSIX_MODE:
        # Swap in the new mask on the Windows-side umask holder.
        previous_umask = _WINDOWS_UMASK.mask
        _WINDOWS_UMASK.mask = mask
        return previous_umask
    return os.umask(mask)
# One could ask why there is no copy_ownership() function, or even a reimplementation
# of os.chown() that would modify the ownership of file without touching the mode itself.
# This is because on Windows, it would require recalculating the existing DACL against
# the new owner, since the DACL is composed of ACEs that targets a specific user, not dynamically
# the current owner of a file. This action would be necessary to keep consistency between
# the POSIX mode applied to the file and the current owner of this file.
# Since copying and editing arbitrary DACL is very difficult, and since we actually know
# the mode to apply at the time the owner of a file should change, it is easier to just
# change the owner, then reapply the known mode, as copy_ownership_and_apply_mode() does.
def copy_ownership_and_apply_mode(src: str, dst: str, mode: int,
                                  copy_user: bool, copy_group: bool) -> None:
    """
    Copy ownership (user and optionally group on Linux) from the source to the
    destination, then apply given mode in compatible way for Linux and Windows.
    This replaces the os.chown command.

    :param str src: Path of the source file
    :param str dst: Path of the destination file
    :param int mode: Permission mode to apply on the destination file
    :param bool copy_user: Copy user if `True`
    :param bool copy_group: Copy group if `True` on Linux (has no effect on Windows)
    """
    if POSIX_MODE:
        src_stats = os.stat(src)
        # -1 tells os.chown to leave the corresponding id untouched.
        uid = src_stats.st_uid if copy_user else -1
        gid = src_stats.st_gid if copy_group else -1
        # On Windows, os.chown does not exist. This is checked through
        # POSIX_MODE, but static analyzers cannot see that on Windows.
        os.chown(dst, uid, gid)
    elif copy_user:
        # Windows files have no group owner, so only the user is copied.
        _copy_win_ownership(src, dst)
    chmod(dst, mode)
# Quite similar to copy_ownership_and_apply_mode, but this time the DACL is copied from
# the source file on Windows. The DACL stays consistent with the dynamic rights of the
# equivalent POSIX mode, because ownership and mode are copied altogether on the destination
# file, so no recomputing of the DACL against the new owner is needed, as it would be
# for a copy_ownership alone method.
def copy_ownership_and_mode(src: str, dst: str,
                            copy_user: bool = True, copy_group: bool = True) -> None:
    """
    Copy ownership (user and optionally group on Linux) and mode/DACL
    from the source to the destination.

    :param str src: Path of the source file
    :param str dst: Path of the destination file
    :param bool copy_user: Copy user if `True`
    :param bool copy_group: Copy group if `True` on Linux (has no effect on Windows)
    """
    if not POSIX_MODE:
        # Windows: copy the owner (if asked) and duplicate the DACL. The DACL
        # stays consistent because ownership and mode move together here.
        if copy_user:
            _copy_win_ownership(src, dst)
        _copy_win_mode(src, dst)
        return
    # Linux: plain chown + chmod based on the source file's metadata.
    src_stats = os.stat(src)
    uid = src_stats.st_uid if copy_user else -1
    gid = src_stats.st_gid if copy_group else -1
    os.chown(dst, uid, gid)
    chmod(dst, src_stats.st_mode)
def check_mode(file_path: str, mode: int) -> bool:
    """
    Check if the given mode matches the permissions of the given file.
    On Linux, will make a direct comparison, on Windows, mode will be compared
    against the security model.

    :param str file_path: Path of the file
    :param int mode: POSIX mode to test
    :rtype: bool
    :return: True if the POSIX mode matches the file permissions
    """
    if not POSIX_MODE:
        return _check_win_mode(file_path, mode)
    # Compare only the permission bits, ignoring the file-type bits.
    return stat.S_IMODE(os.stat(file_path).st_mode) == mode
def check_owner(file_path: str) -> bool:
    """
    Check if given file is owned by current user.

    :param str file_path: File path to check
    :rtype: bool
    :return: True if given file is owned by current user, False otherwise.
    """
    if POSIX_MODE:
        return os.stat(file_path).st_uid == os.getuid()
    # Windows: compare the file owner's SID with the current user's SID.
    descriptor = win32security.GetFileSecurity(
        file_path, win32security.OWNER_SECURITY_INFORMATION)
    return _get_current_user() == descriptor.GetSecurityDescriptorOwner()
def check_permissions(file_path: str, mode: int) -> bool:
    """
    Check if given file has the given mode and is owned by current user.

    :param str file_path: File path to check
    :param int mode: POSIX mode to check
    :rtype: bool
    :return: True if file has correct mode and owner, False otherwise.
    """
    # Short-circuit on ownership before checking the mode.
    if not check_owner(file_path):
        return False
    return check_mode(file_path, mode)
def open(file_path: str, flags: int, mode: int = 0o777) -> int: # pylint: disable=redefined-builtin
    """
    Wrapper of original os.open function, that will ensure on Windows that given mode
    is correctly applied.

    :param str file_path: The file path to open
    :param int flags: Flags to apply on file while opened
    :param int mode: POSIX mode to apply on file when opened,
        Python defaults will be applied if ``None``
    :returns: the file descriptor to the opened file
    :rtype: int
    :raise: OSError(errno.EEXIST) if the file already exists and os.O_CREAT & os.O_EXCL are set,
            OSError(errno.EACCES) on Windows if the file already exists and is a directory, and
            os.O_CREAT is set.
    """
    if POSIX_MODE:
        # On Linux, invoke os.open directly.
        return os.open(file_path, flags, mode)
    # Windows: handle creation of the file atomically with proper permissions.
    if flags & os.O_CREAT:
        # If os.O_EXCL is set, we will use the "CREATE_NEW", that will raise an exception if
        # file exists, matching the API contract of this bit flag. Otherwise, we use
        # "CREATE_ALWAYS" that will always create the file whether it exists or not.
        disposition = win32con.CREATE_NEW if flags & os.O_EXCL else win32con.CREATE_ALWAYS
        attributes = win32security.SECURITY_ATTRIBUTES()
        security = attributes.SECURITY_DESCRIPTOR
        user = _get_current_user()
        dacl = _generate_dacl(user, mode, _WINDOWS_UMASK.mask)
        # We set second parameter to 0 (`False`) to say that this security descriptor is
        # NOT constructed from a default mechanism, but is explicitly set by the user.
        # See https://docs.microsoft.com/en-us/windows/desktop/api/securitybaseapi/nf-securitybaseapi-setsecuritydescriptorowner # pylint: disable=line-too-long
        security.SetSecurityDescriptorOwner(user, 0)
        # We set first parameter to 1 (`True`) to say that this security descriptor contains
        # a DACL. Otherwise second and third parameters are ignored.
        # We set third parameter to 0 (`False`) to say that this security descriptor is
        # NOT constructed from a default mechanism, but is explicitly set by the user.
        # See https://docs.microsoft.com/en-us/windows/desktop/api/securitybaseapi/nf-securitybaseapi-setsecuritydescriptordacl # pylint: disable=line-too-long
        security.SetSecurityDescriptorDacl(1, dacl, 0)
        handle = None
        try:
            # NOTE(review): `FILE_SHARE_READ & FILE_SHARE_WRITE` is a bitwise
            # AND of two distinct single-bit flags and therefore evaluates to
            # 0 (no sharing at all). If shared read/write access was intended
            # here, this should be `|` — confirm the intended semantics.
            handle = win32file.CreateFile(file_path, win32file.GENERIC_READ,
                                          win32file.FILE_SHARE_READ & win32file.FILE_SHARE_WRITE,
                                          attributes, disposition, 0, None)
        except pywintypes.error as err:
            # Handle native windows errors into python errors to be consistent with the API
            # of os.open in the situation of a file already existing or locked.
            if err.winerror == winerror.ERROR_FILE_EXISTS:
                raise OSError(errno.EEXIST, err.strerror)
            if err.winerror == winerror.ERROR_SHARING_VIOLATION:
                raise OSError(errno.EACCES, err.strerror)
            raise err
        finally:
            if handle:
                handle.Close()
        # At this point, the file that did not exist has been created with proper permissions,
        # so os.O_CREAT and os.O_EXCL are not needed anymore. We remove them from the flags to
        # avoid a FileExists exception before calling os.open.
        return os.open(file_path, flags ^ os.O_CREAT ^ os.O_EXCL)
    # Windows: general case, we call os.open, let exceptions be thrown, then chmod if all is fine.
    handle = os.open(file_path, flags)
    chmod(file_path, mode)
    return handle
def makedirs(file_path: str, mode: int = 0o777) -> None:
    """
    Rewrite of original os.makedirs function, that will ensure on Windows that given mode
    is correctly applied.

    :param str file_path: The file path to open
    :param int mode: POSIX mode to apply on leaf directory when created, Python defaults
        will be applied if ``None``
    """
    current_umask = umask(0)
    try:
        # Since Python 3.7, os.makedirs does not set the given mode to the intermediate
        # directories that could be created in the process. To keep things safe and consistent
        # on all Python versions, we set the umask accordingly to have all directories
        # (intermediate and leaf) created with the given mode.
        # Precedence note: `^` binds tighter than `|`, so this is
        # current_umask | (0o777 ^ mode) — i.e. mask out every bit not in `mode`.
        umask(current_umask | 0o777 ^ mode)
        if POSIX_MODE:
            return os.makedirs(file_path, mode)
        orig_mkdir_fn = os.mkdir
        try:
            # As we know that os.mkdir is called internally by os.makedirs, we will swap the
            # function in os module for the time of makedirs execution on Windows.
            os.mkdir = mkdir # type: ignore
            return os.makedirs(file_path, mode)
        finally:
            # Always restore the real os.mkdir, even if makedirs failed.
            os.mkdir = orig_mkdir_fn
    finally:
        # Always restore the caller's umask.
        umask(current_umask)
def mkdir(file_path: str, mode: int = 0o777) -> None:
    """
    Rewrite of original os.mkdir function, that will ensure on Windows that given mode
    is correctly applied.

    :param str file_path: The file path to open
    :param int mode: POSIX mode to apply on directory when created, Python defaults
        will be applied if ``None``
    """
    if POSIX_MODE:
        return os.mkdir(file_path, mode)
    # Windows: create the directory with an explicit owner and DACL derived
    # from the POSIX mode (honoring the Certbot-side umask).
    attributes = win32security.SECURITY_ATTRIBUTES()
    security = attributes.SECURITY_DESCRIPTOR
    user = _get_current_user()
    dacl = _generate_dacl(user, mode, _WINDOWS_UMASK.mask)
    # Second parameter False: the owner is explicitly set, not defaulted.
    security.SetSecurityDescriptorOwner(user, False)
    # First parameter 1 (True): a DACL is present; third parameter 0 (False):
    # the DACL is explicitly set, not defaulted.
    security.SetSecurityDescriptorDacl(1, dacl, 0)
    try:
        win32file.CreateDirectory(file_path, attributes)
    except pywintypes.error as err:
        # Handle native windows error into python error to be consistent with the API
        # of os.mkdir in the situation of a directory already existing.
        if err.winerror == winerror.ERROR_ALREADY_EXISTS:
            raise OSError(errno.EEXIST, err.strerror, file_path, err.winerror)
        raise err
    return None
def replace(src: str, dst: str) -> None:
    """
    Rename a file to a destination path and handles situations where the destination exists.

    :param str src: The current file path.
    :param str dst: The new file path.
    """
    # os.replace() exists on every supported Python (added in 3.3) and already
    # has the desired semantics on both Linux and Windows (atomically
    # overwrites an existing destination), so the historical
    # hasattr()/os.rename() fallback for Python 2 is no longer needed.
    os.replace(src, dst)
def realpath(file_path: str) -> str:
    """
    Find the real path for the given path. This method resolves symlinks, including
    recursive symlinks, and is protected against symlinks that creates an infinite loop.

    :param str file_path: The path to resolve
    :returns: The real path for the given path
    :rtype: str
    :raise: RuntimeError if a symlink loop is detected
    """
    original_path = file_path
    # Since Python 3.8, os.path.realpath also resolves symlinks on Windows.
    if POSIX_MODE or sys.version_info >= (3, 8):
        path = os.path.realpath(file_path)
        if os.path.islink(path):
            # If path returned by realpath is still a link, it means that it failed to
            # resolve the symlink because of a loop.
            # See realpath code: https://github.com/python/cpython/blob/master/Lib/posixpath.py
            raise RuntimeError('Error, link {0} is a loop!'.format(original_path))
        return path
    # Manual resolution for Windows on Python < 3.8: follow links one hop at
    # a time, remembering every target seen so a revisit means a loop.
    inspected_paths: List[str] = []
    while os.path.islink(file_path):
        link_path = file_path
        file_path = os.readlink(file_path)
        if not os.path.isabs(file_path):
            # Relative link targets are resolved against the link's directory.
            file_path = os.path.join(os.path.dirname(link_path), file_path)
        if file_path in inspected_paths:
            raise RuntimeError('Error, link {0} is a loop!'.format(original_path))
        inspected_paths.append(file_path)
    return os.path.abspath(file_path)
def readlink(link_path: str) -> str:
    """
    Return a string representing the path to which the symbolic link points.

    :param str link_path: The symlink path to resolve
    :return: The path the symlink points to
    :returns: str
    :raise: ValueError if a long path (260> characters) is encountered on Windows
    """
    target = os.readlink(link_path)
    if POSIX_MODE or not target.startswith('\\\\?\\'):
        return target
    # Windows with an extended-form path ('\\?\' prefix, emitted for all
    # paths by Python 3.8+). A normal path may hold at most 259 characters
    # (260 including the NUL terminator, which Python strings omit), so with
    # the 4-character prefix a normal path string is at most 263 characters.
    if len(target) >= 264:
        raise ValueError("Long paths are not supported by Certbot on Windows.")
    # Strip the extended-form prefix to return a normal path.
    return target[4:]
# On Windows is_executable run from an unprivileged shell may claim that a path is
# executable when it is executable only if run from a privileged shell. This result
# is due to the fact that GetEffectiveRightsFromAcl calculate effective rights
# without taking into consideration if the target user has currently required the
# elevated privileges or not. However this is not a problem since certbot always
# requires to be run under a privileged shell, so the user will always benefit
# from the highest (privileged one) set of permissions on a given file.
def is_executable(path: str) -> bool:
    """
    Is path an executable file?

    :param str path: path to test
    :return: True if path is an executable file
    :rtype: bool
    """
    if not POSIX_MODE:
        return _win_is_executable(path)
    # Must be a regular file carrying an execute bit for the current user.
    return os.path.isfile(path) and os.access(path, os.X_OK)
def has_world_permissions(path: str) -> bool:
    """
    Check if everybody/world has any right (read/write/execute) on a file given its path.

    :param str path: path to test
    :return: True if everybody/world has any right to the file
    :rtype: bool
    """
    if POSIX_MODE:
        return bool(stat.S_IMODE(os.stat(path).st_mode) & stat.S_IRWXO)
    # Windows: query the effective rights of the well-known "Everyone"
    # SID (S-1-1-0) against the file's DACL.
    descriptor = win32security.GetFileSecurity(
        path, win32security.DACL_SECURITY_INFORMATION)
    everyone = win32security.ConvertStringSidToSid('S-1-1-0')
    rights = descriptor.GetSecurityDescriptorDacl().GetEffectiveRightsFromAcl({
        'TrusteeForm': win32security.TRUSTEE_IS_SID,
        'TrusteeType': win32security.TRUSTEE_IS_USER,
        'Identifier': everyone,
    })
    return bool(rights)
def compute_private_key_mode(old_key: str, base_mode: int) -> int:
    """
    Calculate the POSIX mode to apply to a private key given the previous private key.

    :param str old_key: path to the previous private key
    :param int base_mode: the minimum modes to apply to a private key
    :return: the POSIX mode to apply
    :rtype: int
    """
    if not POSIX_MODE:
        # On Windows, the mode returned by os.stat is not reliable,
        # so no permission is carried over from the previous private key.
        return base_mode
    # On Linux, preserve the group read/write/execute and world read bits
    # of the previous key, on top of the required base mode.
    preserved_bits = stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH
    old_mode = stat.S_IMODE(os.stat(old_key).st_mode) & preserved_bits
    return base_mode | old_mode
def has_same_ownership(path1: str, path2: str) -> bool:
    """
    Return True if the ownership of two files given their respective path is the same.
    On Windows, ownership is checked against owner only, since files do not have a group owner.

    :param str path1: path to the first file
    :param str path2: path to the second file
    :return: True if both files have the same ownership, False otherwise
    :rtype: bool
    """
    if POSIX_MODE:
        stats1 = os.stat(path1)
        stats2 = os.stat(path2)
        # Both uid and gid must match on Linux.
        return stats1.st_uid == stats2.st_uid and stats1.st_gid == stats2.st_gid
    # Windows: compare the owner SIDs of the two files.
    owners = []
    for path in (path1, path2):
        descriptor = win32security.GetFileSecurity(
            path, win32security.OWNER_SECURITY_INFORMATION)
        owners.append(descriptor.GetSecurityDescriptorOwner())
    return owners[0] == owners[1]
def has_min_permissions(path: str, min_mode: int) -> bool:
    """
    Check if a file given its path has at least the permissions defined by the given minimal mode.
    On Windows, group permissions are ignored since files do not have a group owner.

    :param str path: path to the file to check
    :param int min_mode: the minimal permissions expected
    :return: True if the file matches the minimal permissions expectations, False otherwise
    :rtype: bool
    """
    if POSIX_MODE:
        st_mode = os.stat(path).st_mode
        # mode | min_mode == mode exactly when every bit of min_mode is set.
        return st_mode == st_mode | min_mode
    # Resolve symlinks, to get a consistent result with os.stat on Linux,
    # that follows symlinks by default.
    path = realpath(path)
    # Get owner sid of the file
    security = win32security.GetFileSecurity(
        path, win32security.OWNER_SECURITY_INFORMATION | win32security.DACL_SECURITY_INFORMATION)
    user = security.GetSecurityDescriptorOwner()
    dacl = security.GetSecurityDescriptorDacl()
    # Build the DACL that min_mode would imply, then verify that for each of
    # its ACEs, the file's effective rights are a superset of the ACE's mask.
    min_dacl = _generate_dacl(user, min_mode)
    for index in range(min_dacl.GetAceCount()):
        min_ace = min_dacl.GetAce(index)
        # On a given ACE, index 0 is the ACE type, 1 is the permission mask, and 2 is the SID.
        # See: http://timgolden.me.uk/pywin32-docs/PyACL__GetAce_meth.html
        mask = min_ace[1]
        user = min_ace[2]
        effective_mask = dacl.GetEffectiveRightsFromAcl({
            'TrusteeForm': win32security.TRUSTEE_IS_SID,
            'TrusteeType': win32security.TRUSTEE_IS_USER,
            'Identifier': user,
        })
        # Superset check: OR-ing in the required mask must change nothing.
        if effective_mask != effective_mask | mask:
            return False
    return True
def _win_is_executable(path):
    # Return True when `path` is a regular file whose effective rights for the
    # current user include all of FILE_GENERIC_EXECUTE.
    if not os.path.isfile(path):
        return False
    security = win32security.GetFileSecurity(path, win32security.DACL_SECURITY_INFORMATION)
    dacl = security.GetSecurityDescriptorDacl()
    mode = dacl.GetEffectiveRightsFromAcl({
        'TrusteeForm': win32security.TRUSTEE_IS_SID,
        'TrusteeType': win32security.TRUSTEE_IS_USER,
        'Identifier': _get_current_user(),
    })
    # Subset test on the execute flags only.
    return mode & ntsecuritycon.FILE_GENERIC_EXECUTE == ntsecuritycon.FILE_GENERIC_EXECUTE
def _apply_win_mode(file_path, mode):
    """
    This function converts the given POSIX mode into a Windows ACL list, and applies it to the
    file given its path. If the given path is a symbolic link, it will resolved to apply the
    mode on the targeted file.
    :param str file_path: path of the file to modify
    :param int mode: POSIX mode (e.g. ``0o600``) to translate into a DACL
    """
    file_path = realpath(file_path)
    # Get owner sid of the file
    security = win32security.GetFileSecurity(file_path, win32security.OWNER_SECURITY_INFORMATION)
    user = security.GetSecurityDescriptorOwner()
    # New DACL, that will overwrite existing one (including inherited permissions)
    dacl = _generate_dacl(user, mode)
    # Apply the new DACL
    # Arguments are (bDaclPresent=1, dacl, bDaclDefaulted=0): the DACL is
    # present and explicitly set, not defaulted.
    security.SetSecurityDescriptorDacl(1, dacl, 0)
    win32security.SetFileSecurity(file_path, win32security.DACL_SECURITY_INFORMATION, security)
def _generate_dacl(user_sid, mode, mask=None):
    """
    Build a discretionary ACL equivalent to the given POSIX mode.
    :param user_sid: pySID of the file owner the "user" ACE is generated for
    :param int mode: POSIX mode to translate
    :param mask: optional bitmask of POSIX bits to strip before translating
    :return: a new ``win32security.ACL``
    """
    if mask:
        # Remove the masked-out bits (e.g. a umask) before translating.
        mode = mode & (0o777 - mask)
    analysis = _analyze_mode(mode)
    # Get standard accounts from "well-known" sid
    # See the list here:
    # https://support.microsoft.com/en-us/help/243330/well-known-security-identifiers-in-windows-operating-systems
    system = win32security.ConvertStringSidToSid('S-1-5-18')
    admins = win32security.ConvertStringSidToSid('S-1-5-32-544')
    everyone = win32security.ConvertStringSidToSid('S-1-1-0')
    # New dacl, without inherited permissions
    dacl = win32security.ACL()
    # If user is already system or admins, any ACE defined here would be superseded by
    # the full control ACE that will be added after.
    if user_sid not in [system, admins]:
        # Handle user rights
        user_flags = _generate_windows_flags(analysis['user'])
        if user_flags:
            dacl.AddAccessAllowedAce(win32security.ACL_REVISION, user_flags, user_sid)
    # Handle everybody rights
    everybody_flags = _generate_windows_flags(analysis['all'])
    if everybody_flags:
        dacl.AddAccessAllowedAce(win32security.ACL_REVISION, everybody_flags, everyone)
    # Handle administrator rights
    full_permissions = _generate_windows_flags({'read': True, 'write': True, 'execute': True})
    dacl.AddAccessAllowedAce(win32security.ACL_REVISION, full_permissions, system)
    dacl.AddAccessAllowedAce(win32security.ACL_REVISION, full_permissions, admins)
    return dacl
def _analyze_mode(mode):
return {
'user': {
'read': mode & stat.S_IRUSR,
'write': mode & stat.S_IWUSR,
'execute': mode & stat.S_IXUSR,
},
'all': {
'read': mode & stat.S_IROTH,
'write': mode & stat.S_IWOTH,
'execute': mode & stat.S_IXOTH,
},
}
def _copy_win_ownership(src, dst):
    # Transfer the owner SID of `src` onto `dst` (Windows counterpart of
    # copying st_uid on POSIX).
    # Resolve symbolic links
    src = realpath(src)
    security_src = win32security.GetFileSecurity(src, win32security.OWNER_SECURITY_INFORMATION)
    user_src = security_src.GetSecurityDescriptorOwner()
    security_dst = win32security.GetFileSecurity(dst, win32security.OWNER_SECURITY_INFORMATION)
    # Second parameter indicates, if `False`, that the owner of the file is not provided by some
    # default mechanism, but is explicitly set instead. This is obviously what we are doing here.
    security_dst.SetSecurityDescriptorOwner(user_src, False)
    win32security.SetFileSecurity(dst, win32security.OWNER_SECURITY_INFORMATION, security_dst)
def _copy_win_mode(src, dst):
    # Copy the effective permissions (DACL) of `src` onto `dst` (Windows
    # counterpart of copying st_mode on POSIX).
    # Resolve symbolic links
    src = realpath(src)
    # Copy the DACL from src to dst.
    security_src = win32security.GetFileSecurity(src, win32security.DACL_SECURITY_INFORMATION)
    dacl = security_src.GetSecurityDescriptorDacl()
    security_dst = win32security.GetFileSecurity(dst, win32security.DACL_SECURITY_INFORMATION)
    # (bDaclPresent=1, dacl, bDaclDefaulted=0): set the DACL explicitly.
    security_dst.SetSecurityDescriptorDacl(1, dacl, 0)
    win32security.SetFileSecurity(dst, win32security.DACL_SECURITY_INFORMATION, security_dst)
def _generate_windows_flags(rights_desc):
    """Translate an r/w/x rights description into a Windows access mask.

    Read and execute map one-to-one onto their generic Windows counterparts.
    POSIX write is different: ntsecuritycon.FILE_GENERIC_WRITE does not cover
    delete, move or rename, so the write bit is emulated with
    FILE_ALL_ACCESS minus the read and execute parts. As a consequence,
    read + write + execute together yield FILE_ALL_ACCESS ("Full Control").

    A complete list of the rights defined on NTFS can be found here:
    https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2003/cc783530(v=ws.10)#permissions-for-files-and-folders
    """
    write_mask = (ntsecuritycon.FILE_ALL_ACCESS
                  ^ ntsecuritycon.FILE_GENERIC_READ
                  ^ ntsecuritycon.FILE_GENERIC_EXECUTE)
    masks = {
        'read': ntsecuritycon.FILE_GENERIC_READ,
        'write': write_mask,
        'execute': ntsecuritycon.FILE_GENERIC_EXECUTE,
    }
    flag = 0
    for right, mask in masks.items():
        if rights_desc[right]:
            flag |= mask
    return flag
def _check_win_mode(file_path, mode):
    # Return True when the file's current DACL matches exactly the DACL that
    # _generate_dacl would produce for the given POSIX mode and current owner.
    # Resolve symbolic links
    file_path = realpath(file_path)
    # Get current dacl file
    security = win32security.GetFileSecurity(file_path, win32security.OWNER_SECURITY_INFORMATION
                                             | win32security.DACL_SECURITY_INFORMATION)
    dacl = security.GetSecurityDescriptorDacl()
    # Get current file owner sid
    user = security.GetSecurityDescriptorOwner()
    if not dacl:
        # No DACL means full control to everyone
        # This is not a deterministic permissions set.
        return False
    # Calculate the target dacl
    ref_dacl = _generate_dacl(user, mode)
    return _compare_dacls(dacl, ref_dacl)
def _compare_dacls(dacl1, dacl2):
"""
This method compare the two given DACLs to check if they are identical.
Identical means here that they contains the same set of ACEs in the same order.
"""
return ([dacl1.GetAce(index) for index in range(dacl1.GetAceCount())] ==
[dacl2.GetAce(index) for index in range(dacl2.GetAceCount())])
def _get_current_user():
    """
    Return the pySID of the user running the current process.
    """
    # win32api.GetUserNameEx returns nonsense when Certbot runs under
    # NT AUTHORITY\SYSTEM, so the DOMAIN\username string is assembled by hand.
    # To run Certbot under NT AUTHORITY\SYSTEM, you can open a shell using the instructions here:
    # https://blogs.technet.microsoft.com/ben_parker/2010/10/27/how-do-i-run-powershell-execommand-prompt-as-the-localsystem-account-on-windows-7/
    account_name = r"{0}\{1}".format(win32api.GetDomainName(), win32api.GetUserName())
    # Passing None as the system name makes Windows search local machine
    # accounts first, then the primary domain (if joined), then trusted
    # domains — the preferred lookup order when no specific mechanism is needed.
    # See https://docs.microsoft.com/en-us/windows/desktop/api/winbase/nf-winbase-lookupaccountnamea
    sid, _domain, _account_type = win32security.LookupAccountName(None, account_name)
    return sid
|
nchursin/ApexIntentionActions
|
helpers/PropertyActions.py
|
from . import logger
from . import TemplateHelper as TH
from . import RegexHelper as re
from . import Actions as A
from . import ClassActions as CA
from . import SublimeHelper as SH
from .SublimeHelper import ViewHelper as VH
log = logger.get(__name__)
class PropertyAction(A.Action):
    """Base class for intention actions operating on an Apex property
    definition (the region stored in ``self.code_region``)."""
    def __init__(self, name):
        super(PropertyAction, self).__init__(name)
    def get_class_code(self):
        """Return the region of the whole class enclosing the property."""
        return self.full_region(self.code_region)
    def get_prop_name(self):
        """Extract the property name from the property definition text."""
        result = re.findPropName(self.to_text(self.code_region))
        log.debug('property name >> ' + result)
        return result
    def get_prop_type(self):
        """Extract the property type from the property definition text."""
        result = re.findPropType(self.to_text(self.code_region))
        log.debug('property type >> ' + result)
        return result
    def is_prop_static(self):
        """Return whether the property is declared ``static``."""
        result = re.findPropIsStatic(self.to_text(self.code_region))
        # Fix: the previous call `log.debug('property static >> ', result)`
        # passed `result` as a stray positional argument instead of including
        # it in the message; stringify it so non-string results log safely.
        log.debug('property static >> ' + str(result))
        return result
    def generate_code(self):
        """Subclasses must implement the actual code generation."""
        raise Exception("generate_code not defined")
    def is_applicable(self):
        """Applicable on any property definition (get/set bodies allowed)."""
        return re.is_prop_def(self.to_text(), True)
class AddGetterAction(PropertyAction):
    """Intention action: insert a getter for the property under the cursor."""
    def __init__(self):
        super(AddGetterAction, self).__init__(A.ADD_GETTER)
    def generate_code(self, edit):
        """Render the getter template and insert it just before the class end."""
        tmpl = TH.Template('other/getter')
        tmpl.addVar('type', self.get_prop_type())
        tmpl.addVar('varName', self.get_prop_name())
        tmpl.addVar('static', self.is_prop_static())
        tmpl.addVar('indent', self.get_inner_indent())
        insert_at = self.find_end_of_class().begin()
        self.view.insert(edit, insert_at, tmpl.compile())
    def is_applicable(self):
        """Only on a property definition that has no getter yet."""
        result = re.is_prop_def(self.to_text())
        if not result:
            return result
        class_text = self.to_text(self.get_class_code())
        return re.findGetter(class_text, self.get_prop_name()) is None
class AddSetterAction(PropertyAction):
    """Intention action: insert a setter for the property under the cursor."""
    def __init__(self):
        super(AddSetterAction, self).__init__(A.ADD_SETTER)
    def generate_code(self, edit):
        """Render the setter template and insert it just before the class end."""
        tmpl = TH.Template('other/setter')
        tmpl.addVar('type', self.get_prop_type())
        tmpl.addVar('varName', self.get_prop_name())
        tmpl.addVar('static', self.is_prop_static())
        tmpl.addVar('indent', self.get_inner_indent())
        insert_at = self.find_end_of_class().begin()
        self.view.insert(edit, insert_at, tmpl.compile())
    def is_applicable(self):
        """Only on a property definition that has no setter yet."""
        result = re.is_prop_def(self.to_text())
        if not result:
            return result
        class_text = self.to_text(self.get_class_code())
        return re.findSetter(class_text, self.get_prop_name()) is None
class AddGetterSetterAction(PropertyAction):
    """Composite action that inserts both a getter and a setter."""
    def __init__(self):
        super(AddGetterSetterAction, self).__init__(A.ADD_GETTER_SETTER)
        # Delegate the real work to the two single-purpose actions.
        self.getter = AddGetterAction()
        self.setter = AddSetterAction()
    def setView(self, view):
        super(AddGetterSetterAction, self).setView(view)
        for delegate in (self.getter, self.setter):
            delegate.setView(view)
    def setCode(self, code_region):
        super(AddGetterSetterAction, self).setCode(code_region)
        for delegate in (self.getter, self.setter):
            delegate.setCode(code_region)
    def is_applicable(self):
        """Applicable only when both delegates are applicable."""
        return self.getter.is_applicable() and self.setter.is_applicable()
    def generate_code(self, edit):
        """Insert the getter first, then the setter."""
        self.getter.generate_code(edit)
        self.setter.generate_code(edit)
class AddConstructorParameterAction(PropertyAction):
    """Adds the property under the cursor as a constructor parameter plus the
    corresponding ``this.<name> = <name>;`` assignment in its body."""
    def __init__(self):
        super(AddConstructorParameterAction, self).__init__(A.ADD_CONSTRUCTOR_PARAMETER)
    def run(self, edit, args):
        # 'constr_start' is present when this command is re-entered after the
        # user picked a constructor from the menu (see handle_constr_choice);
        # otherwise we first ask which constructor to modify.
        if 'constr_start' in args:
            self.generate_code(edit, args['constr_start'])
        else:
            self.choose_constructor(edit)
    def choose_constructor(self, edit):
        # Find candidate constructors; create one first if none exists.
        constr_regions = self.find_constructors()
        if not constr_regions:
            constructorAction = CA.AddConstructorAction()
            constructorAction.setView(self.view)
            constructorAction.setCode(self.find_class_def())
            constructorAction.generate_code(edit)
            constr_regions = self.find_constructors()
        if 1 == len(constr_regions):
            # A single constructor: no menu needed.
            self.generate_code(edit, constr_regions[0])
        else:
            # Offer the constructor signature lines in a quick panel.
            constrs = []
            for constr_region in constr_regions:
                constrs.append(self.to_text(self.view.line(constr_region.begin())).strip())
            self.vh.open_menu(list(constrs), self.handle_constr_choice)
    def handle_constr_choice(self, index):
        # Quick-panel callback; -1 means the user cancelled the menu.
        if -1 == index:
            return
        constr_regions = self.find_constructors()
        # Re-dispatch through the command so the modification happens inside a
        # fresh edit token, carrying the chosen constructor position along.
        args = {
            'action_name': self.name,
            'subl_line_start': self.code_region.begin(),
            'subl_line_end': self.code_region.end(),
            'constr_start': constr_regions[index].begin()
        }
        self.view.run_command('run_action', args)
    def generate_code(self, edit, constr_start):
        # Append the property to the parameter list of the constructor whose
        # definition starts at `constr_start`, then add the field assignment.
        start = constr_start
        def_line = self.view.line(start)
        def_str = self.view.substr(def_line)
        log.info('def_str >> ' + def_str)
        args = re.findConstructorArgs(def_str)
        log.info('args >> ' + str(args))
        arg_def = self.get_prop_type() + ' ' + self.get_prop_name()
        if args is not None:
            # Constructor already has parameters: separate with a comma.
            arg_def = ', ' + arg_def
        def_str = def_str.replace(')',
                                  arg_def + ')')
        self.view.replace(edit, def_line, def_str)
        # Re-read the line region: the replacement shifted buffer offsets.
        def_line = self.view.line(start)
        indent = self.get_inner_indent() + '\t'
        insert_to = def_line.end() + 1
        first_line = self.view.line(insert_to)
        # Keep an existing `super(...)` call as the first statement.
        if re.contains_regex(self.to_text(first_line), r'super\s*\('):
            insert_to = first_line.end() + 1
        text = '{indent}this.{varname} = {varname};\n'.format(indent=indent, varname=self.get_prop_name())
        self.view.insert(edit, insert_to, text)
    def is_applicable(self):
        # Property definitions only (get/set allowed, static excluded), and
        # only when no constructor already takes this parameter.
        result = re.is_prop_def(self.to_text(), allow_get_set=True, allow_static=False)
        result = result and re.findConstructorWithParam(
            self.to_text(self.get_class_code()),
            self.find_class_name(),
            self.get_prop_name(),
            self.get_prop_type()) is None
        return result
class AddGetSetProps(PropertyAction):
    """Turns a plain field into an auto-property: appends ``{ get; set; }``."""
    def __init__(self):
        super(AddGetSetProps, self).__init__(A.ADD_GET_SET_PROPS)
    def generate_code(self, edit):
        """Replace the trailing ';' of the field with a get/set snippet."""
        if SH.get_setting('generate_access_get_set'):
            snippet = ' { ${1:public} get; ${2:public} set; }'
        else:
            snippet = ' { get; set; }'
        # Absolute position of the ';' terminating the property definition.
        semicolon_at = self.begin() + self.to_text().rfind(';')
        VH(self.view).insert_snippet(snippet, (semicolon_at, semicolon_at + 1))
    def _make_delegate(self, action_cls):
        # Build a getter/setter action bound to the same view and region.
        delegate = action_cls()
        delegate.setView(self.view)
        delegate.setCode(self.code_region)
        return delegate
    def is_applicable(self):
        """Applicable when neither a getter nor a setter exists yet."""
        result = super(AddGetSetProps, self).is_applicable()
        getter = self._make_delegate(AddGetterAction)
        setter = self._make_delegate(AddSetterAction)
        return result and setter.is_applicable() and getter.is_applicable()
|
spohnan/geowave
|
python/src/main/python/pygw/query/vector/vector_query_constraints_factory.py
|
#
# Copyright (c) 2013-2019 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
#===============================================================================================
from ..query_constraints_factory import QueryConstraintsFactory
from ..query_constraints import QueryConstraints
from .spatial_temporal_constraints_builder import SpatialTemporalConstraintsBuilder
class VectorQueryConstraintsFactory(QueryConstraintsFactory):
    """
    Query constraints factory for vector data, adding spatial and/or temporal
    constraint construction on top of the base factory. Do not construct this
    class manually; obtain it via the query builder's `constraints_factory()`
    method.
    """
    def spatial_temporal_constraints(self):
        """
        Create a builder for spatial and/or temporal constraints.
        Returns:
            A new `pygw.query.vector.spatial_temporal_constraints_builder.SpatialTemporalConstraintsBuilder`.
        """
        java_builder = self._java_ref.spatialTemporalConstraints()
        return SpatialTemporalConstraintsBuilder(java_builder)
    def filter_constraints(self, filter):
        """
        Constrain a query using a filter created by pygw.query.FilterFactory.
        Args:
            filter (filter): The filter to constrain the query by.
        Returns:
            A `pygw.query.query_constraints.QueryConstraints` with the given filter.
        """
        java_constraints = self._java_ref.filterConstraints(filter)
        return QueryConstraints(java_constraints)
    def cql_constraints(self, cql_expression):
        """
        Constrain a query using a CQL expression.
        Args:
            cql_expression (str): The CQL expression to constrain the query by.
        Returns:
            A `pygw.query.query_constraints.QueryConstraints` with the given CQL expression.
        """
        java_constraints = self._java_ref.cqlConstraints(cql_expression)
        return QueryConstraints(java_constraints)
|
victorywang80/Maintenance
|
saltstack/src/salt/modules/dnsutil.py
|
# -*- coding: utf-8 -*-
'''
Compendium of generic DNS utilities
'''
# Import salt libs
import salt.utils
import socket
# Import python libs
import logging
log = logging.getLogger(__name__)
def __virtual__():
'''
Generic, should work on any platform (including Windows). Functionality
which requires dependencies outside of Python do not belong in this module.
'''
return 'dnsutil'
def parse_hosts(hostsfile='/etc/hosts', hosts=None):
    '''
    Parse /etc/hosts file.
    CLI Example:
    .. code-block:: bash
        salt '*' dnsutil.parse_hosts
    '''
    # Read the file only when raw hosts data was not handed in directly.
    if not hosts:
        try:
            with salt.utils.fopen(hostsfile, 'r') as fp_:
                hosts = fp_.read()
        except Exception:
            return 'Error: hosts data was not found'
    hostsdict = {}
    for line in hosts.splitlines():
        # Skip blank lines and full-line comments.
        if not line or line.startswith('#'):
            continue
        fields = line.split()
        # First field is the IP; every remaining field is a hostname alias.
        hostsdict.setdefault(fields[0], []).extend(fields[1:])
    return hostsdict
def hosts_append(hostsfile='/etc/hosts', ip_addr=None, entries=None):
    '''
    Append a single line to the /etc/hosts file.
    CLI Example:
    .. code-block:: bash
        salt '*' dnsutil.hosts_append /etc/hosts 127.0.0.1 ad1.yuk.co,ad2.yuk.co
    '''
    host_list = entries.split(',')
    hosts = parse_hosts(hostsfile=hostsfile)
    if ip_addr in hosts:
        # Drop hosts already recorded for this IP. The previous code removed
        # items from host_list while iterating over it, which skips the
        # element following every removal; filter into a new list instead.
        host_list = [host for host in host_list if host not in hosts[ip_addr]]
    if not host_list:
        return 'No additional hosts were added to {0}'.format(hostsfile)
    append_line = '\n{0} {1}'.format(ip_addr, ' '.join(host_list))
    with salt.utils.fopen(hostsfile, 'a') as fp_:
        fp_.write(append_line)
    return 'The following line was added to {0}:{1}'.format(hostsfile,
                                                            append_line)
def hosts_remove(hostsfile='/etc/hosts', entries=None):
    '''
    Remove a host from the /etc/hosts file. If doing so will leave a line
    containing only an IP address, then the line will be deleted. This function
    will leave comments and blank lines intact.
    CLI Examples:
    .. code-block:: bash
        salt '*' dnsutil.hosts_remove /etc/hosts ad1.yuk.co
        salt '*' dnsutil.hosts_remove /etc/hosts ad2.yuk.co,ad1.yuk.co
    '''
    with salt.utils.fopen(hostsfile, 'r') as fp_:
        hosts = fp_.read()
    host_list = entries.split(',')
    # Use a context manager so the output handle is closed even if a write
    # fails (the previous code leaked the handle on exceptions).
    with salt.utils.fopen(hostsfile, 'w') as out_file:
        for line in hosts.splitlines():
            # Preserve blank lines and comments untouched.
            if not line or line.strip().startswith('#'):
                out_file.write('{0}\n'.format(line))
                continue
            comps = line.split()
            for host in host_list:
                if host in comps[1:]:
                    comps.remove(host)
            # Only write the line back if at least one hostname remains
            # beside the IP address.
            if len(comps) > 1:
                out_file.write(' '.join(comps))
                out_file.write('\n')
def parse_zone(zonefile=None, zone=None):
    '''
    Parses a zone file. Can be passed raw zone data on the API level.
    CLI Example:
    .. code-block:: bash
        salt ns1 dnsutil.parse_zone /var/lib/named/example.com.zone
    '''
    if zonefile:
        try:
            with salt.utils.fopen(zonefile, 'r') as fp_:
                zone = fp_.read()
        except Exception:
            pass
    if not zone:
        return 'Error: Zone data was not found'
    zonedict = {}
    # 'single': one record per line; 'multi': inside a parenthesized record
    # spanning several lines, which are joined before being parsed.
    mode = 'single'
    for line in zone.splitlines():
        # Strip comments (everything after ';').
        comps = line.split(';')
        line = comps[0].strip()
        if not line:
            continue
        comps = line.split()
        # Directives such as $ORIGIN / $TTL.
        if line.startswith('$'):
            zonedict[comps[0].replace('$', '')] = comps[1]
            continue
        if '(' in line and ')' not in line:
            # Opening parenthesis without a close: start of a multi-line record.
            mode = 'multi'
            multi = ''
        if mode == 'multi':
            multi += ' {0}'.format(line)
            if ')' in line:
                # Closing parenthesis: collapse into a single logical line.
                mode = 'single'
                line = multi.replace('(', '').replace(')', '')
            else:
                continue
        # Expand '@' to the zone origin once it is known.
        if 'ORIGIN' in zonedict.keys():
            comps = line.replace('@', zonedict['ORIGIN']).split()
        else:
            comps = line.split()
        if 'SOA' in line:
            # Normalize: drop the optional 'IN' class so field offsets line up.
            if comps[1] != 'IN':
                comps.pop(1)
            zonedict['ORIGIN'] = comps[0]
            zonedict['NETWORK'] = comps[1]
            zonedict['SOURCE'] = comps[3]
            # Zone files encode the contact email with '.' in place of '@'.
            zonedict['CONTACT'] = comps[4].replace('.', '@', 1)
            zonedict['SERIAL'] = comps[5]
            zonedict['REFRESH'] = _to_seconds(comps[6])
            zonedict['RETRY'] = _to_seconds(comps[7])
            zonedict['EXPIRE'] = _to_seconds(comps[8])
            zonedict['MINTTL'] = _to_seconds(comps[9])
            continue
        if comps[0] == 'IN':
            # A record without a name inherits the zone origin.
            comps.insert(0, zonedict['ORIGIN'])
        if not comps[0].endswith('.'):
            # Qualify relative names against the origin.
            comps[0] = '{0}.{1}'.format(comps[0], zonedict['ORIGIN'])
        if comps[2] == 'NS':
            zonedict.setdefault('NS', []).append(comps[3])
        elif comps[2] == 'MX':
            # NOTE(review): the `not 'MX' in zonedict` guard means only the
            # FIRST MX record is kept and later ones are silently dropped —
            # looks unintended, but the behavior is preserved here.
            if not 'MX' in zonedict.keys():
                zonedict.setdefault('MX', []).append({'priority': comps[3],
                                                     'host': comps[4]})
        else:
            zonedict.setdefault(comps[2], {})[comps[0]] = comps[3]
    return zonedict
def _to_seconds(time):
'''
Converts a time value to seconds.
As per RFC1035 (page 45), max time is 1 week, so anything longer (or
unreadable) will be set to one week (604800 seconds).
'''
time = time.upper()
if 'H' in time:
time = int(time.replace('H', '')) * 3600
elif 'D' in time:
time = int(time.replace('D', '')) * 86400
elif 'W' in time:
time = 604800
else:
try:
time = int(time)
except Exception:
time = 604800
if time < 604800:
time = 604800
return time
def _has_dig():
    '''
    Return True when the ``dig`` binary is available on the PATH.
    The dig-specific functions live in their own module, but since they are
    also DNS utilities a compatibility layer exists here; this helper backs it.
    '''
    return bool(salt.utils.which('dig'))
def check_ip(ip_addr):
    '''
    Check that string ip_addr is a valid IP
    CLI Example:
    .. code-block:: bash
        salt ns1 dig.check_ip 127.0.0.1
    '''
    # Delegate to the dig module when available; otherwise report the gap.
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.check_ip'](ip_addr)
def A(host, nameserver=None):
    '''
    Return the A record for 'host'.
    Always returns a list.
    CLI Example:
    .. code-block:: bash
        salt ns1 dig.A www.google.com
    '''
    if _has_dig():
        return __salt__['dig.A'](host, nameserver)
    elif nameserver is None:
        # fall back to the socket interface, if we don't care who resolves
        try:
            (hostname, aliases, addresses) = socket.gethostbyname_ex(host)
            return addresses
        except socket.error:
            # Fixed typo in the user-facing message ("Unabled" -> "Unable").
            return 'Unable to resolve {0}'.format(host)
    # A specific nameserver was requested but dig is unavailable.
    return 'This function requires dig, which is not currently available'
def NS(domain, resolve=True, nameserver=None):
    '''
    Return a list of IPs of the nameservers for ``domain``
    If 'resolve' is False, don't resolve names.
    CLI Example:
    .. code-block:: bash
        salt ns1 dig.NS google.com
    '''
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.NS'](domain, resolve, nameserver)
def SPF(domain, record='SPF', nameserver=None):
    '''
    Return the allowed IPv4 ranges in the SPF record for ``domain``.
    If record is ``SPF`` and the SPF record is empty, the TXT record will be
    searched automatically. If you know the domain uses TXT and not SPF,
    specifying that will save a lookup.
    CLI Example:
    .. code-block:: bash
        salt ns1 dig.SPF google.com
    '''
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.SPF'](domain, record, nameserver)
def MX(domain, resolve=False, nameserver=None):
    '''
    Return a list of lists for the MX of ``domain``.
    If the 'resolve' argument is True, resolve IPs for the servers.
    It's limited to one IP, because although in practice it's very rarely a
    round robin, it is an acceptable configuration and pulling just one IP lets
    the data be similar to the non-resolved version. If you think an MX has
    multiple IPs, don't use the resolver here, resolve them in a separate step.
    CLI Example:
    .. code-block:: bash
        salt ns1 dig.MX google.com
    '''
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.MX'](domain, resolve, nameserver)
|
turbokongen/home-assistant
|
homeassistant/components/zwave_js/api.py
|
"""Websocket API for Z-Wave JS."""
import json
from aiohttp import hdrs, web, web_exceptions
import voluptuous as vol
from zwave_js_server import dump
from homeassistant.components import websocket_api
from homeassistant.components.http.view import HomeAssistantView
from homeassistant.components.websocket_api.connection import ActiveConnection
from homeassistant.const import CONF_URL
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.device_registry import DeviceEntry
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import DATA_CLIENT, DOMAIN, EVENT_DEVICE_ADDED_TO_REGISTRY
# Keys used in incoming/outgoing websocket message payloads.
ID = "id"
ENTRY_ID = "entry_id"
NODE_ID = "node_id"
TYPE = "type"
@callback
def async_register_api(hass: HomeAssistant) -> None:
    """Register all of our api endpoints."""
    for command_handler in (
        websocket_network_status,
        websocket_node_status,
        websocket_add_node,
        websocket_stop_inclusion,
        websocket_remove_node,
        websocket_stop_exclusion,
    ):
        websocket_api.async_register_command(hass, command_handler)
    hass.http.register_view(DumpView)  # type: ignore
@websocket_api.require_admin
@websocket_api.websocket_command(
    {vol.Required(TYPE): "zwave_js/network_status", vol.Required(ENTRY_ID): str}
)
@callback
def websocket_network_status(
    hass: HomeAssistant, connection: ActiveConnection, msg: dict
) -> None:
    """Get the status of the Z-Wave JS network."""
    client = hass.data[DOMAIN][msg[ENTRY_ID]][DATA_CLIENT]
    controller = client.driver.controller
    # Summarize the websocket client connection state and versions.
    client_info = {
        "ws_server_url": client.ws_server_url,
        "state": "connected" if client.connected else "disconnected",
        "driver_version": client.version.driver_version,
        "server_version": client.version.server_version,
    }
    # Summarize the controller: home id plus the known node ids.
    controller_info = {
        "home_id": controller.data["homeId"],
        "nodes": list(controller.nodes),
    }
    connection.send_result(
        msg[ID],
        {"client": client_info, "controller": controller_info},
    )
@websocket_api.websocket_command(
    {
        vol.Required(TYPE): "zwave_js/node_status",
        vol.Required(ENTRY_ID): str,
        vol.Required(NODE_ID): int,
    }
)
@callback
def websocket_node_status(
    hass: HomeAssistant, connection: ActiveConnection, msg: dict
) -> None:
    """Get the status of a Z-Wave JS node."""
    client = hass.data[DOMAIN][msg[ENTRY_ID]][DATA_CLIENT]
    node = client.driver.controller.nodes[msg[NODE_ID]]
    connection.send_result(
        msg[ID],
        {
            "node_id": node.node_id,
            "is_routing": node.is_routing,
            "status": node.status,
            "is_secure": node.is_secure,
            "ready": node.ready,
        },
    )
@websocket_api.require_admin  # type: ignore
@websocket_api.async_response
@websocket_api.websocket_command(
    {
        vol.Required(TYPE): "zwave_js/add_node",
        vol.Required(ENTRY_ID): str,
        vol.Optional("secure", default=False): bool,
    }
)
async def websocket_add_node(
    hass: HomeAssistant, connection: ActiveConnection, msg: dict
) -> None:
    """Add a node to the Z-Wave network."""
    entry_id = msg[ENTRY_ID]
    client = hass.data[DOMAIN][entry_id][DATA_CLIENT]
    controller = client.driver.controller
    include_non_secure = not msg["secure"]

    @callback
    def async_cleanup() -> None:
        """Remove signal listeners."""
        for unsub in unsubs:
            unsub()

    @callback
    def forward_event(event: dict) -> None:
        # Relay inclusion lifecycle events verbatim to the subscriber.
        connection.send_message(
            websocket_api.event_message(msg[ID], {"event": event["event"]})
        )

    @callback
    def node_added(event: dict) -> None:
        # Send a compact summary of the newly included node.
        node = event["node"]
        node_details = {
            "node_id": node.node_id,
            "status": node.status,
            "ready": node.ready,
        }
        connection.send_message(
            websocket_api.event_message(
                msg[ID], {"event": "node added", "node": node_details}
            )
        )

    @callback
    def device_registered(device: DeviceEntry) -> None:
        # Fired once the new node has been added to the device registry.
        device_details = {"name": device.name, "id": device.id}
        connection.send_message(
            websocket_api.event_message(
                msg[ID], {"event": "device registered", "device": device_details}
            )
        )

    # Tie listener teardown to the websocket subscription lifetime, then
    # attach all listeners BEFORE starting inclusion so no event is missed.
    connection.subscriptions[msg["id"]] = async_cleanup
    unsubs = [
        controller.on("inclusion started", forward_event),
        controller.on("inclusion failed", forward_event),
        controller.on("inclusion stopped", forward_event),
        controller.on("node added", node_added),
        async_dispatcher_connect(
            hass, EVENT_DEVICE_ADDED_TO_REGISTRY, device_registered
        ),
    ]
    result = await controller.async_begin_inclusion(include_non_secure)
    connection.send_result(
        msg[ID],
        result,
    )
@websocket_api.require_admin  # type: ignore
@websocket_api.async_response
@websocket_api.websocket_command(
    {
        vol.Required(TYPE): "zwave_js/stop_inclusion",
        vol.Required(ENTRY_ID): str,
    }
)
async def websocket_stop_inclusion(
    hass: HomeAssistant, connection: ActiveConnection, msg: dict
) -> None:
    """Cancel adding a node to the Z-Wave network."""
    client = hass.data[DOMAIN][msg[ENTRY_ID]][DATA_CLIENT]
    result = await client.driver.controller.async_stop_inclusion()
    connection.send_result(msg[ID], result)
@websocket_api.require_admin  # type: ignore
@websocket_api.async_response
@websocket_api.websocket_command(
    {
        vol.Required(TYPE): "zwave_js/stop_exclusion",
        vol.Required(ENTRY_ID): str,
    }
)
async def websocket_stop_exclusion(
    hass: HomeAssistant, connection: ActiveConnection, msg: dict
) -> None:
    """Cancel removing a node from the Z-Wave network."""
    client = hass.data[DOMAIN][msg[ENTRY_ID]][DATA_CLIENT]
    result = await client.driver.controller.async_stop_exclusion()
    connection.send_result(msg[ID], result)
@websocket_api.require_admin  # type:ignore
@websocket_api.async_response
@websocket_api.websocket_command(
    {
        vol.Required(TYPE): "zwave_js/remove_node",
        vol.Required(ENTRY_ID): str,
    }
)
async def websocket_remove_node(
    hass: HomeAssistant, connection: ActiveConnection, msg: dict
) -> None:
    """Remove a node from the Z-Wave network."""
    entry_id = msg[ENTRY_ID]
    client = hass.data[DOMAIN][entry_id][DATA_CLIENT]
    controller = client.driver.controller

    @callback
    def async_cleanup() -> None:
        """Remove signal listeners."""
        for unsub in unsubs:
            unsub()

    @callback
    def forward_event(event: dict) -> None:
        # Relay exclusion lifecycle events verbatim to the subscriber.
        connection.send_message(
            websocket_api.event_message(msg[ID], {"event": event["event"]})
        )

    @callback
    def node_removed(event: dict) -> None:
        # Notify the subscriber which node id was removed.
        node = event["node"]
        node_details = {
            "node_id": node.node_id,
        }
        connection.send_message(
            websocket_api.event_message(
                msg[ID], {"event": "node removed", "node": node_details}
            )
        )

    # Tie listener teardown to the websocket subscription lifetime, then
    # attach all listeners BEFORE starting exclusion so no event is missed.
    connection.subscriptions[msg["id"]] = async_cleanup
    unsubs = [
        controller.on("exclusion started", forward_event),
        controller.on("exclusion failed", forward_event),
        controller.on("exclusion stopped", forward_event),
        controller.on("node removed", node_removed),
    ]
    result = await controller.async_begin_exclusion()
    connection.send_result(
        msg[ID],
        result,
    )
class DumpView(HomeAssistantView):
    """View to dump the state of the Z-Wave JS server."""
    url = "/api/zwave_js/dump/{config_entry_id}"
    name = "api:zwave_js:dump"
    async def get(self, request: web.Request, config_entry_id: str) -> web.Response:
        """Dump the state of Z-Wave."""
        hass = request.app["hass"]
        # Reject requests for unknown or unloaded config entries.
        if config_entry_id not in hass.data[DOMAIN]:
            raise web_exceptions.HTTPBadRequest
        entry = hass.config_entries.async_get_entry(config_entry_id)
        msgs = await dump.dump_msgs(entry.data[CONF_URL], async_get_clientsession(hass))
        # One JSON document per line (JSON Lines), served as a download.
        body = "\n".join(json.dumps(msg) for msg in msgs) + "\n"
        headers = {
            hdrs.CONTENT_TYPE: "application/jsonl",
            hdrs.CONTENT_DISPOSITION: 'attachment; filename="zwave_js_dump.jsonl"',
        }
        return web.Response(body=body, headers=headers)
|
project-asap/IReS-Platform
|
asap-tools/experiments/depricated/handler/metrics.py
|
__author__ = 'cmantas'
from tools import *
from json import loads
# Pull the recorded metrics JSON for one Mahout k-means run from the DB.
ms = take_single("select metrics from mahout_kmeans_text where k=15 and documents=90300 and dimensions=53235;")[0]
mj = loads(ms)
# Rotating color palette consumed by the plot calls below (one color per line).
cols = iter(["#727272", '#f1595f', '#79c36a', '#599ad3', '#f9a65a','#9e66ab','#cd7058', '#d77fb3'])
def timeline2vaslues(fieldname, metrics):
    """Split a (timestamp, sample-dict) series into two parallel lists:
    the timestamps and the values of a single named field."""
    pairs = [(timestamp, sample[fieldname]) for timestamp, sample in metrics]
    times = [t for t, _ in pairs]
    values = [v for _, v in pairs]
    return times, values
def sum_timeline_vals(fieldnames, metrics):
    """For each (timestamp, sample-dict) pair, sum the requested fields.

    Fields whose name starts with "kbps" arrive as strings and are coerced
    to int — in place, mutating the sample dict — before summing, matching
    how the metrics were recorded.
    """
    times = []
    values = []
    for timestamp, sample in metrics:
        times.append(timestamp)
        total = 0
        for field in fieldnames:
            if field.startswith("kbps"):
                sample[field] = int(sample[field])
            total += sample[field]
        values.append(total)
    return times, values
# figure()
# Left axis: CPU and memory utilization percentages over time.
fig, ax1 = plt.subplots()
times, values = timeline2vaslues("cpu", mj)
d, = ax1.plot(times, values, color=next(cols))
ax1.set_ylabel('percentage (%)')
times, values = timeline2vaslues("mem", mj)
a, = ax1.plot(times, values, color=next(cols))
# Right (twin) axis: disk and network throughput in KB/s.
ax2 = ax1.twinx()
times, values = sum_timeline_vals(["kbps_read", "kbps_write"], mj)
ax2.set_ylabel("KB/s")
b, = ax2.plot(times, values, color=next(cols))
times, values = sum_timeline_vals(["net_in", "net_out"], mj)
c, = ax2.plot(times, values, color=next(cols))
plt.title("Mahout K-means Cluster Metrics")
# d/a live on ax1, b/c on ax2; the legend gathers all four lines.
plt.legend([d, a, b,c], ["CPU", "MEM", "Disk IO", "Net IO"], loc=3)
show()
|
vishnuprathish/constrained-data-generator
|
file1.py
|
from fingered import *
import random
import csv
import sys
def caac():
records = random.randrange(200,500)
inst3=Xf("r")
inst3.setStats(records,2,(2,records/10),[-1,0],[False,False],0,40000)
inst3.FormData()
inst4=Xf("s")
inst4.setStats(100,2,(2,10),[-1,0],[False,True],0,40000)
inst4.FormData()
print inst3
print inst4
#print "Predicted Cost of Fingered Join from Stats: "
#print "recorSize of file1=" + str(records)
pCost = inst3.getSize() + (inst4.getSize() * inst3.getRuns(1) )+ (inst3.getRuns(1) * inst4.getSize())
#print pCost
#print inst3.eJoin(inst4,1,1)
#print "\n Fingered Join:"
j=JoinReq(inst3,inst4,1,1,True)
tup=j.pull()
while tup is not "eoo":
#print str(tup)
tup=j.pull()
#print "Cost : " + str(j.getCost())
"""
print "\nNested Loop Join:\n"
inst3.reset()
inst4.reset()
k=JoinReq(inst3,inst4,1,1,False)
tup=k.pull()
while tup is not "eoo":
print str(tup)
tup=k.pull()
print "Cost : " + str(k.getCost())
"""
print "Summary:"
print "selected file1size: " + str(records)
print "selected number of runs for file1: " + str(inst3.getRuns(1))
print "Predicted Cost Finger:" + str(pCost)
print "Actual Cost Finger:" + str(j.getCost())
#print "Actual Cost NLJ:" + str(k.getCost())
print "("+ str(records) +","+ str(inst3.getRuns(1)) +","+ str(inst4.getSize()) +","+ str(pCost) +","+ str(j.getCost())+")"
tup = [ str(records), str(inst3.getRuns(1)),str(inst4.getSize()),str(pCost),str(j.getCost())]
print tup
fp = open("toexcel.csv","ab")
writer = csv.writer(fp)
data = [tup]
writer.writerows(data)
# Repeat the experiment twice, appending one CSV row per run.
for i in range(2):
    caac()
|
HybridF5/jacket
|
jacket/api/compute/openstack/compute/legacy_v2/server_metadata.py
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from webob import exc
from jacket.api.compute.openstack import common
from jacket.api.compute.openstack import wsgi
from jacket.compute import cloud
from jacket.compute import exception
from jacket.i18n import _
class Controller(object):
    """The server metadata API controller for the OpenStack API."""

    def __init__(self):
        self.compute_api = cloud.API()
        super(Controller, self).__init__()

    def _get_metadata(self, context, server_id):
        """Return the instance's metadata as a plain dict.

        Raises HTTPNotFound when the instance does not exist.
        """
        try:
            server = common.get_instance(self.compute_api, context, server_id)
            meta = self.compute_api.get_instance_metadata(context, server)
        except exception.InstanceNotFound:
            msg = _('Server does not exist')
            raise exc.HTTPNotFound(explanation=msg)
        # Copy into a fresh dict so callers never alias internal state.
        meta_dict = {}
        for key, value in six.iteritems(meta):
            meta_dict[key] = value
        return meta_dict

    def index(self, req, server_id):
        """Returns the list of metadata for a given instance."""
        context = req.environ['compute.context']
        return {'metadata': self._get_metadata(context, server_id)}

    def create(self, req, server_id, body):
        """Merge new metadata items into the instance (delete=False keeps
        existing keys not mentioned in the request)."""
        try:
            metadata = body['metadata']
        except (KeyError, TypeError):
            msg = _("Malformed request body")
            raise exc.HTTPBadRequest(explanation=msg)
        if not isinstance(metadata, dict):
            msg = _("Malformed request body. metadata must be object")
            raise exc.HTTPBadRequest(explanation=msg)
        context = req.environ['compute.context']
        new_metadata = self._update_instance_metadata(context,
                                                      server_id,
                                                      metadata,
                                                      delete=False)
        return {'metadata': new_metadata}

    def update(self, req, server_id, id, body):
        """Set a single metadata item; the URI id must match the body's key
        and the body must contain exactly one item."""
        try:
            meta_item = body['meta']
        except (TypeError, KeyError):
            expl = _('Malformed request body')
            raise exc.HTTPBadRequest(explanation=expl)
        if not isinstance(meta_item, dict):
            msg = _("Malformed request body. meta item must be object")
            raise exc.HTTPBadRequest(explanation=msg)
        if id not in meta_item:
            expl = _('Request body and URI mismatch')
            raise exc.HTTPBadRequest(explanation=expl)
        if len(meta_item) > 1:
            expl = _('Request body contains too many items')
            raise exc.HTTPBadRequest(explanation=expl)
        context = req.environ['compute.context']
        self._update_instance_metadata(context,
                                       server_id,
                                       meta_item,
                                       delete=False)
        return {'meta': meta_item}

    def update_all(self, req, server_id, body):
        """Replace the full metadata dict (delete=True removes keys absent
        from the request body)."""
        try:
            metadata = body['metadata']
        except (TypeError, KeyError):
            expl = _('Malformed request body')
            raise exc.HTTPBadRequest(explanation=expl)
        if not isinstance(metadata, dict):
            msg = _("Malformed request body. metadata must be object")
            raise exc.HTTPBadRequest(explanation=msg)
        context = req.environ['compute.context']
        new_metadata = self._update_instance_metadata(context,
                                                      server_id,
                                                      metadata,
                                                      delete=True)
        return {'metadata': new_metadata}

    def _update_instance_metadata(self, context, server_id, metadata,
                                  delete=False):
        """Apply a metadata update, translating cloud-layer exceptions into
        the corresponding HTTP errors."""
        try:
            server = common.get_instance(self.compute_api, context, server_id)
            return self.compute_api.update_instance_metadata(context,
                                                             server,
                                                             metadata,
                                                             delete)
        except exception.InstanceNotFound:
            msg = _('Server does not exist')
            raise exc.HTTPNotFound(explanation=msg)
        except (ValueError, AttributeError):
            msg = _("Malformed request body")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.InvalidMetadata as error:
            raise exc.HTTPBadRequest(explanation=error.format_message())
        except exception.InvalidMetadataSize as error:
            raise exc.HTTPRequestEntityTooLarge(
                explanation=error.format_message())
        except exception.QuotaError as error:
            raise exc.HTTPForbidden(explanation=error.format_message())
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'update metadata', server_id)

    def show(self, req, server_id, id):
        """Return a single metadata item."""
        context = req.environ['compute.context']
        data = self._get_metadata(context, server_id)
        try:
            return {'meta': {id: data[id]}}
        except KeyError:
            msg = _("Metadata item was not found")
            raise exc.HTTPNotFound(explanation=msg)

    @wsgi.response(204)
    def delete(self, req, server_id, id):
        """Deletes an existing metadata."""
        context = req.environ['compute.context']
        metadata = self._get_metadata(context, server_id)
        if id not in metadata:
            msg = _("Metadata item was not found")
            raise exc.HTTPNotFound(explanation=msg)
        server = common.get_instance(self.compute_api, context, server_id)
        try:
            self.compute_api.delete_instance_metadata(context, server, id)
        except exception.InstanceNotFound:
            msg = _('Server does not exist')
            raise exc.HTTPNotFound(explanation=msg)
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'delete metadata', server_id)
def create_resource():
    """Build the WSGI resource wrapping the server-metadata controller."""
    return wsgi.Resource(Controller())
|
drix00/pymcxray
|
pymcxray/serialization/test_Serialization.py
|
#!/usr/bin/env python
""" """
# Script information for the file.
__author__ = "Hendrix Demers (hendrix.demers@mail.mcgill.ca)"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2011 Hendrix Demers"
__license__ = ""
# Standard library modules.
import unittest
import logging
import os.path
import tempfile
import shutil
import time
# Third party modules.
from nose.plugins.skip import SkipTest
# Local modules.
from pymcxray import get_current_module_path
# Project modules
import pymcxray.serialization._Serialization as _Serialization
# Globals and constants variables.
class Test_Serialization(unittest.TestCase):
    """Tests for the :class:`_Serialization._Serialization` helper.

    Uses the long-deprecated ``assertEquals``/``assert_`` aliases replaced
    by their modern spellings, and fixes a copy-paste bug in
    ``test_getFilepath`` (see inline note).
    """

    def setUp(self):
        unittest.TestCase.setUp(self)
        self.serialization = _Serialization._Serialization()
        # Fresh scratch directory per test; removed again in tearDown.
        self.tempPath = tempfile.mkdtemp(prefix="Test_Serialization_")

    def tearDown(self):
        unittest.TestCase.tearDown(self)
        shutil.rmtree(self.tempPath)

    def testSkeleton(self):
        # Sanity check that the test harness itself runs.
        self.assertTrue(True)

    def test_init(self):
        """Constructor defaults and explicit filename/verbose arguments."""
        serialization = _Serialization._Serialization()
        self.assertEqual(None, serialization._filename)
        self.assertEqual(True, serialization._verbose)

        serialization = _Serialization._Serialization(verbose=True)
        self.assertEqual(None, serialization._filename)
        self.assertEqual(True, serialization._verbose)

        serialization = _Serialization._Serialization(verbose=False)
        self.assertEqual(None, serialization._filename)
        self.assertEqual(False, serialization._verbose)

        filenameRef = "_Serialization.ser"
        serialization = _Serialization._Serialization(filename=filenameRef)
        self.assertEqual(filenameRef, serialization._filename)
        self.assertEqual(True, serialization._verbose)

        filenameRef = "_Serialization2.ser"
        serialization = _Serialization._Serialization(filenameRef)
        self.assertEqual(filenameRef, serialization._filename)
        self.assertEqual(True, serialization._verbose)

    def test_getFilepath(self):
        """getFilepath combines pathname and filename, raising without one."""
        serialization = _Serialization._Serialization()
        self.assertRaises(ValueError, serialization.getFilepath)

        filenameRef = "_Serialization.ser"
        filepathRef = os.path.normpath(filenameRef)
        serialization = _Serialization._Serialization(filename=filenameRef)
        filepath = serialization.getFilepath()
        self.assertEqual(filepathRef, filepath)

        filenameRef = "_Serialization.ser"
        pathRef = "/casd/csadf/asdfsdaf/"
        filepathRef = os.path.normpath(os.path.join(pathRef, filenameRef))
        serialization = _Serialization._Serialization()
        serialization.setFilename(filenameRef)
        serialization.setPathname(pathRef)
        filepath = serialization.getFilepath()
        self.assertEqual(filepathRef, filepath)

        filenameRef = "_Serialization.ser"
        pathRef = "/casd/csadf/asdfsdaf/"
        # FIX: join from pathRef. The original joined onto the previous
        # filepathRef, silently testing a nested, unintended path.
        filepathRef = os.path.normpath(os.path.join(pathRef, filenameRef))
        serialization = _Serialization._Serialization()
        serialization.setFilepath(filepathRef)
        filepath = serialization.getFilepath()
        self.assertEqual(filepathRef, filepath)

    def test_setCurrentVersion(self):
        """setCurrentVersion stores a version string and rejects non-strings."""
        version = "1.2.3"
        self.serialization.setCurrentVersion(version)
        self.assertEqual(version, self.serialization._currentVersion)
        self.assertEqual(version, self.serialization.getCurrentVersion())

        version = 1.2
        self.assertRaises(TypeError, self.serialization.setCurrentVersion, version)

    def test_isFile(self):
        """isFile reflects whether the configured filepath exists on disk."""
        filepathRef = "/casd/csadf/asdfsdaf/sadfsdaf.ser"
        self.serialization.setFilepath(filepathRef)
        self.assertFalse(self.serialization.isFile())

        filepathRef = get_current_module_path(__file__, "../../test_data/serialization/empty.ser")
        if not os.path.isfile(filepathRef):
            raise SkipTest
        self.serialization.setFilepath(filepathRef)
        self.assertTrue(self.serialization.isFile())

    def test_deleteFile(self):
        """deleteFile removes the file at the configured filepath."""
        filename = "empty.ser"
        filepathRef = get_current_module_path(__file__, "../../test_data/serialization/")
        filepathRef = os.path.join(filepathRef, filename)
        if not os.path.isfile(filepathRef):
            raise SkipTest

        # Work on a copy in the temp dir so the fixture file is preserved.
        filepath = os.path.join(self.tempPath, filename)
        shutil.copy2(filepathRef, filepath)
        self.serialization.setFilepath(filepath)
        self.assertTrue(os.path.isfile(filepath))
        self.serialization.deleteFile()
        self.assertFalse(os.path.isfile(filepath))

    def test_isOlderThan(self):
        """isOlderThan compares modification times against another file."""
        filename = "empty"
        filepathRef = get_current_module_path(__file__, "../../test_data/serialization/")
        filepathRef = os.path.join(filepathRef, filename+'.ser')
        if not os.path.isfile(filepathRef):
            raise SkipTest

        # Create three copies with strictly increasing mtimes.
        filepath1 = os.path.join(self.tempPath, filename+'_1'+'.ser')
        time.sleep(1.0)
        shutil.copy(filepathRef, filepath1)
        filepath2 = os.path.join(self.tempPath, filename+'_2'+'.ser')
        time.sleep(1.0)
        shutil.copy(filepathRef, filepath2)
        filepath3 = os.path.join(self.tempPath, filename+'_3'+'.ser')
        time.sleep(1.0)
        shutil.copy(filepathRef, filepath3)

        self.serialization.setFilepath(filepath2)
        self.assertFalse(self.serialization.isOlderThan(filepath1))
        self.assertFalse(self.serialization.isOlderThan(filepath2))
        self.assertTrue(self.serialization.isOlderThan(filepath3))

        # A nonexistent comparison file is never "newer".
        filepath = "/casd/csadf/asdfsdaf/sadfsdaf.ser"
        self.assertFalse(self.serialization.isOlderThan(filepath))

        # A nonexistent own file is always considered older.
        filepath = "/casd/csadf/asdfsdaf/sadfsdaf.ser"
        self.serialization.setFilepath(filepath)
        self.assertTrue(self.serialization.isOlderThan(filepath3))
if __name__ == '__main__': #pragma: no cover
    # Run this module's tests via nose when executed directly.
    import nose
    nose.runmodule()
|
kidchang/compassv2-api
|
bin/query_switch.py
|
#!/usr/bin/python
#
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""query switch."""
import optparse
import Queue
import threading
import time
from compass.apiclient.restful import Client
class AddSwitch(object):
    """A utility class that handles adding a switch and retrieving
    corresponding machines associated with the switch.
    """

    def __init__(self, server_url):
        print server_url, " ...."
        self._client = Client(server_url)

    def add_switch(self, queue, ip, snmp_community):
        """Add a switch with SNMP credentials and retrieve attached
        server machines.

        :param queue: The result holder for the machine details.
        :type queue: A Queue object(thread-safe).
        :param ip: The IP address of the switch.
        :type ip: string.
        :param snmp_community: The SNMP community string.
        :type snmp_community: string.
        """
        status, resp = self._client.add_switch(ip,
                                               version="2c",
                                               community=snmp_community)
        # Status codes above 409 are unrecoverable API failures.
        if status > 409:
            queue.put((ip, (False,
                            "Failed to add the switch (status=%d)" % status)))
            return

        if status == 409:
            # This is the case where the switch with the same IP already
            # exists in the system. We now try to update the switch
            # with the given credential.
            switch_id = resp['failedSwitch']
            status, resp = self._client.update_switch(switch_id,
                                                      version="2c",
                                                      community=snmp_community)
            if status > 202:
                queue.put((ip, (False,
                                "Failed to update the switch (status=%d)" %
                                status)))
                return

        switch = resp['switch']
        state = switch['state']
        switch_id = switch['id']

        # if the switch state is not in under_monitoring,
        # wait for the poll switch task
        while True:
            status, resp = self._client.get_switch(switch_id)
            if status > 400:
                queue.put((ip, (False, "Failed to get switch status")))
                return

            switch = resp['switch']
            state = switch['state']
            # Poll every 5s while the switch is in a transitional state.
            if state == 'initialized' or state == 'repolling':
                time.sleep(5)
            else:
                break

        if state == 'under_monitoring':
            # get machines connected to the switch.
            status, response = self._client.get_machines(switch_id=switch_id)
            if status == 200:
                for machine in response['machines']:
                    queue.put((ip, "mac=%s, vlan=%s, port=%s dbid=%d" % (
                        machine['mac'],
                        machine['vlan'],
                        machine['port'],
                        machine['id'])))
            else:
                queue.put((ip, (False,
                                "Failed to get machines %s" %
                                response['status'])))
        else:
            queue.put((ip, (False, "Switch state is %s" % state)))
if __name__ == "__main__":
usage = "usage: %prog [options] switch_ips"
parser = optparse.OptionParser(usage)
parser.add_option("-u", "--server-url", dest="server_url",
default="http://localhost/api",
help="The Compass Server URL")
parser.add_option("-c", "--community", dest="community",
default="public",
help="Switch SNMP community string")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("Wrong number of arguments")
threads = []
queue = Queue.Queue()
add_switch = AddSwitch(options.server_url)
print "Add switch to the server. This may take a while ..."
for switch in args[0].split(','):
t = threading.Thread(target=add_switch.add_switch,
args=(queue, switch, options.community))
threads.append(t)
t.start()
for t in threads:
t.join(60)
while True:
try:
ip, result = queue.get(block=False)
print ip, " : ", result
except Queue.Empty:
break
|
melver/bibmanage
|
lib/python/bibman/commands/sync.py
|
# Copyright (c) 2012-2016, Marco Elver <me AT marcoelver.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sync command.
"""
import logging
import os
import datetime
import shutil
import string
from bibman.util import gen_hash_md5, gen_filename_from_bib
class SyncCommand:
    """Synchronise a bibliography file with documents found on disk.

    Scans the configured paths for files with matching extensions and, for
    every file not already present in the main bibliography (or any exclude
    bibliography), generates a new entry — optionally fetching details
    remotely, prompting interactively for corrections, and appending to the
    main file.
    """

    def __init__(self, conf, bibfile, excludefiles):
        self.conf = conf
        self.bibfmt_main = bibfmt_module.BibFmt(bibfile)

        indices = [bibfmt_module.FILE, bibfmt_module.CITEKEY]
        if self.conf.args.hash:
            indices.append(bibfmt_module.HASH)

        self.bibfmt_main.build_index(*indices)

        self.bibfmt_efs = []
        for fh in excludefiles:
            bi = bibfmt_module.BibFmt(fh)
            bi.build_index(*indices)
            self.bibfmt_efs.append(bi)

        # Sanity check data and warn about entries duplicated between the
        # main bibliography and any exclude file.
        for idx in indices:
            main_set = frozenset(self.bibfmt_main.index[idx])
            for bi in self.bibfmt_efs:
                duplicate_set = set(bi.index[idx])
                duplicate_set &= main_set
                if len(duplicate_set) != 0:
                    logging.warning("Duplicates found in '{}': {} = {}".format(
                        bi.bibfile.name, idx, duplicate_set))

    def walk_path(self):
        """Yield candidate document paths (with $HOME abbreviated to ~)."""
        for path in self.conf.args.paths:
            if not os.path.isdir(path):
                logging.error("Could not find directory: {}".format(path))
                continue

            for root, dirs, files in os.walk(path):
                for f in files:
                    fullpath = os.path.abspath(os.path.join(root, f))
                    if fullpath.startswith(os.environ['HOME']):
                        fullpath = fullpath.replace(os.environ['HOME'], "~", 1)
                    if fullpath.split(".")[-1] in self.conf.args.extlist:
                        yield fullpath

    def query_exists_in(self, index, value):
        """
        Generates all found matches.
        """
        for bi in [self.bibfmt_main] + self.bibfmt_efs:
            if value in bi.index[index]:
                yield bi

    def query_exists(self, *args, **kwargs):
        """
        Returns first found match only.
        """
        for bi in self.query_exists_in(*args, **kwargs):
            return bi
        return None

    def check_hash(self, digest, path):
        """Check whether *digest* already exists in any bibliography.

        If the recorded file of a matching entry no longer exists, attempt
        to repoint the entry at *path* (in --append mode); otherwise warn
        about the duplicate. Returns True when a match was found.
        """
        found = False
        for bi in self.query_exists_in(bibfmt_module.HASH, digest):
            found = True
            query_filepos = bi.query(bibfmt_module.HASH, digest)
            query_result = bi.read_entry_dict(query_filepos)
            duplicate = query_result["file"]
            citekey = query_result["citekey"]
            if not os.path.exists(duplicate) and bi.bibfile.writable():
                if not self.conf.args.append or not bi.update_in_place(query_filepos,
                        bibfmt_module.FILE, duplicate, path):
                    logging.warning("File '{}' missing; suggested fix: update '{}' in '{}' with '{}'".format(
                        duplicate, citekey, bi.bibfile.name, path))
                else:
                    # Could update in-place
                    logging.info("Updated entry for '{}' with '{}'".format(
                        citekey, path))
            else:
                logging.warning("Duplicate for '{}' found in '{}': citekey = '{}'".format(
                    path, bi.bibfile.name, citekey))
        return found

    def verify_hash(self, path):
        """Warn if the stored MD5 of *path*'s main-bibliography entry is stale."""
        # Only verify entries in main.
        query_filepos = self.bibfmt_main.query(bibfmt_module.FILE, path)
        if query_filepos is None: return  # not in main
        query_result = self.bibfmt_main.read_entry_dict(query_filepos)
        digest = gen_hash_md5(os.path.expanduser(path)).hexdigest()
        if digest != query_result["md5"]:
            # logging.warn is a deprecated alias for logging.warning.
            logging.warning("MD5 checksum mismatch: {} ({} != {})".format(
                path, digest, query_result["md5"]))

    def interactive_corrections(self, new_entry_args):
        """Prompt the user to correct each editable field; blank keeps the default."""
        logging.info("Entering interactive corrections mode. Leave blank for default.")
        self.bibfmt_main.print_new_entry(**new_entry_args)
        for key in new_entry_args:
            if key in ["file", "date_added", "md5"]:
                continue
            user_data = input("'{}' correction: ".format(key))
            if len(user_data) > 0:
                new_entry_args[key] = user_data
        print()
        return new_entry_args

    def __call__(self):
        for path in self.walk_path():
            # Check existing entries
            if self.query_exists(bibfmt_module.FILE, path) is not None:
                if self.conf.args.verify: self.verify_hash(path)
                continue

            # Generate new entry
            new_entry_args = dict(
                reftype="misc",
                citekey="TODO:{}".format(os.path.basename(path)),
                author="",
                title="",
                year="",
                keywords="",
                file=path,
                annotation="",
                date_added=datetime.date.today().strftime("%Y-%m-%d"))

            if self.conf.args.hash:
                new_entry_args["md5"] = gen_hash_md5(
                    os.path.expanduser(path)).hexdigest()

                # Before we proceed, check if this file is a duplicate of an
                # already existing file, and if so, check existing entry is
                # still valid; if not valid replace file, otherwise warn user.
                # FIX: only possible when hashing is enabled -- the original
                # read new_entry_args["md5"] unconditionally and raised
                # KeyError under --nohash.
                if self.check_hash(new_entry_args["md5"], path):
                    continue

            if self.conf.args.remote:
                logging.info("Attempting to fetch bibliography information remotely: {}".format(
                    path))
                new_entry_args.update(self.conf.bibfetch(filename=path))

            if self.conf.args.interactive:
                new_entry_args = self.interactive_corrections(new_entry_args)

            if self.conf.args.interactive and self.conf.args.rename:
                newpath = os.path.join(os.path.dirname(path), gen_filename_from_bib(new_entry_args))
                logging.info("Rename: {} to {}".format(path, newpath))
                shutil.move(os.path.expanduser(path), os.path.expanduser(newpath))
                new_entry_args["file"] = newpath
                path = newpath

            # Before we add the new entry, check for duplicate cite-keys and
            # disambiguate by appending a letter suffix.
            citekey_exists_in = self.query_exists(bibfmt_module.CITEKEY,
                                                  new_entry_args["citekey"])
            if citekey_exists_in is not None:
                logging.debug("Cite-key already exists in '{}': {}".format(
                    citekey_exists_in.bibfile.name, new_entry_args["citekey"]))
                for c in string.ascii_letters:
                    newcitekey = new_entry_args["citekey"] + c
                    if self.query_exists(bibfmt_module.CITEKEY, newcitekey) is None:
                        break
                new_entry_args["citekey"] = newcitekey

            # Finally, generate new entry
            if self.conf.args.append:
                logging.info("Appending new entry for: {}".format(path))
                self.bibfmt_main.append_new_entry(**new_entry_args)
            else:
                self.bibfmt_main.print_new_entry(**new_entry_args)
def main(conf):
    """Entry point for the sync command.

    Opens the main bibliography (read/write) and any exclude bibliographies
    (read-only), runs :class:`SyncCommand`, and guarantees every opened
    handle is closed — the original leaked already-opened files when a later
    open failed. Returns 1 when a file could not be opened.
    """
    global bibfmt_module
    bibfmt_module = conf.bibfmt_module

    bibfile = None
    excludefiles = []
    try:
        try:
            bibfile = open(conf.args.bibfile, 'r+')
            if conf.args.excludes is not None:
                for filename in conf.args.excludes:
                    excludefiles.append(open(filename, "r"))
        except Exception as e:
            logging.critical("Could not open file: {}".format(e))
            return 1

        sync_cmd = SyncCommand(conf, bibfile, excludefiles)
        sync_cmd()
    finally:
        # Close whatever was opened, even on a partial failure above.
        if bibfile is not None:
            bibfile.close()
        for fh in excludefiles:
            fh.close()
def register_args(parser):
    """Register the sync subcommand's CLI arguments on *parser* and set
    :func:`main` as its handler."""
    parser.add_argument("-p", "--path", metavar="PATH", type=str,
            dest="paths", nargs="+", required=True,
            help="Paths to scan and synchronise BIBFILE with.")
    parser.add_argument("--extlist", type=str,
            dest="extlist", default="pdf", nargs="+",
            help="File-extensions to consider for sync. [Default:pdf]")
    parser.add_argument("-a", "--append", action='store_true',
            dest="append", default=False,
            help="Append to BIBFILE instead of printing to stdout.")
    parser.add_argument("-e", "--exclude", metavar="EXCLUDE", type=str,
            dest="excludes", default=None, nargs="+",
            help="Bibliography files to exclude new entries from.")
    parser.add_argument("--nohash", action="store_false",
            dest="hash", default=True,
            help="Do not generate MD5 sums and check duplicates.")
    parser.add_argument("-i", "--interactive", action="store_true",
            dest="interactive", default=False,
            help="Interactive synchronisation, prompting the user for entry corrections.")
    parser.add_argument("--remote", action="store_true",
            dest="remote", default=False,
            help="Enable remote fetching of bibliography entries.")
    parser.add_argument("--rename", action="store_true",
            dest="rename", default=False,
            help="Rename file to be more descriptive; only valid with --interactive.")
    parser.add_argument("--verify", action="store_true",
            dest="verify", default=False,
            help="Verify checksum of all existing entries.")
    parser.set_defaults(func=main)
|
acshi/osf.io
|
api/users/serializers.py
|
from rest_framework import serializers as ser
from modularodm.exceptions import ValidationValueError
from api.base.exceptions import InvalidModelValueError
from api.base.serializers import AllowMissing, JSONAPIRelationshipSerializer, HideIfDisabled, \
PrefetchRelationshipsSerializer
from website.models import User
from api.base.serializers import (
JSONAPISerializer, LinksField, RelationshipField, DevOnly, IDField, TypeField,
DateByVersion,
)
from api.base.utils import absolute_reverse, get_user_auth
class UserSerializer(JSONAPISerializer):
    """JSON-API serializer for OSF users: names, social handles, and
    relationships to nodes, registrations, and institutions."""

    filterable_fields = frozenset([
        'full_name',
        'given_name',
        'middle_names',
        'family_name',
        'id'
    ])
    non_anonymized_fields = ['type']
    id = IDField(source='_id', read_only=True)
    type = TypeField()
    full_name = ser.CharField(source='fullname', required=True, label='Full name', help_text='Display name used in the general user interface')
    given_name = ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations')
    middle_names = ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations')
    family_name = ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations')
    suffix = HideIfDisabled(ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations'))
    date_registered = HideIfDisabled(DateByVersion(read_only=True))
    active = HideIfDisabled(ser.BooleanField(read_only=True, source='is_active'))

    # Social Fields are broken out to get around DRF complex object bug and to make API updating more user friendly.
    github = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.github',
        allow_blank=True, help_text='GitHub Handle'), required=False, source='social.github')))
    scholar = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.scholar',
        allow_blank=True, help_text='Google Scholar Account'), required=False, source='social.scholar')))
    personal_website = DevOnly(HideIfDisabled(AllowMissing(ser.URLField(required=False, source='social.personal',
        allow_blank=True, help_text='Personal Website'), required=False, source='social.personal')))
    twitter = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.twitter',
        allow_blank=True, help_text='Twitter Handle'), required=False, source='social.twitter')))
    linkedin = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.linkedIn',
        allow_blank=True, help_text='LinkedIn Account'), required=False, source='social.linkedIn')))
    impactstory = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.impactStory',
        allow_blank=True, help_text='ImpactStory Account'), required=False, source='social.impactStory')))
    orcid = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.orcid',
        allow_blank=True, help_text='ORCID'), required=False, source='social.orcid')))
    researcherid = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.researcherId',
        allow_blank=True, help_text='ResearcherId Account'), required=False, source='social.researcherId')))
    researchgate = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.researchGate',
        allow_blank=True, help_text='ResearchGate Account'), required=False, source='social.researchGate')))
    academia_institution = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.academiaInstitution',
        allow_blank=True, help_text='AcademiaInstitution Field'), required=False, source='social.academiaInstitution')))
    academia_profile_id = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.academiaProfileID',
        allow_blank=True, help_text='AcademiaProfileID Field'), required=False, source='social.academiaProfileID')))
    baiduscholar = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.baiduScholar',
        allow_blank=True, help_text='Baidu Scholar Account'), required=False, source='social.baiduScholar')))
    ssrn = DevOnly(HideIfDisabled(AllowMissing(ser.CharField(required=False, source='social.ssrn',
        allow_blank=True, help_text='SSRN Account'), required=False, source='social.ssrn')))
    timezone = HideIfDisabled(ser.CharField(required=False, help_text="User's timezone, e.g. 'Etc/UTC"))
    locale = HideIfDisabled(ser.CharField(required=False, help_text="User's locale, e.g. 'en_US'"))
    links = HideIfDisabled(LinksField(
        {
            'html': 'absolute_url',
            'profile_image': 'profile_image_url',
        }
    ))
    nodes = HideIfDisabled(RelationshipField(
        related_view='users:user-nodes',
        related_view_kwargs={'user_id': '<_id>'},
        related_meta={'projects_in_common': 'get_projects_in_common'},
    ))
    registrations = DevOnly(HideIfDisabled(RelationshipField(
        related_view='users:user-registrations',
        related_view_kwargs={'user_id': '<_id>'},
    )))
    institutions = HideIfDisabled(RelationshipField(
        related_view='users:user-institutions',
        related_view_kwargs={'user_id': '<_id>'},
        self_view='users:user-institutions-relationship',
        self_view_kwargs={'user_id': '<_id>'},
    ))

    class Meta:
        type_ = 'users'

    def get_projects_in_common(self, obj):
        # Count of projects shared between the serialized user and the
        # requesting user; a user trivially shares all their own projects.
        user = get_user_auth(self.context['request']).user
        if obj == user:
            return user.contributor_to.count()
        return obj.n_projects_in_common(user)

    def absolute_url(self, obj):
        if obj is not None:
            return obj.absolute_url
        return None

    def get_absolute_url(self, obj):
        return absolute_reverse('users:user-detail', kwargs={
            'user_id': obj._id,
            'version': self.context['request'].parser_context['kwargs']['version']
        })

    def profile_image_url(self, user):
        size = self.context['request'].query_params.get('profile_image_size')
        return user.profile_image_url(size=size)

    def update(self, instance, validated_data):
        assert isinstance(instance, User), 'instance must be a User'
        for attr, value in validated_data.items():
            if 'social' == attr:
                # Merge social keys individually instead of replacing the dict.
                for key, val in value.items():
                    instance.social[key] = val
            else:
                setattr(instance, attr, value)
        try:
            instance.save()
        except ValidationValueError as e:
            raise InvalidModelValueError(detail=e.message)
        return instance
class UserAddonSettingsSerializer(JSONAPISerializer):
    """Serializer for a user's addon settings: the addon's short name,
    whether the user has authorized it, and per-account links.

    (The previous docstring was copy-pasted from UserDetailSerializer.)
    """
    id = ser.CharField(source='config.short_name', read_only=True)
    user_has_auth = ser.BooleanField(source='has_auth', read_only=True)
    links = LinksField({
        'self': 'get_absolute_url',
        'accounts': 'account_links'
    })

    class Meta:
        type_ = 'user_addons'

    def get_absolute_url(self, obj):
        return absolute_reverse(
            'users:user-addon-detail',
            kwargs={
                'provider': obj.config.short_name,
                'user_id': self.context['request'].parser_context['kwargs']['user_id'],
                'version': self.context['request'].parser_context['kwargs']['version']
            }
        )

    def account_links(self, obj):
        # TODO: [OSF-4933] remove this after refactoring Figshare
        if hasattr(obj, 'external_accounts'):
            return {
                account._id: {
                    'account': absolute_reverse('users:user-external_account-detail', kwargs={
                        'user_id': obj.owner._id,
                        'provider': obj.config.short_name,
                        'account_id': account._id,
                        'version': self.context['request'].parser_context['kwargs']['version']
                    }),
                    'nodes_connected': [n.absolute_api_v2_url for n in obj.get_attached_nodes(account)]
                }
                for account in obj.external_accounts.all()
            }
        return {}
class UserDetailSerializer(UserSerializer):
    """
    Overrides UserSerializer to make id required.
    """
    id = IDField(source='_id', required=True)
class RelatedInstitution(JSONAPIRelationshipSerializer):
    """Relationship serializer for a single related institution."""
    id = ser.CharField(required=False, allow_null=True, source='_id')

    class Meta:
        type_ = 'institutions'

    def get_absolute_url(self, obj):
        return obj.absolute_api_v2_url
class UserInstitutionsRelationshipSerializer(PrefetchRelationshipsSerializer):
    """Serializer for the user <-> institutions relationship endpoint."""
    data = ser.ListField(child=RelatedInstitution())
    links = LinksField({'self': 'get_self_url',
                        'html': 'get_related_url'})

    def get_self_url(self, obj):
        # obj['self'] is the user owning the relationship.
        return absolute_reverse('users:user-institutions-relationship', kwargs={
            'user_id': obj['self']._id,
            'version': self.context['request'].parser_context['kwargs']['version']
        })

    def get_related_url(self, obj):
        return absolute_reverse('users:user-institutions', kwargs={
            'user_id': obj['self']._id,
            'version': self.context['request'].parser_context['kwargs']['version']
        })

    def get_absolute_url(self, obj):
        return obj.absolute_api_v2_url

    class Meta:
        type_ = 'institutions'
|
google-research/robel
|
robel/dkitty/utils/scripted_reset.py
|
# Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hardware reset functions for the D'Kitty."""
import numpy as np
from robel.components.builder import ComponentBuilder
from robel.components.robot import RobotComponentBuilder, RobotState
from robel.components.robot.dynamixel_robot import DynamixelRobotComponent
from robel.components.tracking import TrackerComponentBuilder
from robel.components.tracking.tracker import TrackerComponent
from robel.utils.reset_procedure import ResetProcedure
# Maximum joint angles (radians) used to build the scripted reset poses.
BASEMAX = .8   # base joint limit; not referenced in the visible code below
MIDMAX = 2.4   # middle (upper-leg) joint limit
FOOTMAX = 2.5  # foot (lower-leg) joint limit
# Common parameters for all `set_state` commands.
SET_PARAMS = dict(
    error_tol=5 * np.pi / 180,  # 5 degrees
    last_diff_tol=.1 * np.pi / 180,  # 0.1 degrees (original comment said 5)
)
# Convenience constants.
PI = np.pi
PI2 = np.pi / 2
PI4 = np.pi / 4
PI6 = np.pi / 6
# 6-element poses: 3 joint angles (base, middle, foot) for each of two legs.
OUTWARD_TUCK_POSE = np.array([0, -MIDMAX, FOOTMAX, 0, MIDMAX, -FOOTMAX])
INWARD_TUCK_POSE = np.array([0, MIDMAX, -FOOTMAX, 0, -MIDMAX, FOOTMAX])
class ScriptedDKittyResetProcedure(ResetProcedure):
    """Scripted reset procedure for D'Kitty.
    This resets the D'Kitty to a standing position.
    """
    def __init__(self,
                 upright_threshold: float = 0.9,
                 standing_height: float = 0.35,
                 height_tolerance: float = 0.05,
                 max_attempts: int = 5):
        # upright_threshold: minimum torso rot[2, 2] to count as upright.
        # standing_height: target torso height (metres) when standing.
        # height_tolerance: allowed deviation from standing_height (metres).
        # max_attempts: number of reset cycles before giving up.
        super().__init__()
        self._upright_threshold = upright_threshold
        self._standing_height = standing_height
        self._height_tolerance = height_tolerance
        self._max_attempts = max_attempts
        self._robot = None
        self._tracker = None
    def configure_reset_groups(self, builder: ComponentBuilder):
        """Configures the component groups needed for reset."""
        if isinstance(builder, RobotComponentBuilder):
            # Motors are grouped by side (left/right) and end (front/back)
            # so each reset move can command half of the robot at once.
            builder.add_group('left', motor_ids=[20, 21, 22, 30, 31, 32])
            builder.add_group('right', motor_ids=[10, 11, 12, 40, 41, 42])
            builder.add_group('front', motor_ids=[10, 11, 12, 20, 21, 22])
            builder.add_group('back', motor_ids=[30, 31, 32, 40, 41, 42])
        elif isinstance(builder, TrackerComponentBuilder):
            assert 'torso' in builder.group_configs
    def reset(self, robot: DynamixelRobotComponent, tracker: TrackerComponent):
        """Performs the reset procedure."""
        self._robot = robot
        self._tracker = tracker
        attempts = 0
        # Repeat flip-over/tuck/stand until standing or attempts exhausted.
        while not self._is_standing():
            attempts += 1
            if attempts > self._max_attempts:
                break
            if self._is_upside_down():
                self._perform_flip_over()
            self._perform_tuck_under()
            self._perform_stand_up()
    def _is_standing(self) -> bool:
        """Returns True if the D'Kitty is fully standing."""
        state = self._tracker.get_state('torso', raw_states=True)
        height = state.pos[2]
        # rot[2, 2] is the z-component of the torso's z-axis; 1.0 == upright.
        upright = state.rot[2, 2]
        # NOTE(review): '{:2f}' (field width 2) is likely meant to be '{:.2f}'.
        print('Upright: {:2f}, height: {:2f}'.format(upright, height))
        if upright < self._upright_threshold:
            return False
        if (np.abs(height - self._standing_height) > self._height_tolerance):
            return False
        return True
    def _get_uprightedness(self) -> float:
        """Returns the uprightedness of the D'Kitty."""
        return self._tracker.get_state('torso', raw_states=True).rot[2, 2]
    def _is_upside_down(self) -> bool:
        """Returns whether the D'Kitty is upside-down."""
        return self._get_uprightedness() < 0
    def _perform_flip_over(self):
        """Attempts to flip the D'Kitty over."""
        while self._is_upside_down():
            print('Is upside down {}; attempting to flip over...'.format(
                self._get_uprightedness()))
            # Spread flat and extended.
            self._perform_flatten()
            # If we somehow flipped over from that, we're done.
            if not self._is_upside_down():
                return
            # Tuck in one side while pushing down on the other side.
            self._robot.set_state(
                {
                    'left':
                        RobotState(qpos=np.array([-PI4, -MIDMAX, FOOTMAX] * 2)),
                    'right': RobotState(qpos=np.array([-PI - PI4, 0, 0] * 2)),
                },
                timeout=4,
                **SET_PARAMS,
            )
            # Straighten out the legs that were pushing down.
            self._robot.set_state(
                {
                    'left': RobotState(qpos=np.array([PI2, 0, 0] * 2)),
                    'right': RobotState(qpos=np.array([-PI2, 0, 0] * 2)),
                },
                timeout=4,
                **SET_PARAMS,
            )
    def _perform_tuck_under(self):
        """Tucks the D'Kitty's legs so that they're under itself."""
        # Bring in both sides of the D'Kitty while remaining flat.
        self._perform_flatten()
        # Tuck one side at a time.
        for side in ('left', 'right'):
            self._robot.set_state(
                {side: RobotState(qpos=np.array(INWARD_TUCK_POSE))},
                timeout=4,
                **SET_PARAMS,
            )
    def _perform_flatten(self):
        """Makes the D'Kitty go into a flat pose."""
        # Start from the inward tuck, then rotate the base joints (indices
        # 0 and 3) +/- 90 degrees so the legs lie flat to the sides.
        left_pose = INWARD_TUCK_POSE.copy()
        left_pose[[0, 3]] = PI2
        right_pose = INWARD_TUCK_POSE.copy()
        right_pose[[0, 3]] = -PI2
        self._robot.set_state(
            {
                'left': RobotState(qpos=left_pose),
                'right': RobotState(qpos=right_pose),
            },
            timeout=4,
            **SET_PARAMS,
        )
    def _perform_stand_up(self):
        """Makes the D'Kitty stand up."""
        # Flip the back and front.
        self._robot.set_state(
            {
                'back': RobotState(
                    qpos=np.array(OUTWARD_TUCK_POSE[3:].tolist() * 2)),
            },
            timeout=4,
            **SET_PARAMS,
        )
        self._robot.set_state(
            {
                'front': RobotState(
                    qpos=np.array(OUTWARD_TUCK_POSE[:3].tolist() * 2)),
            },
            timeout=4,
            **SET_PARAMS,
        )
        # Stand straight up.
        self._robot.set_state(
            {
                'dkitty': RobotState(qpos=np.zeros(12)),
            },
            timeout=3,
            **SET_PARAMS,
        )
        # Tuck a bit.
        self._robot.set_state(
            {
                'dkitty': RobotState(qpos=np.array([0, PI6, -PI6] * 4)),
            },
            timeout=1,
            **SET_PARAMS,
        )
        # Stand straight up.
        self._robot.set_state(
            {
                'dkitty': RobotState(qpos=np.zeros(12)),
            },
            timeout=3,
            **SET_PARAMS,
        )
|
wangheda/youtube-8m
|
youtube-8m-wangheda/all_frame_models/biunilstm_model.py
|
import sys
import models
import model_utils
import math
import numpy as np
import video_level_models
import tensorflow as tf
import utils
import tensorflow.contrib.slim as slim
from tensorflow import flags
FLAGS = flags.FLAGS
class BiUniLstmModel(models.BaseModel):
    def create_model(self, model_input, vocab_size, num_frames, **unused_params):
        """Creates a model which uses a stack of Bi-Uni LSTMs to represent the video.

        Layer 1 is a bidirectional LSTM over the frames; layer 2 is a
        unidirectional LSTM over the concatenated layer-1 outputs. The final
        states of all three cells are concatenated and fed to the video-level
        classifier named by FLAGS.video_level_classifier_model.

        Args:
          model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
                       input features.
          vocab_size: The number of classes in the dataset.
          num_frames: A vector of length 'batch' which indicates the number of
               frames for each video (before padding).
        Returns:
          A dictionary with a tensor containing the probability predictions of the
          model in the 'predictions' key. The dimensions of the tensor are
          'batch_size' x 'num_classes'.
        """
        lstm_size = int(FLAGS.lstm_cells)
        # state_is_tuple=False keeps each cell's (c, h) state as one flat
        # tensor so the states can be concatenated below.
        fw_cell = tf.contrib.rnn.BasicLSTMCell(
            lstm_size, forget_bias=1.0, state_is_tuple=False)
        bw_cell = tf.contrib.rnn.BasicLSTMCell(
            lstm_size, forget_bias=1.0, state_is_tuple=False)
        cell = tf.contrib.rnn.BasicLSTMCell(
            lstm_size, forget_bias=1.0, state_is_tuple=False)
        # NOTE(review): 'loss' is never used in this method.
        loss = 0.0
        with tf.variable_scope("RNN"):
            l1_outputs, l1_states = tf.nn.bidirectional_dynamic_rnn(cell_fw = fw_cell, cell_bw = bw_cell,
                inputs = model_input,
                sequence_length=num_frames,
                swap_memory=FLAGS.rnn_swap_memory,
                dtype=tf.float32)
            # Join forward/backward outputs along the feature axis.
            l1_outputs = tf.concat(l1_outputs, axis = 2)
            l2_outputs, l2_states = tf.nn.dynamic_rnn(cell=cell,
                inputs=l1_outputs,
                sequence_length=num_frames,
                swap_memory=FLAGS.rnn_swap_memory,
                dtype=tf.float32)
        state_fw, state_bw = l1_states
        # Video representation: concatenated final states of all three LSTMs.
        state = tf.concat([state_fw, state_bw, l2_states], axis = 1)
        aggregated_model = getattr(video_level_models,
                                   FLAGS.video_level_classifier_model)
        return aggregated_model().create_model(
            model_input=state,
            original_input=model_input,
            vocab_size=vocab_size,
            **unused_params)
|
hkumarmk/oslo.messaging
|
tests/test_exception_serialization.py
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import six
import testscenarios
from oslo import messaging
from oslo_messaging._drivers import common as exceptions
from oslo_messaging.tests import utils as test_utils
from oslo_serialization import jsonutils
# Hook for testscenarios: expands each scenario into its own test case.
load_tests = testscenarios.load_tests_apply_scenarios
# Builtin exceptions lived in the 'exceptions' module on Python 2.
EXCEPTIONS_MODULE = 'exceptions' if six.PY2 else 'builtins'
class NovaStyleException(Exception):
    """Mimics Nova's exception style: a class-level 'format' template that is
    interpolated with the keyword args when no explicit message is given."""
    format = 'I am Nova'
    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs
        msg = message if message else self.format % kwargs
        super(NovaStyleException, self).__init__(msg)
class KwargsStyleException(NovaStyleException):
    """Variant whose message is built by %-interpolating kwargs into 'format'."""
    format = 'I am %(who)s'
def add_remote_postfix(ex):
ex_type = type(ex)
message = str(ex)
str_override = lambda self: message
new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,),
{'__str__': str_override,
'__unicode__': str_override})
new_ex_type.__module__ = '%s_Remote' % ex.__class__.__module__
try:
ex.__class__ = new_ex_type
except TypeError:
ex.args = (message,) + ex.args[1:]
return ex
class SerializeRemoteExceptionTestCase(test_utils.BaseTestCase):
    """Scenario-driven tests for exceptions.serialize_remote_exception."""
    # Axis 1: whether serialization should log the failure.
    _log_failure = [
        ('log_failure', dict(log_failure=True)),
        ('do_not_log_failure', dict(log_failure=False)),
    ]
    # Axis 2: whether the exception is first rewritten with a '_Remote' suffix.
    _add_remote = [
        ('add_remote', dict(add_remote=True)),
        ('do_not_add_remote', dict(add_remote=False)),
    ]
    # Axis 3: the exception style being serialized.
    _exception_types = [
        ('bog_standard', dict(cls=Exception,
                              args=['test'],
                              kwargs={},
                              clsname='Exception',
                              modname=EXCEPTIONS_MODULE,
                              msg='test')),
        ('nova_style', dict(cls=NovaStyleException,
                            args=[],
                            kwargs={},
                            clsname='NovaStyleException',
                            modname=__name__,
                            msg='I am Nova')),
        ('nova_style_with_msg', dict(cls=NovaStyleException,
                                     args=['testing'],
                                     kwargs={},
                                     clsname='NovaStyleException',
                                     modname=__name__,
                                     msg='testing')),
        ('kwargs_style', dict(cls=KwargsStyleException,
                              args=[],
                              kwargs={'who': 'Oslo'},
                              clsname='KwargsStyleException',
                              modname=__name__,
                              msg='I am Oslo')),
    ]
    @classmethod
    def generate_scenarios(cls):
        # Cross product of the three axes above.
        cls.scenarios = testscenarios.multiply_scenarios(cls._log_failure,
                                                         cls._add_remote,
                                                         cls._exception_types)
    def setUp(self):
        super(SerializeRemoteExceptionTestCase, self).setUp()
    def test_serialize_remote_exception(self):
        """Raise the scenario's exception, serialize it, and check the JSON."""
        errors = []
        def stub_error(msg, *a, **kw):
            # Mirror logging's %-interpolation of a single dict argument.
            if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]):
                a = a[0]
            errors.append(str(msg) % a)
        self.stubs.Set(exceptions.LOG, 'error', stub_error)
        try:
            try:
                raise self.cls(*self.args, **self.kwargs)
            except Exception as ex:
                cls_error = ex
                if self.add_remote:
                    ex = add_remote_postfix(ex)
                raise ex
        except Exception:
            exc_info = sys.exc_info()
        serialized = exceptions.serialize_remote_exception(
            exc_info, log_failure=self.log_failure)
        failure = jsonutils.loads(serialized)
        self.assertEqual(self.clsname, failure['class'], failure)
        self.assertEqual(self.modname, failure['module'])
        self.assertEqual(self.msg, failure['message'])
        self.assertEqual([self.msg], failure['args'])
        self.assertEqual(self.kwargs, failure['kwargs'])
        # Note: _Remote prefix not stripped from tracebacks
        tb = cls_error.__class__.__name__ + ': ' + self.msg
        self.assertIn(tb, ''.join(failure['tb']))
        if self.log_failure:
            self.assertTrue(len(errors) > 0, errors)
        else:
            self.assertEqual(0, len(errors), errors)
SerializeRemoteExceptionTestCase.generate_scenarios()
class DeserializeRemoteExceptionTestCase(test_utils.BaseTestCase):
    """Scenario-driven tests for exceptions.deserialize_remote_exception.

    Each scenario supplies the serialized failure fields and the expected
    rebuilt exception: its type, '_Remote'-suffixed class name, args and
    kwargs. Disallowed or unknown exception types fall back to RemoteError.
    """
    # Modules whose exceptions may be rebuilt directly.
    _standard_allowed = [__name__]
    scenarios = [
        ('bog_standard',
         dict(allowed=_standard_allowed,
              clsname='Exception',
              modname=EXCEPTIONS_MODULE,
              cls=Exception,
              args=['test'],
              kwargs={},
              str='test\ntraceback\ntraceback\n',
              remote_name='Exception',
              remote_args=('test\ntraceback\ntraceback\n', ),
              remote_kwargs={})),
        ('nova_style',
         dict(allowed=_standard_allowed,
              clsname='NovaStyleException',
              modname=__name__,
              cls=NovaStyleException,
              args=[],
              kwargs={},
              str='test\ntraceback\ntraceback\n',
              remote_name='NovaStyleException_Remote',
              remote_args=('I am Nova', ),
              remote_kwargs={})),
        ('nova_style_with_msg',
         dict(allowed=_standard_allowed,
              clsname='NovaStyleException',
              modname=__name__,
              cls=NovaStyleException,
              args=['testing'],
              kwargs={},
              str='test\ntraceback\ntraceback\n',
              remote_name='NovaStyleException_Remote',
              remote_args=('testing', ),
              remote_kwargs={})),
        ('kwargs_style',
         dict(allowed=_standard_allowed,
              clsname='KwargsStyleException',
              modname=__name__,
              cls=KwargsStyleException,
              args=[],
              kwargs={'who': 'Oslo'},
              str='test\ntraceback\ntraceback\n',
              remote_name='KwargsStyleException_Remote',
              remote_args=('I am Oslo', ),
              remote_kwargs={})),
        ('not_allowed',
         dict(allowed=[],
              clsname='NovaStyleException',
              modname=__name__,
              cls=messaging.RemoteError,
              args=[],
              kwargs={},
              str=("Remote error: NovaStyleException test\n"
                   "[%r]." % u'traceback\ntraceback\n'),
              msg=("Remote error: NovaStyleException test\n"
                   "[%r]." % u'traceback\ntraceback\n'),
              remote_name='RemoteError',
              remote_args=(),
              remote_kwargs={'exc_type': 'NovaStyleException',
                             'value': 'test',
                             'traceback': 'traceback\ntraceback\n'})),
        ('unknown_module',
         dict(allowed=['notexist'],
              clsname='Exception',
              modname='notexist',
              cls=messaging.RemoteError,
              args=[],
              kwargs={},
              str=("Remote error: Exception test\n"
                   "[%r]." % u'traceback\ntraceback\n'),
              msg=("Remote error: Exception test\n"
                   "[%r]." % u'traceback\ntraceback\n'),
              remote_name='RemoteError',
              remote_args=(),
              remote_kwargs={'exc_type': 'Exception',
                             'value': 'test',
                             'traceback': 'traceback\ntraceback\n'})),
        ('unknown_exception',
         dict(allowed=[],
              clsname='FarcicalError',
              modname=EXCEPTIONS_MODULE,
              cls=messaging.RemoteError,
              args=[],
              kwargs={},
              str=("Remote error: FarcicalError test\n"
                   "[%r]." % u'traceback\ntraceback\n'),
              msg=("Remote error: FarcicalError test\n"
                   "[%r]." % u'traceback\ntraceback\n'),
              remote_name='RemoteError',
              remote_args=(),
              remote_kwargs={'exc_type': 'FarcicalError',
                             'value': 'test',
                             'traceback': 'traceback\ntraceback\n'})),
        ('unknown_kwarg',
         dict(allowed=[],
              clsname='Exception',
              modname=EXCEPTIONS_MODULE,
              cls=messaging.RemoteError,
              args=[],
              kwargs={'foobar': 'blaa'},
              str=("Remote error: Exception test\n"
                   "[%r]." % u'traceback\ntraceback\n'),
              msg=("Remote error: Exception test\n"
                   "[%r]." % u'traceback\ntraceback\n'),
              remote_name='RemoteError',
              remote_args=(),
              remote_kwargs={'exc_type': 'Exception',
                             'value': 'test',
                             'traceback': 'traceback\ntraceback\n'})),
        ('system_exit',
         dict(allowed=[],
              clsname='SystemExit',
              modname=EXCEPTIONS_MODULE,
              cls=messaging.RemoteError,
              args=[],
              kwargs={},
              str=("Remote error: SystemExit test\n"
                   "[%r]." % u'traceback\ntraceback\n'),
              msg=("Remote error: SystemExit test\n"
                   "[%r]." % u'traceback\ntraceback\n'),
              remote_name='RemoteError',
              remote_args=(),
              remote_kwargs={'exc_type': 'SystemExit',
                             'value': 'test',
                             'traceback': 'traceback\ntraceback\n'})),
    ]
    def test_deserialize_remote_exception(self):
        """Round-trip a serialized failure dict and verify the rebuilt exception."""
        failure = {
            'class': self.clsname,
            'module': self.modname,
            'message': 'test',
            'tb': ['traceback\ntraceback\n'],
            'args': self.args,
            'kwargs': self.kwargs,
        }
        serialized = jsonutils.dumps(failure)
        ex = exceptions.deserialize_remote_exception(serialized, self.allowed)
        self.assertIsInstance(ex, self.cls)
        self.assertEqual(self.remote_name, ex.__class__.__name__)
        self.assertEqual(self.str, six.text_type(ex))
        if hasattr(self, 'msg'):
            self.assertEqual(self.msg, six.text_type(ex))
            self.assertEqual((self.msg,) + self.remote_args, ex.args)
        else:
            self.assertEqual(self.remote_args, ex.args)
|
sebrandon1/tempest
|
tempest/lib/services/compute/aggregates_client.py
|
# Copyright 2013 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from tempest.lib.api_schema.response.compute.v2_1 import aggregates as schema
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.compute import base_compute_client
class AggregatesClient(base_compute_client.BaseComputeClient):
    """REST client for the Compute v2.1 'os-aggregates' API."""
    def list_aggregates(self):
        """Get aggregate list."""
        resp, body = self.get("os-aggregates")
        body = json.loads(body)
        self.validate_response(schema.list_aggregates, resp, body)
        return rest_client.ResponseBody(resp, body)
    def show_aggregate(self, aggregate_id):
        """Get details of the given aggregate."""
        resp, body = self.get("os-aggregates/%s" % aggregate_id)
        body = json.loads(body)
        self.validate_response(schema.get_aggregate, resp, body)
        return rest_client.ResponseBody(resp, body)
    def create_aggregate(self, **kwargs):
        """Create a new aggregate.

        Available params: see http://developer.openstack.org/
                              api-ref-compute-v2.1.html#createAggregate
        """
        post_body = json.dumps({'aggregate': kwargs})
        resp, body = self.post('os-aggregates', post_body)
        body = json.loads(body)
        self.validate_response(schema.create_aggregate, resp, body)
        return rest_client.ResponseBody(resp, body)
    def update_aggregate(self, aggregate_id, **kwargs):
        """Update an aggregate.

        Available params: see http://developer.openstack.org/
                              api-ref-compute-v2.1.html#updateAggregate
        """
        put_body = json.dumps({'aggregate': kwargs})
        resp, body = self.put('os-aggregates/%s' % aggregate_id, put_body)
        body = json.loads(body)
        self.validate_response(schema.update_aggregate, resp, body)
        return rest_client.ResponseBody(resp, body)
    def delete_aggregate(self, aggregate_id):
        """Delete the given aggregate."""
        resp, body = self.delete("os-aggregates/%s" % aggregate_id)
        self.validate_response(schema.delete_aggregate, resp, body)
        return rest_client.ResponseBody(resp, body)
    def is_resource_deleted(self, id):
        """Return True once the aggregate can no longer be shown (404)."""
        try:
            self.show_aggregate(id)
        except lib_exc.NotFound:
            return True
        return False
    @property
    def resource_type(self):
        """Return the primary type of resource this client works with."""
        return 'aggregate'
    def add_host(self, aggregate_id, **kwargs):
        """Add a host to the given aggregate.

        Available params: see http://developer.openstack.org/
                              api-ref-compute-v2.1.html#addHost
        """
        post_body = json.dumps({'add_host': kwargs})
        resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                               post_body)
        body = json.loads(body)
        self.validate_response(schema.aggregate_add_remove_host, resp, body)
        return rest_client.ResponseBody(resp, body)
    def remove_host(self, aggregate_id, **kwargs):
        """Remove a host from the given aggregate.

        Available params: see http://developer.openstack.org/
                              api-ref-compute-v2.1.html#removeAggregateHost
        """
        post_body = json.dumps({'remove_host': kwargs})
        resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                               post_body)
        body = json.loads(body)
        self.validate_response(schema.aggregate_add_remove_host, resp, body)
        return rest_client.ResponseBody(resp, body)
    def set_metadata(self, aggregate_id, **kwargs):
        """Replace the aggregate's existing metadata with new metadata.

        Available params: see http://developer.openstack.org/
                              api-ref-compute-v2.1.html#addAggregateMetadata
        """
        post_body = json.dumps({'set_metadata': kwargs})
        resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                               post_body)
        body = json.loads(body)
        self.validate_response(schema.aggregate_set_metadata, resp, body)
        return rest_client.ResponseBody(resp, body)
|
bravomikekilo/mxconsole
|
mxconsole/util/decorator_utils_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""decorator_utils tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from mxconsole.platform import test
from mxconsole.platform import tf_logging as logging
from mxconsole.util import decorator_utils
def _test_function(unused_arg=0):
    """No-op module-level function used as a fixture by the tests below."""
    pass
class GetQualifiedNameTest(test.TestCase):
    """Tests for decorator_utils.get_qualified_name."""
    def test_method(self):
        # Methods are qualified as 'ClassName.method_name'.
        self.assertEqual(
            "GetQualifiedNameTest.test_method",
            decorator_utils.get_qualified_name(GetQualifiedNameTest.test_method))
    def test_function(self):
        # Plain functions are qualified by their bare name.
        self.assertEqual("_test_function",
                         decorator_utils.get_qualified_name(_test_function))
class AddNoticeToDocstringTest(test.TestCase):
    """Tests for decorator_utils.add_notice_to_docstring."""
    def _check(self, doc, expected):
        # Run add_notice_to_docstring with fixed notice parameters and
        # compare the rewritten docstring against 'expected'.
        self.assertEqual(
            decorator_utils.add_notice_to_docstring(
                doc=doc,
                instructions="Instructions",
                no_doc_str="Nothing here",
                suffix_str="(suffix)",
                notice=["Go away"]),
            expected)
    def test_regular(self):
        expected = ("Brief (suffix)\n\nGo away\nInstructions\n\nDocstring\n\n"
                    "Args:\n  arg1: desc")
        # No indent for main docstring
        self._check("Brief\n\nDocstring\n\nArgs:\n  arg1: desc", expected)
        # 2 space indent for main docstring, blank lines not indented
        self._check("Brief\n\n  Docstring\n\n  Args:\n    arg1: desc", expected)
        # 2 space indent for main docstring, blank lines indented as well.
        self._check("Brief\n  \n  Docstring\n  \n  Args:\n    arg1: desc", expected)
        # No indent for main docstring, first line blank.
        self._check("\n  Brief\n  \n  Docstring\n  \n  Args:\n    arg1: desc",
                    expected)
        # 2 space indent, first line blank.
        self._check("\n  Brief\n  \n  Docstring\n  \n  Args:\n    arg1: desc",
                    expected)
    def test_brief_only(self):
        # Docstrings with only a summary line still get the notice appended.
        expected = "Brief (suffix)\n\nGo away\nInstructions"
        self._check("Brief", expected)
        self._check("Brief\n", expected)
        self._check("Brief\n  ", expected)
        self._check("\nBrief\n  ", expected)
        self._check("\n  Brief\n  ", expected)
    def test_no_docstring(self):
        # Missing/empty docstrings are replaced with the no_doc_str text.
        expected = "Nothing here\n\nGo away\nInstructions"
        self._check(None, expected)
        self._check("", expected)
    def test_no_empty_line(self):
        # Summary immediately followed by body (no blank line) still works.
        expected = "Brief (suffix)\n\nGo away\nInstructions\n\nDocstring"
        # No second line indent
        self._check("Brief\nDocstring", expected)
        # 2 space second line indent
        self._check("Brief\n  Docstring", expected)
        # No second line indent, first line blank
        self._check("\nBrief\nDocstring", expected)
        # 2 space second line indent, first line blank
        self._check("\n  Brief\n  Docstring", expected)
class ValidateCallableTest(test.TestCase):
    """Tests for decorator_utils.validate_callable across callable kinds."""
    def test_function(self):
        decorator_utils.validate_callable(_test_function, "test")
    def test_method(self):
        decorator_utils.validate_callable(self.test_method, "test")
    def test_callable(self):
        # Instances with __call__ count as callable.
        class TestClass(object):
            def __call__(self):
                pass
        decorator_utils.validate_callable(TestClass(), "test")
    def test_partial(self):
        # functools.partial objects count as callable.
        partial = functools.partial(_test_function, unused_arg=7)
        decorator_utils.validate_callable(partial, "test")
    def test_fail_non_callable(self):
        # Non-callables must raise ValueError.
        x = 0
        self.assertRaises(ValueError, decorator_utils.validate_callable, x, "test")
# Run this module's tests when executed directly.
if __name__ == "__main__":
  test.main()
|
opensignal/airflow
|
airflow/www/forms.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
from flask_admin.form import DateTimePickerWidget
from wtforms import DateTimeField, SelectField
from flask_wtf import Form
class DateTimeForm(Form):
    """Execution-date filter form used by the gantt and graph views."""
    # Date filter form needed for gantt and graph view
    execution_date = DateTimeField(
        "Execution date", widget=DateTimePickerWidget())
class DateTimeWithNumRunsForm(Form):
    """Anchor-date plus number-of-runs form for the tree view, task
    duration and landing-times views.
    """
    # Pass the callable (not datetime.now()) so the default anchor date is
    # evaluated each time a form is instantiated, instead of being frozen
    # at module-import time (e.g. webserver start). WTForms invokes a
    # callable default when rendering/validating the field.
    base_date = DateTimeField(
        "Anchor date", widget=DateTimePickerWidget(), default=datetime.now)
    num_runs = SelectField("Number of runs", default=25, choices=(
        (5, "5"),
        (25, "25"),
        (50, "50"),
        (100, "100"),
        (365, "365"),
    ))
|
alex-ip/geophys_utils
|
geophys_utils/_dem_utils.py
|
#!/usr/bin/env python
#===============================================================================
# Copyright 2017 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
'''
Created on 05/10/2012
@author: Alex Ip
'''
import os
import sys
import logging
import numpy
from osgeo import osr
import numexpr
import netCDF4
from scipy.ndimage import sobel
from geophys_utils._netcdf_grid_utils import NetCDFGridUtils
from geophys_utils._array_pieces import array_pieces
from geophys_utils._vincenty import vinc_dist
from geophys_utils._blrb import interpolate_grid
# Configure a stdout handler so INFO-and-above messages reach the console.
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
console_formatter = logging.Formatter('%(message)s')
console_handler.setFormatter(console_formatter)
logger = logging.getLogger(__name__)
if not logger.level:
    logger.setLevel(logging.DEBUG) # Default logging level for all modules
logger.addHandler(console_handler)
# Degrees-to-radians conversion factor (pi / 180).
RADIANS_PER_DEGREE = 0.01745329251994329576923690768489
class earth(object):
    """Namespace of Earth shape/rotation constants (metres, WGS-84 based)."""
    # Mean radius
    RADIUS = 6371009.0  # (metres)
    # WGS-84
    #RADIUS = 6378135.0  # equatorial (metres)
    #RADIUS = 6356752.0  # polar (metres)
    # Length of Earth ellipsoid semi-major axis (metres)
    SEMI_MAJOR_AXIS = 6378137.0
    # WGS-84
    A = 6378137.0           # equatorial radius (metres)
    B = 6356752.3142        # polar radius (metres)
    F = (A - B) / A         # flattening
    ECC2 = 1.0 - B**2/A**2  # squared eccentricity
    MEAN_RADIUS = (A*2 + B) / 3  # (2A + B) / 3
    # Earth ellipsoid eccentricity (dimensionless)
    #ECCENTRICITY = 0.00669438
    #ECC2 = math.pow(ECCENTRICITY, 2)
    # Earth rotational angular velocity (radians/sec)
    OMEGA = 0.000072722052
class DEMUtils(NetCDFGridUtils):
    def getFileSizekB(self, path):
        """Gets the size of a file (kilobytes).

        NOTE(review): original docstring said "megabytes"; the code divides
        the byte count by 1024 once, i.e. kilobytes.

        Arguments:
            path: file path

        Returns:
            File size (kB)

        Raises:
            OSError [Errno=2] if file does not exist
        """
        return os.path.getsize(path) / 1024
    def getFileSizeMB(self, path):
        """Gets the size of a file (megabytes).

        Arguments:
            path: file path

        Returns:
            File size (MB)

        Raises:
            OSError [Errno=2] if file does not exist
        """
        # kB result divided by 1024 again gives MB.
        return self.getFileSizekB(path) / 1024
    def get_pixel_size(self, index_tuple):
        """
        Returns X & Y sizes in metres of specified pixel as a tuple.
        N.B: Pixel ordinates are zero-based from top left
        """
        x, y = index_tuple
        logger.debug('(x, y) = (%f, %f)', x, y)
        # Transform from the dataset's CRS to its geographic (lat/lon) CRS so
        # distances can be measured on the ellipsoid via Vincenty.
        native_spatial_reference = osr.SpatialReference()
        native_spatial_reference.ImportFromWkt(self.crs)
        latlong_spatial_reference = native_spatial_reference.CloneGeogCS()
        coord_transform_to_latlong = osr.CoordinateTransformation(native_spatial_reference, latlong_spatial_reference)
        # Determine pixel centre and edges in georeferenced coordinates
        xw = self.GeoTransform[0] + x * self.GeoTransform[1]
        yn = self.GeoTransform[3] + y * self.GeoTransform[5]
        xc = self.GeoTransform[0] + (x + 0.5) * self.GeoTransform[1]
        yc = self.GeoTransform[3] + (y + 0.5) * self.GeoTransform[5]
        xe = self.GeoTransform[0] + (x + 1.0) * self.GeoTransform[1]
        ys = self.GeoTransform[3] + (y + 1.0) * self.GeoTransform[5]
        logger.debug('xw = %f, yn = %f, xc = %f, yc = %f, xe = %f, ys = %f', xw, yn, xc, yc, xe, ys)
        # X size: geodesic distance from west edge to east edge at centre row.
        lon1, lat1, _z = coord_transform_to_latlong.TransformPoint(xw, yc, 0)
        lon2, lat2, _z = coord_transform_to_latlong.TransformPoint(xe, yc, 0)
        logger.debug('For X size: (lon1, lat1) = (%f, %f), (lon2, lat2) = (%f, %f)', lon1, lat1, lon2, lat2)
        x_size, _az_to, _az_from = vinc_dist(earth.F, earth.A,
                                             lat1 * RADIANS_PER_DEGREE, lon1 * RADIANS_PER_DEGREE,
                                             lat2 * RADIANS_PER_DEGREE, lon2 * RADIANS_PER_DEGREE)
        # Y size: geodesic distance from north edge to south edge at centre column.
        lon1, lat1, _z = coord_transform_to_latlong.TransformPoint(xc, yn, 0)
        lon2, lat2, _z = coord_transform_to_latlong.TransformPoint(xc, ys, 0)
        logger.debug('For Y size: (lon1, lat1) = (%f, %f), (lon2, lat2) = (%f, %f)', lon1, lat1, lon2, lat2)
        y_size, _az_to, _az_from = vinc_dist(earth.F, earth.A,
                                             lat1 * RADIANS_PER_DEGREE, lon1 * RADIANS_PER_DEGREE,
                                             lat2 * RADIANS_PER_DEGREE, lon2 * RADIANS_PER_DEGREE)
        logger.debug('(x_size, y_size) = (%f, %f)', x_size, y_size)
        return (x_size, y_size)
    def get_pixel_size_grid(self, source_array, offsets):
        """ Returns grid with interpolated X and Y pixel sizes for given arrays"""
        # Helpers sampled only at sparse points by interpolate_grid; offsets
        # translate piece-local indices to full-dataset pixel ordinates.
        def get_pixel_x_size(x, y):
            return self.get_pixel_size((offsets[0] + x, offsets[1] + y))[0]
        def get_pixel_y_size(x, y):
            return self.get_pixel_size((offsets[0] + x, offsets[1] + y))[1]
        pixel_size_function = [get_pixel_x_size, get_pixel_y_size]
        # Output shape (rows, cols, 2): [:, :, 0] = x sizes, [:, :, 1] = y sizes.
        pixel_size_grid = numpy.zeros(shape=(source_array.shape[0], source_array.shape[1], 2)).astype(source_array.dtype)
        for dim_index in range(2):
            interpolate_grid(depth=1,
                             shape=pixel_size_grid[:,:,dim_index].shape,
                             eval_func=pixel_size_function[dim_index],
                             grid=pixel_size_grid[:,:,dim_index])
        return pixel_size_grid
    def __init__(self, dem_dataset):
        """Constructor

        Arguments:
            dem_dataset: NetCDF dataset containing DEM data
                (original docstring called this 'source_dem_nc')
        """
        # Start of init function - Call inherited constructor first
        NetCDFGridUtils.__init__(self, dem_dataset)
def create_dzdxy_arrays(self, elevation_array, offsets):
'''
Function to return two arrays containing dzdx and dzdy values
'''
def pixels_in_m():
'''
Function returning True if pixels are in metres
'''
result = True
for dimension_name in self.data_variable.dimensions:
try:
if self.netcdf_dataset.variables[dimension_name].units == 'm':
continue
else:
result = False
break
except:
result = False
break
return result
native_pixel_x_size = float(abs(self.GeoTransform[1]))
native_pixel_y_size = float(abs(self.GeoTransform[5]))
dzdx_array = sobel(elevation_array, axis=1)/(8. * native_pixel_x_size)
dzdy_array = sobel(elevation_array, axis=0)/(8. * native_pixel_y_size)
if pixels_in_m():
print('Pixels are a uniform size of {} x {} metres.'.format(native_pixel_x_size, native_pixel_y_size))
# Pixel sizes are in metres - use scalars
pixel_x_metres = native_pixel_x_size
pixel_y_metres = native_pixel_y_size
else:
print('Pixels are of varying sizes. Computing and applying pixel size arrays.')
# Compute variable pixel size
m_array = self.get_pixel_size_grid(elevation_array, offsets)
pixel_x_metres = m_array[:,:,0]
pixel_y_metres = m_array[:,:,1]
dzdx_array = numexpr.evaluate("dzdx_array * native_pixel_x_size / pixel_x_metres")
dzdy_array = numexpr.evaluate("dzdy_array * native_pixel_y_size / pixel_y_metres")
return dzdx_array, dzdy_array
    def create_slope_array(self, dzdx_array, dzdy_array):
        """Return slope in degrees from horizontal (0=flat, 90=vertical)."""
        # Gradient magnitude, then arctan converted from radians to degrees.
        hypotenuse_array = numpy.hypot(dzdx_array, dzdy_array)
        slope_array = numexpr.evaluate("arctan(hypotenuse_array) / RADIANS_PER_DEGREE")
        #Blank out no-data cells
        slope_array[numpy.isnan(slope_array)] = self.data_variable._FillValue
        return slope_array
def create_aspect_array(self, dzdx_array, dzdy_array):
    """
    Return aspect as a compass bearing in degrees (0=North, 90=East, ...)
    of the surface normal. NaN cells are replaced with the data variable's
    _FillValue.
    """
    # Convert angles from conventional radians to compass heading 0-360
    aspect_array = numexpr.evaluate("(450 - arctan2(dzdy_array, -dzdx_array) / RADIANS_PER_DEGREE) % 360")
    #Blank out no-data cells
    aspect_array[numpy.isnan(aspect_array)] = self.data_variable._FillValue
    return aspect_array
def create_slope_and_aspect(self, slope_path=None, aspect_path=None, overlap=4):
    '''
    Create slope & aspect netCDF datasets from the elevation dataset.

    :param slope_path: output path for the slope dataset
        (default: "<nc_path>_slope.nc")
    :param aspect_path: output path for the aspect dataset
        (default: "<nc_path>_aspect.nc")
    :param overlap: pixels of overlap between processed pieces; the overlap
        is trimmed before writing so Sobel edge effects are discarded
    '''
    # Copy dataset structure but not data
    slope_path = slope_path or os.path.splitext(self.nc_path)[0] + '_slope.nc'
    self.copy(slope_path, empty_var_list=[self.data_variable.name])
    slope_nc_dataset = netCDF4.Dataset(slope_path, 'r+')
    slope_nc_dataset.renameVariable(self.data_variable.name, 'slope')
    slope_variable = slope_nc_dataset.variables['slope']
    slope_variable.long_name = 'slope expressed in degrees from horizontal (0=horizontal, 90=vertical)'
    slope_variable.units = 'degrees'
    aspect_path = aspect_path or os.path.splitext(self.nc_path)[0] + '_aspect.nc'
    self.copy(aspect_path, empty_var_list=[self.data_variable.name])
    aspect_nc_dataset = netCDF4.Dataset(aspect_path, 'r+')
    aspect_nc_dataset.renameVariable(self.data_variable.name, 'aspect')
    aspect_variable = aspect_nc_dataset.variables['aspect']
    aspect_variable.long_name = 'aspect expressed compass bearing of normal to plane (0=North, 90=East, etc.)'
    aspect_variable.units = 'degrees'
    # Process dataset in small pieces
    for piece_array, offsets in array_pieces(self.data_variable,
                                             max_bytes=self.max_bytes if self.opendap else self.max_bytes/2, # Need to allow for multiple arrays in memory
                                             overlap=overlap):
        print('Processing array of shape {} at {}'.format(piece_array.shape, offsets))
        if type(piece_array) == numpy.ma.masked_array:
            piece_array = piece_array.data # Convert from masked array to plain array
        piece_array[(piece_array == self.data_variable._FillValue)] = numpy.NaN
        # Calculate raw source & destination slices including overlaps
        source_slices = [slice(0,
                               piece_array.shape[dim_index])
                         for dim_index in range(2)
                         ]
        dest_slices = [slice(offsets[dim_index],
                             offsets[dim_index] + piece_array.shape[dim_index])
                       for dim_index in range(2)
                       ]
        # Trim overlaps off source & destination slices (except at dataset edges)
        source_slices = [slice(0 if dest_slices[dim_index].start < overlap else source_slices[dim_index].start+overlap,
                               piece_array.shape[dim_index] if (self.data_variable.shape[dim_index] - dest_slices[dim_index].stop) < overlap else source_slices[dim_index].stop-overlap)
                         for dim_index in range(2)
                         ]
        dest_slices = [slice(0 if dest_slices[dim_index].start < overlap else dest_slices[dim_index].start+overlap,
                             self.data_variable.shape[dim_index] if (self.data_variable.shape[dim_index] - dest_slices[dim_index].stop) < overlap else dest_slices[dim_index].stop-overlap)
                       for dim_index in range(2)
                       ]
        print('Computing dzdx and dzdy arrays')
        dzdx_array, dzdy_array = self.create_dzdxy_arrays(piece_array, offsets)
        print('Computing slope array')
        result_array = self.create_slope_array(dzdx_array, dzdy_array)
        # BUG FIX: these messages previously used "%s" placeholders with
        # str.format(), so the literal "%s" was printed instead of the values.
        print('Writing slope array of shape {} at {}'.format(tuple([dest_slices[dim_index].stop - dest_slices[dim_index].start
                                                                    for dim_index in range(2)
                                                                    ]),
                                                             tuple([dest_slices[dim_index].start
                                                                    for dim_index in range(2)
                                                                    ])
                                                             )
              )
        # Index with tuples: modern numpy no longer accepts a *list* of
        # slices as a multidimensional index (it is treated as fancy indexing).
        slope_variable[tuple(dest_slices)] = result_array[tuple(source_slices)]
        slope_nc_dataset.sync()
        print('Computing aspect array')
        result_array = self.create_aspect_array(dzdx_array, dzdy_array)
        print('Writing aspect array of shape {} at {}'.format(tuple([dest_slices[dim_index].stop - dest_slices[dim_index].start
                                                                     for dim_index in range(2)
                                                                     ]),
                                                              tuple([dest_slices[dim_index].start
                                                                     for dim_index in range(2)
                                                                     ])
                                                              )
              )
        aspect_variable[tuple(dest_slices)] = result_array[tuple(source_slices)]
        aspect_nc_dataset.sync()
    slope_nc_dataset.close()
    # BUG FIX: '%s'.format(...) printed the literal "%s"; use "{}".
    print('Finished writing slope dataset {}'.format(slope_path))
    aspect_nc_dataset.close()
    print('Finished writing aspect dataset {}'.format(aspect_path))
if __name__ == '__main__':
    # Usage: <script> <dem_path> [<slope_path>] [<aspect_path>]
    dem_path = sys.argv[1]
    try:
        slope_path = sys.argv[2]
    # Narrowed from a bare except: only a missing argument should fall back
    # to the default path; other errors must propagate.
    except IndexError:
        slope_path = None
    try:
        aspect_path = sys.argv[3]
    except IndexError:
        aspect_path = None
    dem_utils = DEMUtils(dem_path)
    dem_utils.create_slope_and_aspect(slope_path, aspect_path)
|
dcramer/taskmaster
|
src/taskmaster/cli/master.py
|
"""
taskmaster.cli.master
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from taskmaster.util import parse_options
from taskmaster.constants import (DEFAULT_LOG_LEVEL,
DEFAULT_ADDRESS, DEFAULT_BUFFER_SIZE)
def run(target, kwargs=None, reset=False, size=DEFAULT_BUFFER_SIZE,
        address=DEFAULT_ADDRESS, log_level=DEFAULT_LOG_LEVEL):
    """Build a Server/Controller pair for *target*, optionally reset its
    queue state, and start processing."""
    from taskmaster.server import Server, Controller

    queue_server = Server(address, size=size, log_level=log_level)
    task_controller = Controller(queue_server, target, kwargs=kwargs,
                                 log_level=log_level)
    if reset:
        task_controller.reset()
    task_controller.start()
def main():
    """Command-line entry point: parse options, then launch the master via run()."""
    import optparse
    import sys
    parser = optparse.OptionParser()
    parser.add_option("--address", dest="address", default=DEFAULT_ADDRESS)
    parser.add_option("--size", dest="size", default=DEFAULT_BUFFER_SIZE, type=int)
    parser.add_option("--reset", dest="reset", default=False, action='store_true')
    parser.add_option("--log-level", dest="log_level", default=DEFAULT_LOG_LEVEL)
    (options, args) = parser.parse_args()
    if len(args) < 1:
        # Parenthesized print: identical output on Python 2 for a single
        # argument, and valid syntax on Python 3.
        print('Usage: tm-master <callback> [key=value, key2=value2]')
        sys.exit(1)
    # Option names match run()'s keyword parameters, so the parsed options
    # can be splatted straight through.
    sys.exit(run(args[0], parse_options(args[1:]), **options.__dict__))
if __name__ == '__main__':
    main()
|
tomjshine/pynet
|
week1/ex6.py
|
#!/usr/bin/env python
# Exercise: serialize a list (containing a nested dict) to JSON and YAML files.
# import modules needed
from pprint import pprint as pp
import json, yaml
# initialize list and dict
my_list = []
my_dict = {'key1': 'val1','key2': 'val2', 'key3': {'subkey1': 'subval1', 'subkey2': 'subval2'}}
# BUG FIX: wrap in list() - on Python 3, range() returns a lazy range object
# that has no .append() method, so the next line would raise AttributeError.
my_list = list(range(10))
my_list.append(my_dict)
# dump to json
with open('json.txt', 'w') as f:
    json.dump(my_list, f, sort_keys=True, indent=4)
# dump to yaml
with open('yaml.txt', 'w') as f:
    yaml.dump(my_list, f)
|
valeros/platformio
|
platformio/platforms/linux_x86_64.py
|
# Copyright 2014-2016 Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from platformio.platforms.base import BasePlatform
from platformio.util import get_systype
class Linux_x86_64Platform(BasePlatform):
    """
    Linux x86_64 (64-bit) is a Unix-like and mostly POSIX-compliant
    computer operating system (OS) assembled under the model of free and
    open-source software development and distribution.
    Using host OS (Mac OS X or Linux 64-bit) you can build native
    application for Linux x86_64 platform.
    http://platformio.org/platforms/linux_x86_64
    """

    # Toolchain packages this platform depends on; "default" packages are
    # installed automatically by the BasePlatform machinery.
    PACKAGES = {
        "toolchain-gcclinux64": {
            "alias": "toolchain",
            "default": True
        }
    }

    def __init__(self):
        # When already running natively on linux_x86_64 the system toolchain
        # is used, so the cross-toolchain entry is removed from the
        # class-level PACKAGES dict before the base constructor processes it.
        if get_systype() == "linux_x86_64":
            del self.PACKAGES['toolchain-gcclinux64']
        BasePlatform.__init__(self)
|
FederatedAI/FATE
|
python/federatedml/ensemble/boosting/hetero/hetero_fast_secureboost_guest.py
|
from typing import List
import numpy as np
import functools
from federatedml.ensemble.boosting.hetero.hetero_secureboost_guest import HeteroSecureBoostingTreeGuest
from federatedml.param.boosting_param import HeteroFastSecureBoostParam
from federatedml.ensemble.basic_algorithms import HeteroFastDecisionTreeGuest
from federatedml.ensemble.boosting.hetero import hetero_fast_secureboost_plan as plan
from federatedml.util import LOGGER
from federatedml.util import consts
class HeteroFastSecureBoostingTreeGuest(HeteroSecureBoostingTreeGuest):
    """
    Guest-side entry point for Hetero Fast SecureBoost.

    Extends the standard hetero SecureBoost guest with a "tree plan":
    in MIX mode each party fits a fixed number of consecutive trees on its
    own features only; in LAYERED mode guest and host each own a fixed
    number of layers within every tree.
    """

    def __init__(self):
        super(HeteroFastSecureBoostingTreeGuest, self).__init__()
        # number of consecutive trees each party builds in MIX mode
        self.tree_num_per_party = 1
        # layer counts owned by guest/host in LAYERED mode
        self.guest_depth = 0
        self.host_depth = 0
        self.work_mode = consts.MIX_TREE
        # tree plan is built lazily on the first get_tree_plan() call
        self.init_tree_plan = False
        self.tree_plan = []
        self.model_param = HeteroFastSecureBoostParam()
        self.model_name = 'HeteroFastSecureBoost'

    def _init_model(self, param: HeteroFastSecureBoostParam):
        """Copy fast-sbt specific settings from the parameter object."""
        super(HeteroFastSecureBoostingTreeGuest, self)._init_model(param)
        self.tree_num_per_party = param.tree_num_per_party
        self.work_mode = param.work_mode
        self.guest_depth = param.guest_depth
        self.host_depth = param.host_depth
        # EINI prediction is incompatible with MIX mode; disable it with a log
        if self.work_mode == consts.MIX_TREE and self.EINI_inference:
            LOGGER.info('Mix mode of fast-sbt does not support EINI predict, reset to False')
            self.EINI_inference = False

    def get_tree_plan(self, idx):
        """Return the (tree_type, target_host_id) entry for tree *idx*,
        building the full plan on first use."""
        if not self.init_tree_plan:
            tree_plan = plan.create_tree_plan(self.work_mode, k=self.tree_num_per_party, tree_num=self.boosting_round,
                                              host_list=self.component_properties.host_party_idlist,
                                              complete_secure=self.complete_secure)
            self.tree_plan += tree_plan
            self.init_tree_plan = True
            LOGGER.info('tree plan is {}'.format(self.tree_plan))
        return self.tree_plan[idx]

    def check_host_number(self, tree_type):
        """Layered mode only supports a single host party; assert that."""
        host_num = len(self.component_properties.host_party_idlist)
        LOGGER.info('host number is {}'.format(host_num))
        if tree_type == plan.tree_type_dict['layered_tree']:
            assert host_num == 1, 'only 1 host party is allowed in layered mode'

    def fit_a_booster(self, epoch_idx: int, booster_dim: int):
        """Fit one fast decision tree for the given boosting epoch/class dim."""
        # prepare tree plan
        tree_type, target_host_id = self.get_tree_plan(epoch_idx)
        LOGGER.info('tree work mode is {}'.format(tree_type))
        self.check_host_number(tree_type)
        if self.cur_epoch_idx != epoch_idx:
            # update g/h every epoch
            self.grad_and_hess = self.compute_grad_and_hess(self.y_hat, self.y, self.data_inst)
            self.cur_epoch_idx = epoch_idx
        g_h = self.get_grad_and_hess(self.grad_and_hess, booster_dim)
        tree = HeteroFastDecisionTreeGuest(tree_param=self.tree_param)
        tree.init(flowid=self.generate_flowid(epoch_idx, booster_dim),
                  data_bin=self.data_bin, bin_split_points=self.bin_split_points, bin_sparse_points=self.bin_sparse_points,
                  grad_and_hess=g_h,
                  encrypter=self.encrypter, encrypted_mode_calculator=self.encrypted_calculator,
                  valid_features=self.sample_valid_features(),
                  host_party_list=self.component_properties.host_party_idlist,
                  runtime_idx=self.component_properties.local_partyid,
                  goss_subsample=self.enable_goss,
                  top_rate=self.top_rate, other_rate=self.other_rate,
                  task_type=self.task_type,
                  complete_secure=True if (self.cur_epoch_idx == 0 and self.complete_secure) else False,
                  cipher_compressing=self.cipher_compressing,
                  max_sample_weight=self.max_sample_weight,
                  new_ver=self.new_ver
                  )
        tree.set_tree_work_mode(tree_type, target_host_id)
        tree.set_layered_depth(self.guest_depth, self.host_depth)
        tree.fit()
        self.update_feature_importance(tree.get_feature_importance())
        # in layered mode hosts also contribute splits, so importances are synced
        if self.work_mode == consts.LAYERED_TREE:
            self.sync_feature_importance()
        return tree

    @staticmethod
    def traverse_guest_local_trees(node_pos, sample, trees: List[HeteroFastDecisionTreeGuest]):
        """
        in mix mode, a sample can reach leaf directly
        """
        new_node_pos = node_pos + 0  # avoid inplace manipulate
        for t_idx, tree in enumerate(trees):
            cur_node_idx = new_node_pos[t_idx]
            # host-owned trees are traversed on the host side; skip them here
            if not tree.use_guest_feat_only_predict_mode:
                continue
            rs, reach_leaf = HeteroSecureBoostingTreeGuest.traverse_a_tree(tree, sample, cur_node_idx)
            new_node_pos[t_idx] = rs
        return new_node_pos

    @staticmethod
    def merge_leaf_pos(pos1, pos2):
        # element-wise merge: host positions are offsets added to guest's
        return pos1 + pos2

    # this func will be called by super class's predict()
    def boosting_fast_predict(self, data_inst, trees: List[HeteroFastDecisionTreeGuest], predict_cache=None,
                              pred_leaf=False):
        """Predict with fast-sbt trees; MIX mode merges host leaf positions,
        LAYERED mode delegates to the parent implementation."""
        LOGGER.info('fast sbt running predict')
        if self.work_mode == consts.MIX_TREE:
            LOGGER.info('running mix mode predict')
            tree_num = len(trees)
            node_pos = data_inst.mapValues(lambda x: np.zeros(tree_num, dtype=np.int64))
            # traverse local trees
            traverse_func = functools.partial(self.traverse_guest_local_trees, trees=trees)
            guest_leaf_pos = node_pos.join(data_inst, traverse_func)
            # get leaf node from other host parties
            host_leaf_pos_list = self.hetero_sbt_transfer_variable.host_predict_data.get(idx=-1)
            for host_leaf_pos in host_leaf_pos_list:
                guest_leaf_pos = guest_leaf_pos.join(host_leaf_pos, self.merge_leaf_pos)
            if pred_leaf:  # predict leaf, return leaf position only
                return guest_leaf_pos
            else:
                predict_result = self.get_predict_scores(leaf_pos=guest_leaf_pos, learning_rate=self.learning_rate,
                                                         init_score=self.init_score, trees=trees,
                                                         multi_class_num=self.booster_dim, predict_cache=predict_cache)
                return predict_result
        else:
            LOGGER.debug('running layered mode predict')
            return super(HeteroFastSecureBoostingTreeGuest, self).boosting_fast_predict(data_inst, trees, predict_cache,
                                                                                        pred_leaf=pred_leaf)

    def load_booster(self, model_meta, model_param, epoch_idx, booster_idx):
        """Rebuild one tree from serialized meta/param for prediction."""
        tree = HeteroFastDecisionTreeGuest(self.tree_param)
        tree.load_model(model_meta, model_param)
        tree.set_flowid(self.generate_flowid(epoch_idx, booster_idx))
        tree.set_runtime_idx(self.component_properties.local_partyid)
        tree.set_host_party_idlist(self.component_properties.host_party_idlist)
        tree_type, target_host_id = self.get_tree_plan(epoch_idx)
        tree.set_tree_work_mode(tree_type, target_host_id)
        # guest-only trees can be traversed entirely on this side
        if self.tree_plan[epoch_idx][0] == plan.tree_type_dict['guest_feat_only']:
            LOGGER.debug('tree of epoch {} is guest only'.format(epoch_idx))
            tree.use_guest_feat_only_predict_mode()
        return tree

    def get_model_meta(self):
        """Extend the parent meta with the fast-sbt work mode."""
        _, model_meta = super(HeteroFastSecureBoostingTreeGuest, self).get_model_meta()
        meta_name = consts.HETERO_FAST_SBT_GUEST_MODEL + "Meta"
        model_meta.work_mode = self.work_mode
        return meta_name, model_meta

    def get_model_param(self):
        """Extend the parent param with the encoded tree plan and model name."""
        _, model_param = super(HeteroFastSecureBoostingTreeGuest, self).get_model_param()
        param_name = consts.HETERO_FAST_SBT_GUEST_MODEL + 'Param'
        model_param.tree_plan.extend(plan.encode_plan(self.tree_plan))
        model_param.model_name = consts.HETERO_FAST_SBT_MIX if self.work_mode == consts.MIX_TREE else \
            consts.HETERO_FAST_SBT_LAYERED
        return param_name, model_param

    def set_model_meta(self, model_meta):
        super(HeteroFastSecureBoostingTreeGuest, self).set_model_meta(model_meta)
        self.work_mode = model_meta.work_mode

    def set_model_param(self, model_param):
        super(HeteroFastSecureBoostingTreeGuest, self).set_model_param(model_param)
        self.tree_plan = plan.decode_plan(model_param.tree_plan)
|
openstack/neutron-lib
|
neutron_lib/tests/unit/db/test_model_query.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutron_lib.db import model_query
from neutron_lib import fixture
from neutron_lib.tests import _base
from neutron_lib.utils import helpers
# TODO(boden): find a way to test other model_query functions
class TestHooks(_base.BaseTestCase):
    """Unit tests for model_query hook registration and value retrieval."""

    def setUp(self):
        super(TestHooks, self).setUp()
        self.useFixture(fixture.DBQueryHooksFixture())

    def _mock_hook(self, x):
        # Identity function used as a stand-in hook callable.
        return x

    def test_register_hook(self):
        mock_model = mock.Mock()
        model_query.register_hook(
            mock_model, 'hook1', self._mock_hook,
            self._mock_hook, result_filters=self._mock_hook)
        self.assertEqual(1, len(model_query._model_query_hooks.keys()))
        expected_ref = helpers.make_weak_ref(self._mock_hook)
        hooks_for_model = model_query.get_hooks(mock_model)
        self.assertEqual(1, len(hooks_for_model))
        # Every registered entry (query/filter/result_filters) should hold a
        # weak reference to the callable hook.
        for hook_entry in hooks_for_model:
            for entry_key in hook_entry:
                self.assertEqual(expected_ref, hook_entry.get(entry_key))

    def test_register_hook_non_callables(self):
        mock_model = mock.Mock()
        model_query.register_hook(
            mock_model, 'hook1', self._mock_hook, {}, result_filters={})
        self.assertEqual(1, len(model_query._model_query_hooks.keys()))
        expected_ref = helpers.make_weak_ref(self._mock_hook)
        hooks_for_model = model_query.get_hooks(mock_model)
        self.assertEqual(1, len(hooks_for_model))
        # Only the callable 'query' hook gets weak-referenced; the
        # non-callable filter arguments are stored verbatim.
        for hook_entry in hooks_for_model:
            for entry_key in hook_entry:
                if entry_key == 'query':
                    self.assertEqual(expected_ref, hook_entry.get(entry_key))
                else:
                    self.assertEqual({}, hook_entry.get(entry_key))

    def test_get_values(self):
        mock_model = mock.Mock()
        mock_context = mock.Mock()
        with mock.patch.object(
                model_query, 'query_with_hooks') as query_with_hooks:
            query_with_hooks.return_value = [['value1'], ['value2']]
            values = model_query.get_values(mock_context, mock_model,
                                            'fake_field')
            # Rows come back as single-column lists; get_values flattens them.
            self.assertEqual(['value1', 'value2'], values)
            query_with_hooks.assert_called_with(
                mock_context, mock_model, field='fake_field')
|
InformaticsMatters/squonk
|
components/rdkit-camel/src/main/python/find_props/find_props.py
|
# File to calculate properties for a molecule and add them back to the molecule's properties.
# The property to be calculated is supplied via a request.header string.
from java import lang
from com.im.lac.types import MoleculeObject, MoleculeObjectIterable
lang.System.loadLibrary('GraphMolWrap')
from org.RDKit import *
import sys
def num_hba(mol):
    """Return the number of hydrogen-bond acceptors of an RDKit molecule
    as an int."""
    acceptor_count = RDKFuncs.calcNumHBA(mol)
    return acceptor_count
def num_hbd(mol):
    """Return the number of hydrogen-bond donors of an RDKit molecule
    as an int."""
    donor_count = RDKFuncs.calcNumHBD(mol)
    return donor_count
def num_rings(mol):
    """Return the number of rings of an RDKit molecule as an int."""
    ring_count = RDKFuncs.calcNumRings(mol)
    return ring_count
def mol_logp(mol):
    """Function for calculating the calculated LogP of a molecule
    Takes an RDKit molecule
    Returns a float (LogP is a partition coefficient, not an int)"""
    return RDKFuncs.calcMolLogP(mol)
def mol_mr(mol):
    """Function for calculating the molar refractivity of a molecule
    (calcMolMR computes MR, not molecular mass)
    Takes an RDKit molecule
    Returns a float"""
    return RDKFuncs.calcMolMR(mol)
# Maps property-name strings (as received in the request header) to the
# corresponding calculator functions defined above.
funct_dict = {
    "num_hba": num_hba,
    "num_hbd": num_hbd,
    "num_rings": num_rings,
    "mol_logp": mol_logp,
    "mol_mr": mol_mr,
}
def calc_props(rdmol, function):
    """Look up *function* in funct_dict and apply it to *rdmol*.

    Returns the calculated value, or None (with a message on stderr) when
    the property name is unknown or the underlying RDKit call fails.
    """
    try:
        val = funct_dict[function](rdmol)
    # Narrowed from a bare except so KeyboardInterrupt/SystemExit are not
    # swallowed; covers unknown property names (KeyError) and RDKit
    # calculation failures alike.
    except Exception:
        val = None
        # Fixed the "CALCULATNG" typo and added a newline terminator.
        sys.stderr.write("ERROR CALCULATING PROPERTY -> " + function + "\n")
    return val
## 1) Stream of molecules
## 2) String naming the property to calculate
|
openstack/tacker
|
tacker/tests/functional/sol_kubernetes/vnflcm/test_kubernetes_multi_ns.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tacker.tests.functional.sol_kubernetes.vnflcm import base as vnflcm_base
class VnfLcmKubernetesMultiNsTest(vnflcm_base.BaseVnfLcmKubernetesTest):
    """LCM functional tests for CNFs deployed across multiple namespaces."""

    @classmethod
    def setUpClass(cls):
        super(VnfLcmKubernetesMultiNsTest, cls).setUpClass()
        vnf_package_id, cls.vnfd_id = cls._create_and_upload_vnf_package(
            cls, cls.tacker_client, "test_cnf_multi_ns",
            {"key": "sample_multi_ns_functional"})
        cls.vnf_package_ids.append(vnf_package_id)

    @classmethod
    def tearDownClass(cls):
        super(VnfLcmKubernetesMultiNsTest, cls).tearDownClass()

    def _test_cnf_scale(self, vnf_instance, aspect_id,
                        number_of_steps=1, error=False):
        """Scale out then (unless error is expected) scale in; return the
        final scale level."""
        scale_level = self._get_scale_level_by_aspect_id(
            vnf_instance, aspect_id)
        # test scale out
        scale_level = self._test_scale(
            vnf_instance['id'], 'SCALE_OUT', aspect_id, scale_level,
            number_of_steps, error)
        if error:
            return scale_level
        # test scale in
        scale_level = self._test_scale(
            vnf_instance['id'], 'SCALE_IN', aspect_id, scale_level,
            number_of_steps)
        return scale_level

    def _run_lifecycle_checks(self, vnf_instance_name,
                              vnf_instance_description, additional_param,
                              aspect_id, vdu_id):
        """Shared scenario: instantiate, scale, heal one VNFC, terminate.

        Consolidates the three test cases below, which differ only in the
        kubernetes definition files / namespace parameter and target VDU.
        """
        # instantiate
        vnf_instance = self._create_and_instantiate_vnf_instance(
            self.vnfd_id, "simple", vnf_instance_name,
            vnf_instance_description, additional_param)
        # scale
        self._test_cnf_scale(vnf_instance, aspect_id, number_of_steps=1)
        before_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance)
        deployment_target_vnfc = [
            vnfc_rsc for vnfc_rsc in before_vnfc_rscs
            if vnfc_rsc['vduId'] == vdu_id][0]
        vnfc_instance_id = [deployment_target_vnfc['id']]
        # heal the selected VNFC and verify its pod was replaced
        after_vnfc_rscs = self._test_heal(vnf_instance, vnfc_instance_id)
        for vnfc_rsc in after_vnfc_rscs:
            after_pod_name = vnfc_rsc['computeResource']['resourceId']
            if vnfc_rsc['id'] == deployment_target_vnfc['id']:
                compute_resource = deployment_target_vnfc['computeResource']
                before_pod_name = compute_resource['resourceId']
                self.assertNotEqual(after_pod_name, before_pod_name)
        # terminate
        self._terminate_vnf_instance(vnf_instance['id'])
        self._delete_vnf_instance(vnf_instance['id'])

    def test_multi_tenant_k8s_additional_params(self):
        # Namespace supplied explicitly through additional params.
        files = ["Files/kubernetes/deployment_has_namespace.yaml",
                 "Files/kubernetes/namespace01.yaml"]
        additional_param = {
            "lcm-kubernetes-def-files": files,
            "namespace": "multi-namespace01"}
        self._run_lifecycle_checks(
            "multi_tenant_k8s_additional_params",
            "multi tenant k8s additional params",
            additional_param, "vdu1_aspect", "VDU1")

    def test_multi_tenant_k8s_manifest(self):
        # Namespace taken from the manifest files themselves.
        files = ["Files/kubernetes/deployment_has_namespace.yaml",
                 "Files/kubernetes/namespace02.yaml"]
        additional_param = {"lcm-kubernetes-def-files": files}
        self._run_lifecycle_checks(
            "multi_tenant_k8s_manifest",
            "multi tenant k8s manifest",
            additional_param, "vdu1_aspect", "VDU1")

    def test_multi_tenant_k8s_default(self):
        # No namespace anywhere: the default namespace is used.
        files = ["Files/kubernetes/deployment_no_namespace.yaml"]
        additional_param = {"lcm-kubernetes-def-files": files}
        self._run_lifecycle_checks(
            "multi_tenant_k8s_default",
            "multi tenant k8s default",
            additional_param, "vdu2_aspect", "VDU2")
|
comic/comic-django
|
app/grandchallenge/cases/migrations/0003_auto_20210406_0753.py
|
# Generated by Django 3.1.6 on 2021-04-06 07:53
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: widens RawImageUploadSession.status choices
    # (adds the "Cancelled" state) and indexes the column.

    dependencies = [
        ("cases", "0002_auto_20210305_1248"),
    ]
    operations = [
        migrations.AlterField(
            model_name="rawimageuploadsession",
            name="status",
            field=models.PositiveSmallIntegerField(
                choices=[
                    (0, "Queued"),
                    (1, "Started"),
                    (2, "Re-Queued"),
                    (3, "Failed"),
                    (4, "Succeeded"),
                    (5, "Cancelled"),
                ],
                db_index=True,
                default=0,
            ),
        ),
    ]
|
tetherless-world/ecoop
|
pyecoop/setup.py
|
# -*- coding: utf-8 -*-
"""
ecoop
====
ecoop_ utility functions to automatize the building of the Ecosystem Status Report for the NE-LME.
.. _ecoop: https://github.com/epifanio/ecoop
.. _ecoop-project: http://tw.rpi.edu/web/project/ECOOP
"""
from distutils.core import setup
import os
import sys
import subprocess
# Refuse to run on interpreters older than 2.6, or on 3.0/3.1
# (3.2 is the first supported Python 3 release).
if sys.version_info[:2] < (2, 6) or (3, 0) <= sys.version_info[0:2] < (3, 2):
    raise RuntimeError("Python version 2.6, 2.7 or >= 3.2 required.")
# "builtins" was renamed from "__builtin__" between Python 2 and 3;
# alias it so later code can use a single name.
if sys.version_info[0] >= 3:
    import builtins
else:
    import __builtin__ as builtins
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: C
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
AUTHOR = 'epinux'
MAJOR = 0
MINOR = 1
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
sys.path.append(os.path.join(os.path.dirname(__file__), 'lib'))
def git_version():
    """Return the current git HEAD revision hash, or "Unknown" on failure."""
    def _minimal_ext_cmd(cmd):
        # construct minimal environment so git output is not localized
        env = {}
        for k in ['SYSTEMROOT', 'PATH']:
            v = os.environ.get(k)
            if v is not None:
                env[k] = v
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
        out = proc.communicate()[0]
        # BUG FIX: the return code was previously ignored, so a failing git
        # command (e.g. not inside a work tree) yielded an empty revision
        # string instead of the "Unknown" fallback.
        if proc.returncode != 0:
            raise OSError("command %r failed with code %d"
                          % (cmd, proc.returncode))
        return out
    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        GIT_REVISION = out.strip().decode('ascii')
    except OSError:
        GIT_REVISION = "Unknown"
    return GIT_REVISION
# Capture the revision once at import time for convenience.
GIT_REVISION = git_version()
# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
# update it when the contents of directories change.
if os.path.exists('MANIFEST'): os.remove('MANIFEST')
def get_version_info():
    """Return (FULLVERSION, GIT_REVISION) for the current checkout.

    Development (non-release) builds get a ".dev-<short-hash>" suffix
    appended to VERSION.
    """
    GIT_REVISION = git_version()
    if ISRELEASED:
        FULLVERSION = VERSION
    else:
        FULLVERSION = VERSION + '.dev-' + GIT_REVISION[:7]
    return FULLVERSION, GIT_REVISION
def write_version_py(filename='lib/ecoop/version.py'):
    """Generate the ecoop version module at *filename*."""
    cnt = """
# THIS FILE IS GENERATED FROM ecoop SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
    version = full_version
"""
    FULLVERSION, GIT_REVISION = get_version_info()
    # "with" replaces the explicit try/finally close; the unused 'author'
    # template key (no %(author)s placeholder exists) has been dropped.
    with open(filename, 'w') as a:
        a.write(cnt % {'version': VERSION,
                       'full_version': FULLVERSION,
                       'git_revision': GIT_REVISION,
                       'isrelease': str(ISRELEASED)})
# Long description for the package index comes straight from the README.
with open('README.md') as file:
    long_description = file.read()
# (Re)generate lib/ecoop/version.py before calling setup().
write_version_py()
# NOTE(review): cleaned up vs. the original call: "author_unixid" is not a
# distutils option (it triggered an "Unknown distribution option" warning);
# version now reuses the VERSION constant instead of a duplicated literal;
# the classifier list contained duplicates plus the invalid
# "Development Status :: 1 - Beta" alongside a conflicting
# "5 - Production/Stable" ("4 - Beta" kept, since ISRELEASED is False).
setup(
    name='ecoop',
    version=VERSION,
    description='A collecton of utilities to be used from inside an IPython Notebook to automatize the building of the Ecosystem Status Report for the NE-LME - Climate forcing UseCase',
    long_description=long_description,
    author='Massimo Di Stefano',
    author_email='epiesasha@me.com',
    url='http://github.com/epifanio/ecoop',
    packages=['ecoop'],
    package_dir={'': 'lib'},
    license='BSD 3-Clause license',
    platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved',
        'License :: OSI Approved :: BSD License',
        'Operating System :: MacOS',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: OS Independent',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Programming Language :: C',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Internet',
        'Topic :: Scientific/Engineering',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries',
    ],
)
|
alexryndin/ambari
|
ambari-server/src/main/resources/stacks/BigInsights/4.2/services/ZOOKEEPER/package/scripts/zookeeper_server.py
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import random
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import get_unique_id_and_date
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.version import compare_versions, format_stack_version
from resource_management.libraries.functions.security_commons import build_expectations, \
cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
FILE_TYPE_JAAS_CONF
from resource_management.core.shell import call
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Execute
from resource_management.libraries.functions.check_process_status import check_process_status
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.validate import call_and_match_output
from zookeeper import zookeeper
from zookeeper_service import zookeeper_service
class ZookeeperServer(Script):
def get_component_name(self):
    # Name used by stack_select/conf_select to identify this component.
    return "zookeeper-server"
def install(self, env):
    # Install OS packages, then write out the initial configuration.
    self.install_packages(env)
    self.configure(env)
def configure(self, env, upgrade_type=None):
    # params is an Ambari-generated module whose values are bound to the
    # execution environment via env.set_params before rendering configs.
    import params
    env.set_params(params)
    zookeeper(type='server', upgrade_type=upgrade_type)
def pre_upgrade_restart(self, env, upgrade_type=None):
    # Before restarting during a stack upgrade, point the conf/stack
    # symlinks at the target version (only for stack versions >= 4.0.0.0).
    Logger.info("Executing Stack Upgrade pre-restart")
    import params
    env.set_params(params)
    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
        conf_select.select(params.stack_name, "zookeeper", params.version)
        stack_select.select("zookeeper-server", params.version)
        #Execute(format("iop-select set zookeeper-server {version}"))
def start(self, env, upgrade_type=None):
    # Re-render configuration, then start the ZooKeeper server service.
    import params
    env.set_params(params)
    self.configure(env, upgrade_type)
    zookeeper_service(action = 'start', upgrade_type=upgrade_type)
def post_upgrade_restart(self, env, upgrade_type=None):
    """Verify the ZooKeeper quorum after an upgrade restart.

    Creates, lists and deletes a uniquely-named znode through zkCli to
    prove a quorum formed, then logs this server's mode (leader/follower).
    NOTE: resource_management's format() interpolates the *local variable
    names* below (cli_shell, unique, client_port, ...), so those names
    must not be changed.
    """
    if upgrade_type == "nonrolling":
        return
    Logger.info("Executing Stack Upgrade post-restart")
    import params
    env.set_params(params)
    zk_server_host = random.choice(params.zookeeper_hosts)
    cli_shell = format("{zk_cli_shell} -server {zk_server_host}:{client_port}")
    # Ensure that a quorum is still formed.
    unique = get_unique_id_and_date()
    create_command = format("echo 'create /{unique} mydata' | {cli_shell}")
    list_command = format("echo 'ls /' | {cli_shell}")
    delete_command = format("echo 'delete /{unique} ' | {cli_shell}")
    quorum_err_message = "Failed to establish zookeeper quorum"
    call_and_match_output(create_command, 'Created', quorum_err_message)
    # BUG FIX: the trailing ".*?\]" fragment was a non-raw literal relying
    # on the invalid "\]" escape (a DeprecationWarning, and an error in
    # future Python versions); both fragments are now raw strings.
    call_and_match_output(list_command, r"\[.*?" + unique + r".*?\]", quorum_err_message)
    call(delete_command)
    if params.client_port:
        check_leader_command = format("echo stat | nc localhost {client_port} | grep Mode")
        code, out = call(check_leader_command, logoutput=False)
        if code == 0 and out:
            Logger.info(out)
def stop(self, env, upgrade_type=None):
import params
env.set_params(params)
zookeeper_service(action = 'stop', upgrade_type=upgrade_type)
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.zk_pid_file)
def security_status(self, env):
import status_params
env.set_params(status_params)
if status_params.security_enabled:
# Expect the following files to be available in status_params.config_dir:
# zookeeper_jaas.conf
# zookeeper_client_jaas.conf
try:
props_value_check = None
props_empty_check = ['Server/keyTab', 'Server/principal']
props_read_check = ['Server/keyTab']
zk_env_expectations = build_expectations('zookeeper_jaas', props_value_check, props_empty_check,
props_read_check)
zk_expectations = {}
zk_expectations.update(zk_env_expectations)
security_params = get_params_from_filesystem(status_params.config_dir,
{'zookeeper_jaas.conf': FILE_TYPE_JAAS_CONF})
result_issues = validate_security_config_properties(security_params, zk_expectations)
if not result_issues: # If all validations passed successfully
# Double check the dict before calling execute
if ( 'zookeeper_jaas' not in security_params
or 'Server' not in security_params['zookeeper_jaas']
or 'keyTab' not in security_params['zookeeper_jaas']['Server']
or 'principal' not in security_params['zookeeper_jaas']['Server']):
self.put_structured_out({"securityState": "ERROR"})
self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
return
cached_kinit_executor(status_params.kinit_path_local,
status_params.zk_user,
security_params['zookeeper_jaas']['Server']['keyTab'],
security_params['zookeeper_jaas']['Server']['principal'],
status_params.hostname,
status_params.tmp_dir)
self.put_structured_out({"securityState": "SECURED_KERBEROS"})
else:
issues = []
for cf in result_issues:
issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
self.put_structured_out({"securityState": "UNSECURED"})
except Exception as e:
self.put_structured_out({"securityState": "ERROR"})
self.put_structured_out({"securityStateErrorInfo": str(e)})
else:
self.put_structured_out({"securityState": "UNSECURED"})
# Entry point: let the Ambari Script framework dispatch the requested
# command (install/configure/start/stop/status/...) to ZookeeperServer.
if __name__ == "__main__":
  ZookeeperServer().execute()
|
hashdd/pyhashdd
|
hashdd/features/hashdd_filemagic.py
|
"""
hashdd_filemagic.py
@brad_anton
License:
Copyright 2015 hashdd.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import magic
from .feature import feature
class hashdd_filemagic(feature):
    """Feature plugin that reports libmagic's file-type description.

    Prefers the in-memory sample (`self.buffer`) over the on-disk path
    (`self.filename`); returns None when neither is available.
    """
    def process(self):
        """Return the magic description string for the sample, or None."""
        sample = self.buffer
        if sample:
            return magic.from_buffer(sample)
        if self.filename:
            return magic.from_file(self.filename)
        return None
|
briancurtin/python-openstacksdk
|
examples/image/delete.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from examples.connect import EXAMPLE_IMAGE_NAME
"""
Delete resources with the Image service.
For a full guide see
http://developer.openstack.org/sdks/python/openstacksdk/users/guides/image.html
"""
def delete_image(conn):
    """Delete the example image via the Image service.

    Looks up the image named EXAMPLE_IMAGE_NAME and deletes it; raises if
    the image does not exist (ignore_missing=False).
    """
    print("Delete Image:")

    image = conn.image.find_image(EXAMPLE_IMAGE_NAME)
    conn.image.delete_image(image, ignore_missing=False)
|
hmrc/captain
|
captain/tests/util_mock.py
|
from mock import MagicMock
import docker.errors
from requests.exceptions import ConnectionError
import datetime
class ClientMock():
    """Canned docker-py client mocks for four test "nodes".

    Nodes 1 and 2 answer with the fixture data defined at class level below;
    nodes 3 and 4 simulate unreachable hosts by raising ConnectionError.
    """
    def __init__(self):
        # Fake `logs` API: a generator of lines when streaming, otherwise a
        # single newline-joined string.
        def __logs(i, stream=False):
            if stream:
                return ("this is line {}".format(l) for l in xrange(1, 100))
            else:
                return "\n".join(["this is line {}".format(l) for l in xrange(1, 3)])
        # Node 1: healthy node; containers/inspect answer from the node-1
        # fixtures, and create/start succeed (create returns a fixed Id).
        self.client_node1 = MagicMock()
        self.client_node1.containers = MagicMock(return_value=self.__containers_cmd_return_node1)
        self.client_node1.inspect_container = MagicMock(side_effect=lambda container_id:
                                                        self.__get_container(self.__inspect_container_cmd_return_node1,
                                                                             container_id))
        self.client_node1.create_container = MagicMock(return_value={'Id': 'eba8bea2600029'})
        self.client_node1.start = MagicMock()
        self.client_node1.logs = MagicMock(side_effect=__logs)
        # Node 2: healthy node backed by the node-2 fixtures (read-only).
        self.client_node2 = MagicMock()
        self.client_node2.containers = MagicMock(return_value=self.__containers_cmd_return_node2)
        self.client_node2.inspect_container = MagicMock(side_effect=lambda container_id:
                                                        self.__get_container(self.__inspect_container_cmd_return_node2,
                                                                             container_id))
        self.client_node2.logs = MagicMock(side_effect=__logs)
        # Nodes 3 and 4: unreachable hosts; every API call raises ConnectionError.
        self.client_node3 = MagicMock()
        self.client_node3.containers = MagicMock(side_effect=ConnectionError())
        self.client_node3.inspect_container = MagicMock(side_effect=ConnectionError())
        self.client_node3.logs = MagicMock(side_effect=__logs)
        self.client_node4 = MagicMock()
        self.client_node4.containers = MagicMock(side_effect=ConnectionError())
        self.client_node4.inspect_container = MagicMock(side_effect=ConnectionError())
        self.client_node4.logs = MagicMock(side_effect=__logs)
    def mock_one_docker_node(self, docker_client):
        # Patch the docker.Client constructor mock to route by base_url.
        docker_client.side_effect = self.__side_effect
        return self.client_node1
    def mock_two_docker_nodes(self, docker_client):
        # NOTE(review): despite the name this returns three clients (node-3 is
        # the unreachable one) -- confirm against the callers in the tests.
        docker_client.side_effect = self.__side_effect
        return self.client_node1, self.client_node2, self.client_node3
    def __side_effect(self, base_url, version, timeout):
        # Route a docker.Client(base_url, ...) construction to the right mock.
        if "node-1" in base_url:
            return self.client_node1
        if "node-2" in base_url:
            return self.client_node2
        if "node-3" in base_url:
            return self.client_node3
        if "node-4" in base_url:
            return self.client_node4
        raise Exception("{} not mocked".format(base_url))
    def __get_container(self, data, container_id):
        # Mimic docker's behaviour for unknown ids: raise APIError rather
        # than KeyError.
        try:
            return data[container_id]
        except KeyError as e:
            raise docker.errors.APIError(e, "dummy", explanation="No such container: {}".format(container_id))
__containers_cmd_return_node1 = [
{u'Command': u'/runner/init start web',
u'Created': 1408697397,
u'Id': u'656ca7c307d178',
u'Image': u'hmrc/slugrunner:latest',
u'Names': [u'/ers-checking-frontend-27'],
u'Ports': [{u'IP': u'0.0.0.0',
u'PrivatePort': 8080,
u'PublicPort': 9225,
u'Type': u'tcp'}],
u'Status': u'Up 40 minutes'},
{u'Command': u'/runner/init start web',
u'Created': 1408696448,
u'Id': u'eba8bea2600029',
u'Image': u'hmrc/slugrunner:latest',
u'Names': [u'/paye_216'],
u'Ports': [{u'IP': u'0.0.0.0',
u'PrivatePort': 8080,
u'PublicPort': 9317,
u'Type': u'tcp'}],
u'Status': u'Up 56 minutes'},
{u'Command': u'/runner/init start web',
u'Created': 1406886169,
u'Id': u'381587e2978216',
u'Image': u'hmrc/slugrunner:latest',
u'Names': [u'/ers_5'],
u'Ports': [],
u'Status': u'Exited (127) 4 weeks ago'},
# Weird edge case when docker doesn't fill in the container status
{u'Command': u'/runner/init start web',
u'Created': 1406886169,
u'Id': u'3815178hgdasf6',
u'Image': u'hmrc/slugrunner:latest',
u'Names': [u'/ers_5'],
u'Ports': [],
u'Status': u''}]
__containers_cmd_return_node2 = [
{u'Command': u'/runner/init start web',
u'Created': 1408687834,
u'Id': u'80be2a9e62ba00',
u'Image': u'hmrc/slugrunner:latest',
u'Names': [u'/paye_216_8020e407-e40a-478e-9f31-a43bb50d8410'],
u'Ports': [{u'IP': u'0.0.0.0',
u'PrivatePort': 8080,
u'PublicPort': 9317,
u'Type': u'tcp'}],
u'Status': u'Up 19 minutes'},
{u'Command': u'/runner/init start web',
u'Created': 1408696448,
u'Id': u'jh23899fg00029',
u'Image': u'hmrc/slugrunner:latest',
u'Names': [u'/noport_216'],
u'Ports': [],
u'Status': u'Up 56 minutes'},
{u'Command': u'/runner/init start web',
u'Created': 1408696448,
u'Id': u'oiwq569fg00029',
u'Image': u'hmrc/slugrunner:latest',
u'Names': [u'/wrongport_216'],
u'Ports': [{u'IP': u'0.0.0.0',
u'PrivatePort': 1234,
u'PublicPort': 9317,
u'Type': u'tcp'}],
u'Status': u'Up 56 minutes'},
{u'Command': u'/runner/init start web',
u'Created': 1409767240,
u'Id': u'389821jsv78216',
u'Image': u'hmrc/slugrunner:latest',
u'Names': [u'/myservice_10'],
u'Ports': [],
u'Status': u'Exited (127) 21 hours ago'},
{u'Command': u'/runner/init start web',
u'Created': 1426151640,
u'Id': u'61c2695fd82a',
u'Image': u'hmrc/slugrunner:latest',
u'Names': [u'/fresh_5'],
u'Ports': [],
u'Status': u''},
{u'Command': u'/runner/init start web',
u'Created': 1426151640,
u'Id': u'61c2695fd82b',
u'Image': u'hmrc/slugrunner:latest',
u'Names': [u'/notfresh_5'],
u'Ports': [],
u'Status': u''}]
__inspect_container_cmd_return_node1 = {
"381587e2978216": {
u'Args': [u'start', u'web'],
u'Config': {u'AttachStderr': False,
u'AttachStdin': False,
u'AttachStdout': False,
u'Cmd': [u'start', u'web'],
u'CpuShares': 2,
u'Cpuset': u'',
u'Domainname': u'',
u'Entrypoint': [u'/runner/init'],
u'Env': [u'HMRC_CONFIG=-Dapplication.secret=H7dVw$PlJiD)^U,oa4TA1pa]pT:4ETLqbL&2P=n6T~p,A*}^.Y46@PQOV~9(B09Hc]t7-hsf~&@w=zH -Dapplication.log=INFO -Dlogger.resource=/application-json-logger.xml -Dhttp.port=8080 -Dgovuk-tax.Prod.google-analytics.token=UA-00000000-0 -Drun.mode=Prod -Dsession.secure=true -Dsession.httpOnly=true -Dcookie.encryption.key=fqpLDZ4smuDsekHkrEBlCA==',
u'JAVA_OPTS=-Xmx256m -Xms256m',
u'SLUG_URL=https://host/ers-checking-frontend_27.tgz',
u'PORT=8080',
u'HOME=/',
u'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'],
u'ExposedPorts': {u'8080/tcp': {}},
u'Hostname': u'656ca7c307d1',
u'Image': u'hmrc/slugrunner',
u'Memory': 0,
u'MemorySwap': 0,
u'NetworkDisabled': False,
u'OnBuild': None,
u'OpenStdin': False,
u'PortSpecs': None,
u'StdinOnce': False,
u'Tty': False,
u'User': u'',
u'Volumes': None,
u'WorkingDir': u''},
u'Created': u'2014-08-22T08:49:57.80805632Z',
u'Driver': u'aufs',
u'ExecDriver': u'native-0.2',
u'HostConfig': {u'Binds': None,
u'ContainerIDFile': u'',
u'Dns': None,
u'DnsSearch': None,
u'Links': None,
u'LxcConf': [],
u'NetworkMode': u'bridge',
u'PortBindings': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9225'}]},
u'Privileged': False,
u'PublishAllPorts': False,
u'VolumesFrom': None},
u'HostnamePath': u'/var/lib/docker/containers/381587e2978216/hostname',
u'HostsPath': u'/var/lib/docker/containers/381587e2978216/hosts',
u'Id': u'381587e2978216',
u'Image': u'c0cd53268e0c7c42bac84b6bf4f51561720c33f5239aa809f1135cc69cc73a2a',
u'MountLabel': u'',
u'Name': u'/ers-checking-frontend-27',
u'NetworkSettings': {u'Bridge': u'docker0',
u'Gateway': u'172.17.42.1',
u'IPAddress': u'172.17.3.224',
u'IPPrefixLen': 16,
u'PortMapping': None,
u'Ports': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9225'}]}},
u'Path': u'/runner/init',
u'ProcessLabel': u'',
u'ResolvConfPath': u'/etc/resolv.conf',
u'State': {u'ExitCode': 127,
# This date format tests the fact that datetime wants microseconds but docker returns a higher granularity.
u'FinishedAt': "{}1234Z".format((datetime.datetime.now() - datetime.timedelta(days=1, minutes=10)).strftime('%Y-%m-%dT%H:%M:%S.%f')),
u'Paused': False,
u'Pid': 35327,
u'Running': False,
u'StartedAt': u'2014-09-02T08:49:57.906207449Z'},
u'Volumes': {},
u'VolumesRW': {}},
"3815178hgdasf6": {
u'Args': [u'start', u'web'],
u'Config': {u'AttachStderr': False,
u'AttachStdin': False,
u'AttachStdout': False,
u'Cmd': [u'start', u'web'],
u'CpuShares': 2,
u'Cpuset': u'',
u'Domainname': u'',
u'Entrypoint': [u'/runner/init'],
u'Env': [u'HMRC_CONFIG=-Dapplication.secret=H7dVw$PlJiD)^U,oa4TA1pa]pT:4ETLqbL&2P=n6T~p,A*}^.Y46@PQOV~9(B09Hc]t7-hsf~&@w=zH -Dapplication.log=INFO -Dlogger.resource=/application-json-logger.xml -Dhttp.port=8080 -Dgovuk-tax.Prod.google-analytics.token=UA-00000000-0 -Drun.mode=Prod -Dsession.secure=true -Dsession.httpOnly=true -Dcookie.encryption.key=fqpLDZ4smuDsekHkrEBlCA==',
u'JAVA_OPTS=-Xmx256m -Xms256m',
u'SLUG_URL=https://host/ers-checking-frontend_27.tgz',
u'PORT=8080',
u'HOME=/',
u'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'],
u'ExposedPorts': {u'8080/tcp': {}},
u'Hostname': u'656ca7c307d1',
u'Image': u'hmrc/slugrunner',
u'Memory': 0,
u'MemorySwap': 0,
u'NetworkDisabled': False,
u'OnBuild': None,
u'OpenStdin': False,
u'PortSpecs': None,
u'StdinOnce': False,
u'Tty': False,
u'User': u'',
u'Volumes': None,
u'WorkingDir': u''},
u'Created': u'2014-08-22T08:49:57.80805632Z',
u'Driver': u'aufs',
u'ExecDriver': u'native-0.2',
u'HostConfig': {u'Binds': None,
u'ContainerIDFile': u'',
u'Dns': None,
u'DnsSearch': None,
u'Links': None,
u'LxcConf': [],
u'NetworkMode': u'bridge',
u'PortBindings': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9225'}]},
u'Privileged': False,
u'PublishAllPorts': False,
u'VolumesFrom': None},
u'HostnamePath': u'/var/lib/docker/containers/3815178hgdasf6/hostname',
u'HostsPath': u'/var/lib/docker/containers/3815178hgdasf6/hosts',
u'Id': u'3815178hgdasf6',
u'Image': u'c0cd53268e0c7c42bac84b6bf4f51561720c33f5239aa809f1135cc69cc73a2a',
u'MountLabel': u'',
u'Name': u'/ers-checking-frontend-27',
u'NetworkSettings': {u'Bridge': u'docker0',
u'Gateway': u'172.17.42.1',
u'IPAddress': u'172.17.3.224',
u'IPPrefixLen': 16,
u'PortMapping': None,
u'Ports': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9225'}]}},
u'Path': u'/runner/init',
u'ProcessLabel': u'',
u'ResolvConfPath': u'/etc/resolv.conf',
u'State': {u'ExitCode': 127,
# Weird date that docker sometimes returns in containers with no Status
u'FinishedAt': u'0001-01-01T00:00:00Z',
u'Paused': False,
u'Pid': 35327,
u'Running': False,
u'StartedAt': u'0001-01-01T00:00:00Z'},
u'Volumes': {},
u'VolumesRW': {}},
"656ca7c307d178": {
u'Args': [u'start', u'web'],
u'Config': {u'AttachStderr': False,
u'AttachStdin': False,
u'AttachStdout': False,
u'Cmd': [u'start', u'web'],
u'CpuShares': 2,
u'Cpuset': u'',
u'Domainname': u'',
u'Entrypoint': [u'/runner/init'],
u'Env': [u'HMRC_CONFIG=-Dapplication.secret=H7dVw$PlJiD)^U,oa4TA1pa]pT:4ETLqbL&2P=n6T~p,A*}^.Y46@PQOV~9(B09Hc]t7-hsf~&@w=zH -Dapplication.log=INFO -Dlogger.resource=/application-json-logger.xml -Dhttp.port=8080 -Dgovuk-tax.Prod.google-analytics.token=UA-00000000-0 -Drun.mode=Prod -Dsession.secure=true -Dsession.httpOnly=true -Dcookie.encryption.key=fqpLDZ4smuDsekHkrEBlCA==',
u'JAVA_OPTS=-Xmx256m -Xms256m',
u'SLUG_URL=https://host/ers-checking-frontend_27.tgz',
u'PORT=8080',
u'HOME=/',
u'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'],
u'ExposedPorts': {u'8080/tcp': {}},
u'Hostname': u'656ca7c307d1',
u'Image': u'hmrc/slugrunner',
u'Memory': 0,
u'MemorySwap': 0,
u'NetworkDisabled': False,
u'OnBuild': None,
u'OpenStdin': False,
u'PortSpecs': None,
u'StdinOnce': False,
u'Tty': False,
u'User': u'',
u'Volumes': None,
u'WorkingDir': u''},
u'Created': u'2014-08-22T08:49:57.80805632Z',
u'Driver': u'aufs',
u'ExecDriver': u'native-0.2',
u'HostConfig': {u'Binds': None,
u'ContainerIDFile': u'',
u'Dns': None,
u'DnsSearch': None,
u'Links': None,
u'LxcConf': [],
u'NetworkMode': u'bridge',
u'PortBindings': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9225'}]},
u'Privileged': False,
u'PublishAllPorts': False,
u'VolumesFrom': None},
u'HostnamePath': u'/var/lib/docker/containers/656ca7c307d178/hostname',
u'HostsPath': u'/var/lib/docker/containers/656ca7c307d178/hosts',
u'Id': u'656ca7c307d178',
u'Image': u'c0cd53268e0c7c42bac84b6bf4f51561720c33f5239aa809f1135cc69cc73a2a',
u'MountLabel': u'',
u'Name': u'/ers-checking-frontend-27',
u'NetworkSettings': {u'Bridge': u'docker0',
u'Gateway': u'172.17.42.1',
u'IPAddress': u'172.17.3.224',
u'IPPrefixLen': 16,
u'PortMapping': None,
u'Ports': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9225'}]}},
u'Path': u'/runner/init',
u'ProcessLabel': u'',
u'ResolvConfPath': u'/etc/resolv.conf',
u'State': {u'ExitCode': 0,
u'FinishedAt': u'0001-01-01T00:00:00Z',
u'Paused': False,
u'Pid': 35327,
u'Running': True,
u'StartedAt': u'2014-08-22T08:49:57.906207449Z'},
u'Volumes': {},
u'VolumesRW': {}},
"eba8bea2600029": {
u'Args': [u'start', u'web'],
u'Config': {u'AttachStderr': False,
u'AttachStdin': False,
u'AttachStdout': False,
u'Cmd': [u'start', u'web'],
u'CpuShares': 2,
u'Cpuset': u'',
u'Domainname': u'',
u'Entrypoint': [u'/runner/init'],
u'Env': [u'HMRC_CONFIG=-Dapplication.log=INFO -Drun.mode=Prod -Dlogger.resource=/application-json-logger.xml -Dhttp.port=8080',
u'JAVA_OPTS=-Xmx256m -Xms256m',
u'SLUG_URL=https://host/paye_216.tgz',
u'PORT=8080',
u'HOME=/',
u'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'],
u'ExposedPorts': {u'8080/tcp': {}},
u'Hostname': u'eba8bea26000',
u'Image': u'hmrc/slugrunner',
u'Memory': 0,
u'MemorySwap': 0,
u'NetworkDisabled': False,
u'OnBuild': None,
u'OpenStdin': False,
u'PortSpecs': None,
u'StdinOnce': False,
u'Tty': False,
u'User': u'',
u'Volumes': None,
u'WorkingDir': u''},
u'Created': u'2014-08-22T08:34:08.134031634Z',
u'Driver': u'aufs',
u'ExecDriver': u'native-0.2',
u'HostConfig': {u'Binds': None,
u'ContainerIDFile': u'',
u'Dns': None,
u'DnsSearch': None,
u'Links': None,
u'LxcConf': [],
u'NetworkMode': u'bridge',
u'PortBindings': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9317'}]},
u'Privileged': False,
u'PublishAllPorts': False,
u'VolumesFrom': None},
u'HostnamePath': u'/var/lib/docker/containers/eba8bea2600029/hostname',
u'HostsPath': u'/var/lib/docker/containers/eba8bea2600029/hosts',
u'Id': u'eba8bea2600029',
u'Image': u'c0cd53268e0c7c42bac84b6bf4f51561720c33f5239aa809f1135cc69cc73a2a',
u'MountLabel': u'',
u'Name': u'/paye_216',
u'NetworkSettings': {u'Bridge': u'docker0',
u'Gateway': u'172.17.42.1',
u'IPAddress': u'172.17.3.221',
u'IPPrefixLen': 16,
u'PortMapping': None,
u'Ports': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9317'}]}},
u'Path': u'/runner/init',
u'ProcessLabel': u'',
u'ResolvConfPath': u'/etc/resolv.conf',
u'State': {u'ExitCode': 0,
u'FinishedAt': u'0001-01-01T00:00:00Z',
u'Paused': False,
u'Pid': 30996,
u'Running': True,
u'StartedAt': u'2014-08-22T08:34:08.260419303Z'},
u'Volumes': {},
u'VolumesRW': {}}
}
__inspect_container_cmd_return_node2 = {
"389821jsv78216": {
u'Args': [u'start', u'web'],
u'Config': {u'AttachStderr': False,
u'AttachStdin': False,
u'AttachStdout': False,
u'Cmd': [u'start', u'web'],
u'CpuShares': 2,
u'Cpuset': u'',
u'Domainname': u'',
u'Entrypoint': [u'/runner/init'],
u'Env': [u'HMRC_CONFIG=-Dapplication.secret=H7dVw$PlJiD)^U,oa4TA1pa]pT:4ETLqbL&2P=n6T~p,A*}^.Y46@PQOV~9(B09Hc]t7-hsf~&@w=zH -Dapplication.log=INFO -Dlogger.resource=/application-json-logger.xml -Dhttp.port=8080 -Dgovuk-tax.Prod.google-analytics.token=UA-00000000-0 -Drun.mode=Prod -Dsession.secure=true -Dsession.httpOnly=true -Dcookie.encryption.key=fqpLDZ4smuDsekHkrEBlCA==',
u'JAVA_OPTS=-Xmx256m -Xms256m',
u'SLUG_URL=https://host/ers-checking-frontend_27.tgz',
u'PORT=8080',
u'HOME=/',
u'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'],
u'ExposedPorts': {u'8080/tcp': {}},
u'Hostname': u'656ca7c307d1',
u'Image': u'hmrc/slugrunner',
u'Memory': 0,
u'MemorySwap': 0,
u'NetworkDisabled': False,
u'OnBuild': None,
u'OpenStdin': False,
u'PortSpecs': None,
u'StdinOnce': False,
u'Tty': False,
u'User': u'',
u'Volumes': None,
u'WorkingDir': u''},
u'Created': u'2014-08-22T08:49:57.80805632Z',
u'Driver': u'aufs',
u'ExecDriver': u'native-0.2',
u'HostConfig': {u'Binds': None,
u'ContainerIDFile': u'',
u'Dns': None,
u'DnsSearch': None,
u'Links': None,
u'LxcConf': [],
u'NetworkMode': u'bridge',
u'PortBindings': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9225'}]},
u'Privileged': False,
u'PublishAllPorts': False,
u'VolumesFrom': None},
u'HostnamePath': u'/var/lib/docker/containers/389821jsv78216/hostname',
u'HostsPath': u'/var/lib/docker/containers/389821jsv78216/hosts',
u'Id': u'389821jsv78216',
u'Image': u'c0cd53268e0c7c42bac84b6bf4f51561720c33f5239aa809f1135cc69cc73a2a',
u'MountLabel': u'',
u'Name': u'/ers-checking-frontend-27',
u'NetworkSettings': {u'Bridge': u'docker0',
u'Gateway': u'172.17.42.1',
u'IPAddress': u'172.17.3.224',
u'IPPrefixLen': 16,
u'PortMapping': None,
u'Ports': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9225'}]}},
u'Path': u'/runner/init',
u'ProcessLabel': u'',
u'ResolvConfPath': u'/etc/resolv.conf',
u'State': {u'ExitCode': 127,
u'FinishedAt': datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
u'Paused': False,
u'Pid': 35327,
u'Running': False,
u'StartedAt': u'2014-09-03T17:49:57.906207449Z'},
u'Volumes': {},
u'VolumesRW': {}},
"80be2a9e62ba00": {
u'Args': [u'start', u'web'],
u'Config': {u'AttachStderr': False,
u'AttachStdin': False,
u'AttachStdout': False,
u'Cmd': [u'start', u'web'],
u'CpuShares': 2,
u'Cpuset': u'',
u'Domainname': u'',
u'Entrypoint': [u'/runner/init'],
u'Env': [u'HMRC_CONFIG=-Dapplication.log=INFO -Drun.mode=Prod -Dlogger.resource=/application-json-logger.xml -Dhttp.port=8080',
u'JAVA_OPTS=-Xmx256m -Xms256m',
u'SLUG_URL=http://example.com/paye/paye_216.tgz',
u'PORT=8080',
u'HOME=/',
u'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'],
u'ExposedPorts': {u'8080/tcp': {}},
u'Hostname': u'80be2a9e62ba',
u'Image': u'hmrc/slugrunner',
u'Memory': 0,
u'MemorySwap': 0,
u'NetworkDisabled': False,
u'OnBuild': None,
u'OpenStdin': False,
u'PortSpecs': None,
u'StdinOnce': False,
u'Tty': False,
u'User': u'',
u'Volumes': None,
u'WorkingDir': u''},
u'Created': u'2014-08-22T08:33:11.343161034Z',
u'Driver': u'aufs',
u'ExecDriver': u'native-0.2',
u'HostConfig': {u'Binds': None,
u'ContainerIDFile': u'',
u'Dns': None,
u'DnsSearch': None,
u'Links': None,
u'LxcConf': [],
u'NetworkMode': u'bridge',
u'PortBindings': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9317'}]},
u'Privileged': False,
u'PublishAllPorts': False,
u'VolumesFrom': None},
u'HostnamePath': u'/var/lib/docker/containers/80be2a9e62ba00/hostname',
u'HostsPath': u'/var/lib/docker/containers/80be2a9e62ba00/hosts',
u'Id': u'80be2a9e62ba00',
u'Image': u'c0cd53268e0c7c42bac84b6bf4f51561720c33f5239aa809f1135cc69cc73a2a',
u'MountLabel': u'',
u'Name': u'/paye_216_8020e407-e40a-478e-9f31-a43bb50d8410',
u'NetworkSettings': {u'Bridge': u'docker0',
u'Gateway': u'172.17.42.1',
u'IPAddress': u'172.17.3.221',
u'IPPrefixLen': 16,
u'PortMapping': None,
u'Ports': {u'8080/tcp': [{u'HostIp': u'0.0.0.0',
u'HostPort': u'9317'}]}},
u'Path': u'/runner/init',
u'ProcessLabel': u'',
u'ResolvConfPath': u'/etc/resolv.conf',
u'State': {u'ExitCode': 0,
u'FinishedAt': u'0001-01-01T00:00:00Z',
u'Paused': False,
u'Pid': 30996,
u'Running': True,
u'StartedAt': u'2014-08-22T08:33:39.241960303Z'},
u'Volumes': {},
u'VolumesRW': {}},
"61c2695fd82a": {
u'Args': [u'start', u'web'],
u'Config': {u'AttachStderr': False,
u'AttachStdin': False,
u'AttachStdout': False,
u'Cmd': [u'start', u'web'],
u'CpuShares': 0,
u'Cpuset': u'',
u'Domainname': u'',
u'Entrypoint': [u'/runner/init'],
u'Env': [u'TEST=yes',
u'HOME=/',
u'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'],
u'ExposedPorts': {u'8080/tcp': {}},
u'Hostname': u'captain-test',
u'Image': u'hmrc/slugrunner',
u'Memory': 0,
u'MemorySwap': 0,
u'NetworkDisabled': False,
u'OnBuild': None,
u'OpenStdin': False,
u'PortSpecs': None,
u'StdinOnce': False,
u'Tty': False,
u'User': u'',
u'Volumes': None,
u'WorkingDir': u''},
# This is a freshly created but not yet started container
u'Created': datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
u'Driver': u'aufs',
u'ExecDriver': u'native-0.2',
u'HostConfig': {u'Binds': None,
u'ContainerIDFile': u'',
u'Dns': None,
u'DnsSearch': None,
u'Links': None,
u'LxcConf': None,
u'NetworkMode': u'',
u'PortBindings': None,
u'Privileged': False,
u'PublishAllPorts': False,
u'VolumesFrom': None},
u'HostnamePath': u'',
u'HostsPath': u'',
u'Id': u'61c2695fd82a',
u'Image': u'c0cd53268e0c7c42bac84b6bf4f51561720c33f5239aa809f1135cc69cc73a2a',
u'MountLabel': u'',
u'Name': u'/fresh_5',
u'NetworkSettings': {u'Bridge': u'',
u'Gateway': u'',
u'IPAddress': u'',
u'IPPrefixLen': 0,
u'PortMapping': None,
u'Ports': None},
u'Path': u'/runner/init',
u'ProcessLabel': u'',
u'ResolvConfPath': u'',
u'State': {u'ExitCode': 0,
u'FinishedAt': u'0001-01-01T00:00:00Z',
u'Paused': False,
u'Pid': 0,
u'Running': False,
u'StartedAt': u'0001-01-01T00:00:00Z'},
u'Volumes': None,
u'VolumesRW': None},
"61c2695fd82b": {
u'Args': [u'start', u'web'],
u'Config': {u'AttachStderr': False,
u'AttachStdin': False,
u'AttachStdout': False,
u'Cmd': [u'start', u'web'],
u'CpuShares': 0,
u'Cpuset': u'',
u'Domainname': u'',
u'Entrypoint': [u'/runner/init'],
u'Env': [u'TEST=yes',
u'HOME=/',
u'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'],
u'ExposedPorts': {u'8080/tcp': {}},
u'Hostname': u'captain-test',
u'Image': u'hmrc/slugrunner',
u'Memory': 0,
u'MemorySwap': 0,
u'NetworkDisabled': False,
u'OnBuild': None,
u'OpenStdin': False,
u'PortSpecs': None,
u'StdinOnce': False,
u'Tty': False,
u'User': u'',
u'Volumes': None,
u'WorkingDir': u''},
# This is a epoch dated state container but with an old, gc-able created date
u'Created': (datetime.datetime.now() - datetime.timedelta(days=1, minutes=10)).strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
u'Driver': u'aufs',
u'ExecDriver': u'native-0.2',
u'HostConfig': {u'Binds': None,
u'ContainerIDFile': u'',
u'Dns': None,
u'DnsSearch': None,
u'Links': None,
u'LxcConf': None,
u'NetworkMode': u'',
u'PortBindings': None,
u'Privileged': False,
u'PublishAllPorts': False,
u'VolumesFrom': None},
u'HostnamePath': u'',
u'HostsPath': u'',
u'Id': u'61c2695fd82b',
u'Image': u'c0cd53268e0c7c42bac84b6bf4f51561720c33f5239aa809f1135cc69cc73a2a',
u'MountLabel': u'',
u'Name': u'/notfresh_5',
u'NetworkSettings': {u'Bridge': u'',
u'Gateway': u'',
u'IPAddress': u'',
u'IPPrefixLen': 0,
u'PortMapping': None,
u'Ports': None},
u'Path': u'/runner/init',
u'ProcessLabel': u'',
u'ResolvConfPath': u'',
u'State': {u'ExitCode': 0,
u'FinishedAt': u'0001-01-01T00:00:00Z',
u'Paused': False,
u'Pid': 0,
u'Running': False,
u'StartedAt': u'0001-01-01T00:00:00Z'},
u'Volumes': None,
u'VolumesRW': None}
}
|
joerideturck/gcloud-python-bigtable
|
docs/conf.py
|
# -*- coding: utf-8 -*-
#
# Google Cloud Bigtable documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 24 16:48:39 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from email import message_from_string
import os
from pkg_resources import get_distribution
import sys
import types
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Make the package importable from the docs directory for autodoc.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.coverage',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Google Cloud Bigtable'
copyright = u'2015, Google'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '0.0.1'
# The full version, including alpha/beta/rc tags.
# Taken from the installed distribution; the SPHINX_RELEASE environment
# variable overrides it (used by the release build).
distro = get_distribution('gcloud_bigtable')
release = os.getenv('SPHINX_RELEASE', distro.version)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_components/*']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
def add_grpc_mock(grpc_mod, subpackage, module_names):
    """Register a fake ``grpc.<subpackage>`` and its child modules.

    Creates an empty module for ``grpc.<subpackage>``, installs it both in
    ``sys.modules`` and as an attribute of *grpc_mod*, then does the same
    for every name in *module_names* one level deeper.  Used to satisfy
    imports of gRPC on machines where it cannot be installed.
    """
    sub_name = 'grpc.' + subpackage
    sub_mod = types.ModuleType(sub_name)
    sys.modules[sub_name] = sub_mod
    setattr(grpc_mod, subpackage, sub_mod)

    for leaf in module_names:
        leaf_name = '.'.join([sub_name, leaf])
        leaf_mod = types.ModuleType(leaf_name)
        sys.modules[leaf_name] = leaf_mod
        setattr(sub_mod, leaf, leaf_mod)
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if os.environ.get('READTHEDOCS', None) == 'True':
# Really nasty hack so that readthedocs.org can successfully build these
# docs even though gRPC can't be installed.
grpc_mod = types.ModuleType('grpc')
sys.modules['grpc'] = grpc_mod
add_grpc_mock(grpc_mod, '_adapter', ['_c'])
add_grpc_mock(grpc_mod, 'early_adopter', ['implementations'])
add_grpc_mock(grpc_mod, 'framework', ['alpha'])
name = 'grpc.framework.alpha.utilities'
util_mod = types.ModuleType(name)
sys.modules[name] = util_mod
sys.modules['grpc.framework.alpha'].utilities = util_mod
else:
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# Favicon shown in browser tabs; path is relative to this conf directory.
html_favicon = '_static/images/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'GoogleCloudBigtabledoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
# NOTE(review): `distro` and `message_from_string` are assumed to be imported
# earlier in this conf.py (not visible here) — confirm. The author is pulled
# from the package's PKG-INFO metadata.
metadata = distro.get_metadata(distro.PKG_INFO)
author = message_from_string(metadata).get('Author')
latex_documents = [
  (master_doc, 'GoogleCloudBigtable.tex', u'Google Cloud Bigtable Documentation',
   author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Man page output: (source start file, name, description, authors, section).
man_pages = [
    (master_doc, 'googlecloudbigtable', u'Google Cloud Bigtable Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# Texinfo output: (source start file, target name, title, author,
# dir menu entry, description, category).
texinfo_documents = [
    (master_doc, 'GoogleCloudBigtable', u'Google Cloud Bigtable Documentation',
     author, 'GoogleCloudBigtable', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Configuration for intersphinx:
# Refer to the Python standard library and oauth2client library.
# NOTE: We also have a custom mapping for items in the stdlib not
# included in https://docs.python.org/objects.inv
# Cross-reference targets resolved against external inventories at build time.
intersphinx_mapping = {
    'python': ('https://docs.python.org/', None),
    'oauth2client': ('http://oauth2client.readthedocs.org/en/latest/', None),
}
|
neurodata/synaptome-stats
|
collman15v2/201710/annoStats.py
|
#!/usr/bin/env python3
###
###
###
### Jesse Leigh Patsolic
### 2017 <jpatsol1@jhu.edu>
### S.D.G
#
import argparse
from intern.remote.boss import BossRemote
from intern.resource.boss.resource import *
from intern.utils.parallel import block_compute
import configparser
import requests
import numpy as np
from numpy import genfromtxt
import shutil
import blosc
from IPython.core.debugger import set_trace
import sys
import os
import itertools
from functools import partial
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
from multiprocessing import cpu_count
import csv
import datetime
import toolbox
def main(COLL_NAME, EXP_NAME, COORD_FRAME,
         CHAN_NAMES=None, num_threads = 4, CONFIG_FILE= 'config.ini'):
    """Extract per-channel image cubes around every annotation in a BOSS experiment.

    For each annotation id found in the experiment's 'annotation' channel, the
    centroid of the annotation mask is computed, then a (2*bf+1)-sized cube
    (clipped at volume boundaries) is cut out of every image channel around
    that centroid.

    Args:
        COLL_NAME: BOSS collection name.
        EXP_NAME: BOSS experiment name.
        COORD_FRAME: coordinate frame name (currently unused in the body).
        CHAN_NAMES: list of image channel names; defaults to the collman15v2
            channel set when None.
        num_threads: thread pool size for parallel cutout downloads.
        CONFIG_FILE: intern/BOSS config file path.

    Returns:
        (cubes, loc): list (one entry per channel) of arrays of cutouts, and
        an array of annotation centroid coordinates in (z, y, x) order.
    """
    # Half-widths of the cutout box, in z,y,x order.
    bf = [5,180,180] # in z,y,x order
    config = configparser.ConfigParser()
    config.read(CONFIG_FILE)
    TOKEN = config['Default']['token']
    boss_url = ''.join( ( config['Default']['protocol'],'://',config['Default']['host'],'/v1/' ) )
    #print(boss_url)
    #'https://api.boss.neurodata.io/v1/'
    #intern
    rem = BossRemote(CONFIG_FILE)
    cf = CoordinateFrameResource(str(COLL_NAME + '_' + EXP_NAME))
    cfr = rem.get_project(cf)
    anno_res = ChannelResource('annotation', COLL_NAME, EXP_NAME, 'annotation', datatype='uint64')
    # Volume extents taken from the coordinate frame.
    ex = {'x': cfr.x_stop, 'y': cfr.y_stop, 'z': cfr.z_stop}
    # Tile the whole volume so annotation ids can be collected block by block.
    blocks = block_compute(0,ex['x'],0,ex['y'],0,ex['z'],
                           origin = (0,0,0), block_size = (512, 512, 16))
    rid = []
    for b in blocks:
        rid = rid + rem.get_ids_in_region(anno_res, 0, b[0], b[1], b[2], [0,1])
    u = np.unique(np.asarray(rid)) ## returns in sorted order ascending
    ## bounding box for annotation_i
    bb = [rem.get_bounding_box(anno_res, 0,ui, 'tight') for ui in u]
    for i in range(len(bb)):
        bb[i]["id"] = u[i]
    # Binary mask of each annotation within its own bounding box.
    A = [(rem.get_cutout(
        anno_res, 0, bb[i]["x_range"],
        bb[i]["y_range"], bb[i]["z_range"],
        id_list = [bb[i]['id']]) != 0).astype(int)
        for i in range(len(bb))]
    #Bmeans = [np.int32(np.round(np.mean(np.asarray(np.where(A[i] == True)),1))) for i in range(len(A))]
    # Centroid of each mask, local to its bounding box (z,y,x order from np.where).
    Bmeans = [np.int32(np.round(np.mean(np.asarray(np.where(A[i] == 1)),1))) for i in range(len(A))]
    Bglobal = []
    for i in range(len(bb)):
        # Shift local centroid by the bounding-box origin -> global coordinates.
        ad1 = np.asarray([bb[i]['z_range'][0], bb[i]['y_range'][0], bb[i]['x_range'][0]])
        Bglobal.append(Bmeans[i] + ad1)
    # Centroids whose full cube fits inside [ColMin, ColMax] are "contained".
    ColMin = np.asarray(bf)
    ColMax = np.asarray([ex['z'] - (bf[0] + 1), # The z index is inclusive
                         ex['y'] - (bf[1] + 1),
                         ex['x'] - (bf[2] + 1)])
    m = [Bglobal[i] >= ColMin for i in range(len(Bglobal))]
    M = [Bglobal[i] <= ColMax for i in range(len(Bglobal))]
    mm = [np.all(m[i]) for i in range(len(m)) ]
    MM = [np.all(M[i]) for i in range(len(M)) ]
    Bcon = []
    con = [np.asarray(mm[j] and MM[j]) for j in range(len(mm))]
    for i in range(len(Bglobal)):
        if con[i]:
            Bcon.append(Bglobal[i])
    # NOTE(review): Bcon/con are computed but unused below — the active code
    # path cuts cubes for ALL centroids, clipping at the volume boundary.
    if CHAN_NAMES is None:
        CHAN_NAMES = ['DAPI1st', 'DAPI2nd', 'DAPI3rd',
            'GABA488', 'GAD647', 'gephyrin594', 'GS594', 'MBP488',
            'NR1594', 'PSD95_488', 'Synapsin647', 'VGluT1_647']
    #CHAN_NAMES = ['bIIItubulin', 'DAPI_2nd', 'DAPI_3rd',
    #        'GABA', 'GAD2', 'gephyrin', 'NR1', 'PSDr',
    #        'synapsin', 'VGAT', 'VGluT1']
    ChanList = []
    ## for getting masked
    #for ch in CHAN_NAMES:
    #    di = [{
    #         'rem': rem,
    #         'ch_rsc':
    #         ChannelResource(ch,COLL_NAME,EXP_NAME,'image',datatype='uint8'),
    #         'ch' : ch,
    #         'res' : 0,
    #         'xrng': bb[i]['x_range'],
    #         'yrng': bb[i]['y_range'],
    #         'zrng': bb[i]['z_range'],
    #         'id'  : bb[i],
    #         'mask': A[i]
    #         } for i in range(len(bb)) if con[i]]
    #    with ThreadPool(num_threads) as tp:
    #        out = tp.map(toolbox.getMaskData, di)
    #    sys.stdout.flush() #DEBUG
    #    print(ch) ##DEBUG
    #    sys.stdout.flush() #DEBUG
    #    ChanList.append(np.asarray(out))
    #cubes = np.asarray(ChanList)
    ## For getting bounding box around centroid of annotation
    #for ch in CHAN_NAMES:
    #    di = [{
    #         'rem': rem,
    #         'ch_rsc':
    #         ChannelResource(ch,COLL_NAME,EXP_NAME,'image',datatype='uint8'),
    #         'ch' : ch,
    #         'res' : 0,
    #         'xrng': [Bcon[i][2] - bf[2], Bcon[i][2] + bf[2] + 1],
    #         'yrng': [Bcon[i][1] - bf[1], Bcon[i][1] + bf[1] + 1],
    #         'zrng': [Bcon[i][0] - bf[0], Bcon[i][0] + bf[0] + 1],
    #         } for i in range(len(Bcon))]
    #    with ThreadPool(num_threads) as tp:
    #        out = tp.map(toolbox.getCube, di)
    #    print(ch) ##DEBUG
    #    sys.stdout.flush() #DEBUG
    #    ChanList.append(np.asarray(out))
    #cubes = np.asarray(ChanList)
    ## for getting all regardles of near boundary
    for ch in CHAN_NAMES:
        # One request dict per centroid; ranges clipped to the volume extent.
        di = [{
            'rem': rem,
            'ch_rsc':
            ChannelResource(ch,COLL_NAME,EXP_NAME,'image',datatype='uint8'),
            'ch' : ch,
            'res' : 0,
            'xrng': [max([Bglobal[i][2] - bf[2], 0]), min([Bglobal[i][2] + bf[2] + 1, ex['x']])],
            'yrng': [max([Bglobal[i][1] - bf[1], 0]), min([Bglobal[i][1] + bf[1] + 1, ex['y']])],
            'zrng': [max([Bglobal[i][0] - bf[0], 0]), min([Bglobal[i][0] + bf[0] + 1, ex['z']])]
            } for i in range(len(Bglobal))]
        with ThreadPool(num_threads) as tp:
            out = tp.map(toolbox.getCube, di)
        print(ch) ##DEBUG
        sys.stdout.flush() #DEBUG
        ChanList.append(np.asarray(out))
    cubes = ChanList
    loc = np.asarray(Bglobal)
    return(cubes, loc)
## END main
def testMain():
    """Ad-hoc driver: run main() on collman/collman15v2 with two channels."""
    chan_names = ['Synapsin647', 'VGluT1_647']
    cubes, loc = main('collman', 'collman15v2', 'collman_collman15v2',
                      CHAN_NAMES=chan_names, num_threads = 6,
                      CONFIG_FILE= 'config.ini')
    Fmaxb = toolbox.Fmaxb(cubes)
    return (cubes, loc, Fmaxb)
## End testMain
if __name__ == '__main__':
    # Command-line entry point: fetch cutouts for all annotations and write
    # per-channel Fmax statistics plus centroid locations as CSV.
    parser = argparse.ArgumentParser(description =
        'Get volume normalized F0 values from annotation id regions in the BOSS ')
    parser.add_argument('-C', help='Valid collection id',
        type = str, metavar='C', default='collman')
    parser.add_argument('-E', help='Valid experiment id',
        type = str, metavar='E', default='collman15v2')
    parser.add_argument('-F', help='valid coordinate frame',
        type = str, metavar='F', default='collman_collman15v2')
    #toolbox.toh5(EXP_NAME, OUTPUT + '.h5', CHAN_NAMES, cubes, loc, F0)
    parser.add_argument('-O', help='output filename',
        type = str, metavar='O', required=True,
        default = 'output')
    # NOTE(review): the two adjacent string literals below concatenate to
    # "user config file for BOSSauthentication" — a space is likely missing.
    parser.add_argument('--con', help='user config file for BOSS'
        'authentication', type = str, metavar='con', required=True)
    args = parser.parse_args()
    COLL_NAME = args.C
    EXP_NAME = args.E
    COORD_FRAME = args.F
    OUTPUT = args.O
    CONFIG_FILE = args.con
    #rem = BossRemote(CONFIG_FILE)
    #CHAN_NAMES = rem.list_channels(COLL_NAME, EXP_NAME)
    ##collman15v2 channels
    CHAN_NAMES = ['DAPI1st', 'DAPI2nd', 'DAPI3rd', 'GABA488', 'GAD647',
                  'gephyrin594', 'GS594', 'MBP488', 'NR1594', 'PSD95_488',
                  'Synapsin647', 'VGluT1_647']
    ##collman14v2 channels
    #CHAN_NAMES = ['bIIItubulin', 'DAPI_2nd', 'DAPI_3rd',
    #              'GABA', 'GAD2', 'VGAT', 'gephyrin',
    #              'NR1', 'VGluT1', 'synapsin', 'PSDr']
    cubes, loc = main(COLL_NAME, EXP_NAME, COORD_FRAME,
                      CHAN_NAMES=CHAN_NAMES,
                      num_threads = 6, CONFIG_FILE= CONFIG_FILE)
    #F0 = toolbox.F0(cubes)
    Fmax = toolbox.Fmaxb(cubes)
    toolbox.mainOUT(Fmax, CHAN_NAMES, OUTPUT)
    # argsort([3,2,1]) == [2,1,0]: reverses loc's (z,y,x) columns to (x,y,z)
    # to match the ['x','y','z'] header below.
    idx = np.argsort([3,2,1])
    toolbox.mainOUT(np.transpose(loc[:,idx]), ['x','y','z'], "locations_"+OUTPUT)
    #toolbox.toh5(EXP_NAME, OUTPUT + '.h5', CHAN_NAMES, cubes, loc, F0)
    print('Done!')
|
kubeflow/examples
|
mnist/web-ui/mnist_client.py
|
#!/usr/bin/env python2.7
'''
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import print_function
import logging
from grpc.beta import implementations
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
from PIL import Image # pylint: disable=wrong-import-order
def get_prediction(image, server_host='127.0.0.1', server_port=9000,
                   server_name="server", timeout=10.0):
  """
  Retrieve a prediction from a TensorFlow model server
  :param image: a MNIST image represented as a 1x784 array
  :param server_host: the address of the TensorFlow server
  :param server_port: the port used by the server
  :param server_name: the name of the server
  :param timeout: the amount of time to wait for a prediction to complete
  :return 0: the integer predicted in the MNIST image
  :return 1: the confidence scores for all classes
  :return 2: the version number of the model handling the request
  """
  print("connecting to:%s:%i" % (server_host, server_port))
  # initialize to server connection
  channel = implementations.insecure_channel(server_host, server_port)
  stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
  # build request against the model's default serving signature
  request = predict_pb2.PredictRequest()
  request.model_spec.name = server_name
  request.model_spec.signature_name = 'serving_default'
  request.inputs['x'].CopyFrom(
      tf.contrib.util.make_tensor_proto(image, shape=image.shape))
  # retrieve results (blocking RPC with the given timeout, in seconds)
  result = stub.Predict(request, timeout)
  resultVal = result.outputs["classes"].int_val[0]
  scores = result.outputs['predictions'].float_val
  # NOTE(review): this reads the same "classes" output as resultVal, so the
  # returned "version" is actually the predicted class, not a model version —
  # confirm intent against the serving signature before relying on it.
  version = result.outputs["classes"].int_val[0]
  return resultVal, scores, version
def random_mnist(save_path=None):
  """
  Pull a random image out of the MNIST test dataset
  Optionally save the selected image as a file to disk
  :param save_path: the path to save the file to. If None, file is not saved
  :return 0: a 1x784 representation of the MNIST image
  :return 1: the ground truth label associated with the image
  :return 2: a bool representing whether the image file was saved to disk
  """
  mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
  # next_batch with batch_size=1 yields one random test example
  batch_size = 1
  batch_x, batch_y = mnist.test.next_batch(batch_size)
  saved = False
  if save_path is not None:
    # save image file to disk; pixel floats in [0,1] scaled to 8-bit grayscale
    try:
      data = (batch_x * 255).astype(np.uint8).reshape(28, 28)
      img = Image.fromarray(data, 'L')
      img.save(save_path)
      saved = True
    except Exception as e:  # pylint: disable=broad-except
      # Saving is best-effort: failure is reported via the returned flag.
      logging.error("There was a problem saving the image; %s", e)
  # batch_y is one-hot, so argmax recovers the integer label
  return batch_x, np.argmax(batch_y), saved
|
tensorflow/lingvo
|
lingvo/tasks/milan/input_generator.py
|
# Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Milan input generator."""
import functools
import re
from absl import logging
from lingvo import compat as tf
from lingvo.core import base_input_generator
from lingvo.core import py_utils
class MilanInputGenerator(base_input_generator.BaseInputGenerator):
  """Common input generator for Milan.
  This class mostly wraps a user-provided `dataset_fn`, which when called
  returns a `tf.data.Dataset` of batched examples to use as input. The function
  must be callable with a batch_size argument, as
  ```dataset = p.dataset_fn(batch_size=42, **p.dataset_fn_kwargs)```.
  The `preprocessors` param enables features to be transformed through a layer
  before being fed to the model. These are configured as a map of feature name
  to layer params. For example, setting ::
    preprocessors['foo'] = FooPreprocessor.Params()
  causes feature `foo` to be replaced with the output of `FooPreprocessor`.
  """
  @classmethod
  def Params(cls):
    """Returns `Params` object for configuring this input generator.
    Callers must set `dataset_fn` before instantiating the input
    generator.
    """
    p = super().Params()
    p.Define(
        'dataset_fn', None, 'Function that constructs a tf.data.Dataset '
        'of input examples. Must be callable as: '
        'dataset_fn(batch_size=42, **dataset_fn_kwargs).')
    p.Define(
        'dataset_fn_kwargs', {}, 'Dict of kwargs to pass to dataset_fn(), '
        'e.g. to override default options. May not contain "batch_size".')
    p.Define(
        'features_to_read', [], 'Regular expression(s) of feature names. '
        'If empty, defaults to all features.')
    p.Define('preprocessors', {},
             'Dictionary of input_feature_name => layer_params.')
    p.Define('preprocess_parallelism', tf.data.experimental.AUTOTUNE,
             'Number of batches to preprocess in parallel.')
    # Set reasonable defaults.
    p.name = 'milan_input_generator'
    p.batch_size = 32
    return p
  def __init__(self, params):
    """Validates params and instantiates configured preprocessor layers."""
    super().__init__(params)
    p = self.params
    # batch_size is injected by GetPreprocessedInputBatch; a user-supplied one
    # would collide with it.
    if 'batch_size' in p.dataset_fn_kwargs:
      raise ValueError('dataset_fn_kwargs may not contain "batch_size".')
    if not isinstance(p.features_to_read, (tuple, list, type(None))):
      raise ValueError(
          'Expected sequence type for "features_to_read"; got {}'.format(
              type(p.features_to_read)))
    if p.preprocessors:
      # Split {feature_name: layer_params} into parallel lists so each child
      # preprocessor layer lines up with the feature it transforms.
      self._preprocessor_input_names, preprocessor_layer_params = list(
          zip(*list(p.preprocessors.items())))
      self.CreateChildren('_preprocessors', list(preprocessor_layer_params))
  def GetPreprocessedInputBatch(self):
    """Builds the tf.data pipeline and returns the next (preprocessed) batch."""
    p = self.params
    # Dataset of parsed examples.
    dataset = p.dataset_fn(
        batch_size=self.InfeedBatchSize(), **p.dataset_fn_kwargs)
    dataset = dataset.map(
        # Force retracing if self.do_eval changes.
        functools.partial(self._PreprocessInputBatch, do_eval=self.do_eval),
        num_parallel_calls=p.preprocess_parallelism)
    dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
    iterator = dataset.make_one_shot_iterator()
    input_batch = iterator.get_next()
    return input_batch
  def _FilterFeaturesByName(self, features):
    """Keeps only features whose names match one of p.features_to_read."""
    p = self.params
    if not p.features_to_read:
      return features
    # Patterns are OR'ed together and matched against the key prefix.
    union_regex = re.compile('({})'.format('|'.join(p.features_to_read)))
    return features.FilterKeyVal(lambda k, _: union_regex.match(k))
  @tf.function(experimental_relax_shapes=True)
  def _PreprocessInputBatch(self, input_batch, do_eval: bool):
    """Filters features and applies the configured preprocessor layers."""
    del do_eval  # Only exists to force separate train/eval mode traces.
    input_batch = py_utils.NestedMap(input_batch)
    input_batch = self._FilterFeaturesByName(input_batch)
    # Apply preprocessors.
    if self.params.preprocessors:
      for input_name, preprocessor in zip(self._preprocessor_input_names,
                                          self._preprocessors):
        input_batch[input_name] = preprocessor(input_batch[input_name])
    # Remove any string features if training on TPU.
    if py_utils.use_tpu():
      input_batch = input_batch.Filter(lambda t: t.dtype != tf.string)
    logging.info('Final input batch: %s', input_batch)
    return input_batch
|
Vadimkin/aristocrats-to-mp3
|
run.py
|
# coding=utf-8
import json
import time
import sys
import urllib
import urllib2
from bs4 import BeautifulSoup
try:
import settings
except ImportError:
print("Rename settings.example to settings.py")
sys.exit(0)
def get_playlist(page):
    """Scrape 'author track' strings from an aristocrats playlist page."""
    soup = BeautifulSoup(urllib2.urlopen(page).read())
    # Each playlist entry lives in a <div class="track">: contents[1] is the
    # author node, contents[2] the track text.
    return ["{0} {1}".format(entry.contents[1].string, entry.contents[2])
            for entry in soup.find_all('div', attrs={'class': 'track'})]
def save_tracks(tracks=None):
    """Search each track on VK audio and download the first match as mp3.

    Sleeps 0.4s between API calls. If tracks is None (or otherwise not
    iterable), the TypeError is caught and "Empty tracklist" is printed.
    """
    try:
        for one_track in tracks:
            payload = {
                'q': one_track,
                'access_token': settings.VK_TOKEN
            }
            request = urllib2.Request(url="https://api.vk.com/method/audio.search?" + urllib.urlencode(payload))
            data = json.loads(urllib2.urlopen(request).read())
            # presumably response[0] is the hit count and response[1] the first
            # match — verify against the VK audio.search API
            if data['response'][0] > 0 and data['response'][1]['url'] is not None:
                file_name = "{0} - {1}".format(data['response'][1]['artist'].encode('utf-8'), data['response'][1]['title'].encode('utf-8'))
                file_url = data['response'][1]['url']
                print("Downloading {0}".format(file_name))
                urllib.urlretrieve(file_url, settings.SAVE_PATH + "/" + file_name + ".mp3")
            else:
                print("NOT FOUND — {0}".format(one_track))
            # throttle between requests to avoid VK rate limiting
            time.sleep(0.4)
    except TypeError:
        # tracks=None (the default) is not iterable
        print("Empty tracklist")
if __name__ == "__main__":
    # A VK access token is required; bail out with instructions if unset.
    if settings.VK_TOKEN == "":
        print("Open {0} in browser, get token from url and put to variable VK_LINK".format(settings.VK_LINK))
        sys.exit(0)
    # NOTE(review): the settings attribute is spelled ARISTOCRATS_PlAYLIST
    # (lowercase 'l') — confirm it matches settings.example.
    tracklist = get_playlist(settings.ARISTOCRATS_PlAYLIST)
    save_tracks(tracklist)
|
quantumlib/Cirq
|
cirq-core/cirq/interop/quirk/cells/frequency_space_cells_test.py
|
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sympy
import cirq
from cirq.interop.quirk.cells.testing import assert_url_to_circuit_returns
def test_frequency_space_gates():
    """Quirk frequency-space cells parse into the expected cirq operations."""
    a, b, c = cirq.LineQubit.range(3)
    t = sympy.Symbol('t')
    # (quirk circuit json, equivalent cirq operation), checked in order.
    cases = [
        ('{"cols":[["QFT3"]]}', cirq.qft(a, b, c)),
        ('{"cols":[["QFT†3"]]}', cirq.inverse(cirq.qft(a, b, c))),
        ('{"cols":[["PhaseGradient3"]]}',
         cirq.PhaseGradientGate(num_qubits=3, exponent=0.5)(a, b, c)),
        ('{"cols":[["PhaseUngradient3"]]}',
         cirq.PhaseGradientGate(num_qubits=3, exponent=-0.5)(a, b, c)),
        ('{"cols":[["grad^t2"]]}',
         cirq.PhaseGradientGate(num_qubits=2, exponent=2 * t)(a, b)),
        ('{"cols":[["grad^t3"]]}',
         cirq.PhaseGradientGate(num_qubits=3, exponent=4 * t)(a, b, c)),
        ('{"cols":[["grad^-t3"]]}',
         cirq.PhaseGradientGate(num_qubits=3, exponent=-4 * t)(a, b, c)),
    ]
    for quirk_json, operation in cases:
        assert_url_to_circuit_returns(quirk_json, cirq.Circuit(operation))
|
vegitron/django-template-preprocess
|
template_preprocess/processor.py
|
from importlib import import_module

from django.conf import settings
from django.core.exceptions import ImproperlyConfigured

from template_preprocess.util.loader import Loader
from template_preprocess.util.content_type import filename_is_html
def process_sub_template(name, seen_templates):
    """Load template *name* and run it through the preprocessing pipeline."""
    raw = Loader().get_template_content(name)
    return process_template_content(
        raw,
        seen_templates,
        subcall=True,
        is_html=filename_is_html(name),
    )
def process_template_content(content,
                             seen_templates=None,
                             subcall=False,
                             is_html=False):
    """Run template *content* through every configured processor.

    The basic strategy is to build the template up to its full
    included/extended size, then minimize or precompute content from there.
    That makes it multi-pass, but it avoids having a dependency order.

    Args:
        content: raw template source text.
        seen_templates: mapping used to track already-visited templates
            across recursive include/extends processing; created fresh
            when None.
        subcall: True when processing an included/extended sub-template.
        is_html: enables processors marked html_only.

    Returns:
        The processed template text, or the original content unchanged if
        any processor raised (worst case is django's default behavior).
    """
    if seen_templates is None:
        seen_templates = {}
    original_content = content
    processors = get_processors()
    for processor in processors:
        try:
            method = processor["method"]
            only_html = processor["html_only"]
            if only_html and not is_html:
                continue
            content = method(content,
                             seen_templates=seen_templates,
                             template_processor=process_sub_template,
                             )
        except Exception:
            # We want to return the original template content if there are any
            # errors. If we're processing an include/extended template, the
            # failure must propagate up so the whole top-level template is
            # left untouched.
            if subcall:
                raise
            return original_content
    return content
def get_default_config():
    """Return the default, ordered processor pipeline configuration.

    Each entry is a dict with a dotted-path "method" and an optional
    "html_only" flag (absent means the step runs for every template).
    """
    return [
        {"method": "template_preprocess.process.extends.handle_extends"},
        {"method": "template_preprocess.process.includes.handle_includes"},
        {"method": "template_preprocess.process.compress_statics.process",
         "html_only": True},
        {"method": "template_preprocess.process.html_minify.process",
         "html_only": True},
        {"method": "template_preprocess.process.static.handle_static_tag",
         "html_only": True},
        # minify won't minify content in <script> tags, so handlebars
        # processing needs to be the last thing done.
        {"method": "template_preprocess.process.handlebars.process"},
    ]
def get_processors():
    """Resolve the configured processor pipeline into callables.

    Reads TEMPLATE_PREPROCESS_PROCESSORS from django settings, falling back
    to get_default_config(), and imports each dotted-path "method".

    Returns:
        List of {"method": callable, "html_only": bool} dicts in
        configuration order.

    Raises:
        ImproperlyConfigured: if a configured module or attribute cannot be
            imported. (Bug fix: ImproperlyConfigured was previously raised
            without ever being imported, producing a NameError instead of
            the intended error.)
    """
    config = getattr(settings,
                     "TEMPLATE_PREPROCESS_PROCESSORS",
                     get_default_config())
    processors = []
    for value in config:
        name = value["method"]
        module, attr = name.rsplit('.', 1)
        try:
            mod = import_module(module)
        except ImportError as e:
            raise ImproperlyConfigured('Error importing module %s: "%s"' %
                                       (module, str(e)))
        try:
            method = getattr(mod, attr)
        except AttributeError:
            raise ImproperlyConfigured('Module "%s" does not define a '
                                       '"%s" method' % (module, attr))
        # "html_only" is optional in the config; normalize it to a bool.
        processors.append({
            "method": method,
            "html_only": bool(value.get("html_only", False)),
        })
    return processors
|
navrasio/mxnet
|
tests/python/gpu/test_kvstore_gpu.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
import sys
import os
import mxnet as mx
import numpy as np
import unittest
from mxnet.test_utils import assert_almost_equal, default_context
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import setup_module, with_seed, teardown
shape = (4, 4)
keys = [5, 7, 11]
str_keys = ['b', 'c', 'd']
def init_kv_with_str(stype='default', kv_type='local'):
    """Create a kvstore of kv_type and initialize it with string keys.

    Initializes single key 'a' plus the module-level str_keys list, each
    with a zero array of the module-level `shape` and the given storage type.
    """
    kv = mx.kv.create(kv_type)
    # single
    kv.init('a', mx.nd.zeros(shape, stype=stype))
    # list: len(keys) == len(str_keys), so this pairs one array per str key
    kv.init(str_keys, [mx.nd.zeros(shape=shape, stype=stype)] * len(keys))
    return kv
# Test seed 89411477 (module seed 1829754103) resulted in a py3-gpu CI runner core dump.
# Not reproducible, so this test is back on random seeds.
@with_seed()
def test_rsp_push_pull():
    """Push row_sparse values from two contexts and verify pulled rows sum to 2."""
    def check_rsp_push_pull(kv_type, is_push_cpu=True):
        kv = init_kv_with_str('row_sparse', kv_type)
        kv.init('e', mx.nd.ones(shape).tostype('row_sparse'))
        # Push ones for key 'e' from two contexts; aggregated rows become 2.
        push_ctxs = [mx.cpu(i) if is_push_cpu else mx.gpu(i) for i in range(2)]
        kv.push('e', [mx.nd.ones(shape, ctx=context).tostype('row_sparse') for context in push_ctxs])
        def check_rsp_pull(kv, count, ctxs, is_same_rowid=False, use_slice=False):
            num_rows = shape[0]
            row_ids = []
            all_row_ids = np.arange(num_rows)
            vals = [mx.nd.sparse.zeros(shape=shape, ctx=ctxs[i], stype='row_sparse') for i in range(count)]
            if is_same_rowid:
                # Every output pulls the same randomly chosen row ids.
                row_id = np.random.randint(num_rows, size=num_rows)
                row_ids = [mx.nd.array(row_id)] * count
            elif use_slice:
                # Row ids come from slices of one larger NDArray.
                total_row_ids = mx.nd.array(np.random.randint(num_rows, size=count*num_rows))
                row_ids = [total_row_ids[i*num_rows : (i+1)*num_rows] for i in range(count)]
            else:
                # Independent random row ids per output.
                for i in range(count):
                    row_id = np.random.randint(num_rows, size=num_rows)
                    row_ids.append(mx.nd.array(row_id))
            row_ids_to_pull = row_ids[0] if (len(row_ids) == 1 or is_same_rowid) else row_ids
            vals_to_pull = vals[0] if len(vals) == 1 else vals
            kv.row_sparse_pull('e', out=vals_to_pull, row_ids=row_ids_to_pull)
            for val, row_id in zip(vals, row_ids):
                retained = val.asnumpy()
                excluded_row_ids = np.setdiff1d(all_row_ids, row_id.asnumpy())
                for row in range(num_rows):
                    # Pulled rows hold 2 (ones pushed from two contexts);
                    # rows not requested stay zero.
                    expected_val = np.zeros_like(retained[row])
                    expected_val += 0 if row in excluded_row_ids else 2
                    assert_almost_equal(retained[row], expected_val)
        check_rsp_pull(kv, 1, [mx.gpu(0)])
        check_rsp_pull(kv, 1, [mx.cpu(0)])
        check_rsp_pull(kv, 4, [mx.gpu(i//2) for i in range(4)])
        check_rsp_pull(kv, 4, [mx.gpu(i//2) for i in range(4)], is_same_rowid=True)
        check_rsp_pull(kv, 4, [mx.cpu(i) for i in range(4)])
        check_rsp_pull(kv, 4, [mx.cpu(i) for i in range(4)], is_same_rowid=True)
        check_rsp_pull(kv, 4, [mx.gpu(i//2) for i in range(4)], use_slice=True)
        check_rsp_pull(kv, 4, [mx.cpu(i) for i in range(4)], use_slice=True)
    # test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/9384
    # check_rsp_push_pull('local')
    check_rsp_push_pull('device')
    check_rsp_push_pull('device', is_push_cpu=False)
def test_row_sparse_pull_single_device():
    """Push a row_sparse gradient on one GPU and pull it back unchanged."""
    kvstore = mx.kv.create('device')
    reference = mx.nd.random_normal(shape=(4, 4), ctx=mx.gpu(0))
    sparse_grad = reference.tostype("row_sparse")
    key = 0
    kvstore.init(key, sparse_grad)
    row_ids = sparse_grad.indices
    kvstore.push(key, sparse_grad)
    kvstore.row_sparse_pull(key, out=sparse_grad, row_ids=row_ids)
    # A single-device round trip must reproduce the dense values exactly.
    assert_almost_equal(sparse_grad.asnumpy(), reference.asnumpy())
def test_rsp_push_pull_large_rowid():
    """Row-sparse pull with a very large row-id range keeps every row."""
    num_rows = 793470
    ones_rsp = mx.nd.ones((num_rows, 1)).tostype('row_sparse').copyto(mx.gpu())
    kv = mx.kv.create('device')
    kv.init('a', ones_rsp)
    pulled = mx.nd.zeros((num_rows, 1), stype='row_sparse').copyto(mx.gpu())
    kv.push('a', ones_rsp)
    # int64 row ids exercise the large-index code path.
    kv.row_sparse_pull('a', out=pulled,
                       row_ids=mx.nd.arange(0, num_rows, dtype='int64'))
    assert (pulled.indices.shape[0] == num_rows)
if __name__ == '__main__':
    # Allow running this test file directly via nose.
    import nose
    nose.runmodule()
|
kejrp23/Python
|
combination.py
|
"""
This Program is being written to incorporate all
things Python that I have learned thus far.
I am writing this in windows notepad to better test
my ablility to do proper spacing and indentions
without having a proper tool that will handle small
things for me.
I'm unsure at the time of this writing where this
program will go. My main goal is or it to just work.
Created by: Jason R. Pittman
Creation start Date: 2/22/2016 10:18am
"""
from datetime import datetime
from time import sleep
from math import *
# --- application start-up ---------------------------------------------------
# NOTE: this script targets Python 2 (it uses raw_input); print() is called
# with a single argument so the syntax is also valid on Python 3.
print("Initializing application..............")
sleep(3)  # was: Sleep(3) -- NameError; the imported name is lowercase `sleep`
print("Welcome to Python Adventures............")
sleep(4)
# was missing the closing parenthesis on raw_input(...)
User_name = raw_input("Please enter your name so I know whom to refer to: ")

# Items carried to work.
work_bag = [
    "Laptop",
    "SSD",
    "FlashDrive",
    "Binder",
    "Pens",
    "Name Badge"
]

# Lunch contents ("apple" was missing its closing quote).
lunch_box = [
    "apple",
    "soup",
    "Ukrop's rolls",
    "mountain dew"
]

# Morning routine steps shown when the user is working today.
preparing_for_work = [
    "Shower",
    "Brush Teeth",
    "get dressed"
]


def getting_ready():
    """Greet the user and, if they are working today, print the routine."""
    print("Good morning " + User_name + " it's time to get ready for work!")
    # was: user_input(...) -- undefined name, and the call was missing its
    # closing parenthesis; raw_input is what the rest of the script uses
    answer = raw_input("Are you going to work today? yes or no please: ")
    # was: `if answer.lower() is == "yes":` -- invalid syntax
    if answer.lower() == "yes":
        # a list cannot be concatenated to a str; join the steps first
        print("OK make sure you do these things! " + ", ".join(preparing_for_work))
    else:
        # was: "we will se you" -- typo
        print("Well enjoy your day off and we will see you next time!")
    return


getting_ready()
|
PapenfussLab/Mungo
|
bin/fasta2phylip.py
|
#!/usr/bin/env python

"""
fasta2phylip.py <input filename> <output filename>

Convert a multiple sequence alignment from fasta to phylip format.

Author: Tony Papenfuss
Date: Mon Jan 8 11:44:37 EST 2007

"""
# NOTE: the docstring previously named the wrong script (clustal2phylip.py);
# it is printed as the usage message below, so it must match this file.

import os, sys
from optparse import OptionParser
from mungo.align import Alignment


usage = "%prog [options] <input file> <output file>"
parser = OptionParser(usage=usage)
options, args = parser.parse_args(sys.argv)

# args[0] is the program name, so exactly two positional arguments are needed.
if len(args) != 3:
    sys.exit(__doc__)

iFilename = args[1]
oFilename = args[2]

# Load the fasta alignment and re-save it in phylip format.
alignment = Alignment.load(iFilename, format='fasta')
alignment.save(oFilename, format='phylip')
|
sergeneren/anima
|
tests/ui/test_version_creator.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
import sys
import shutil
import tempfile
import logging
import unittest
from anima.env.testing import TestEnvironment
# Module logger for the version_creator UI tests.
logger = logging.getLogger('anima.ui.version_creator')
logger.setLevel(logging.DEBUG)
from stalker.models.auth import LocalSession
from anima.ui import IS_PYSIDE, IS_PYQT4, SET_PYSIDE, version_creator
# Force the Qt wrapper selection to PySide before importing any Qt symbols.
SET_PYSIDE()
if IS_PYSIDE():
    logger.debug('environment is set to pyside, importing pyside')
    from PySide import QtCore, QtGui
    from PySide.QtTest import QTest
    from PySide.QtCore import Qt
elif IS_PYQT4():
    logger.debug('environment is set to pyqt4, importing pyqt4')
    import sip
    # API v2 makes QString/QVariant behave like native Python types,
    # matching PySide behaviour.
    sip.setapi('QString', 2)
    sip.setapi('QVariant', 2)
    from PyQt4 import QtCore, QtGui
    from PyQt4.QtTest import QTest
    from PyQt4.QtCore import Qt
from stalker import (db, defaults, User, Project, Repository, Structure,
Status, StatusList, Task, Group, Version)
class VersionCreatorTester(unittest.TestCase):
    """Tests the Version Creator instance

    All fixtures (in-memory sqlite database, repository, projects, tasks and
    versions) are created once in setUpClass and shared by every test method.
    """
    # temporary repository path, created in setUpClass and removed in
    # tearDownClass
    repo_path = ''
    @classmethod
    def setUpClass(cls):
        """setup once
        """
        # remove the transaction manager
        db.DBSession.remove()
        cls.repo_path = tempfile.mkdtemp()
        # mktemp() returns a path that does not exist yet; it is removed
        # again (best effort) in tearDownClass
        defaults.local_storage_path = tempfile.mktemp()
        db.setup({
            'sqlalchemy.url': 'sqlite:///:memory:',
            'sqlalchemy.echo': 'false'
        })
        db.init()
        # create Power Users Group
        cls.power_users_group = Group(name='Power Users')
        db.DBSession.add(cls.power_users_group)
        db.DBSession.commit()
        # create a LocalSession first
        cls.admin = User.query.all()[0]
        cls.lsession = LocalSession()
        cls.lsession.store_user(cls.admin)
        cls.lsession.save()
        # create a repository
        cls.test_repo1 = Repository(
            name='Test Repository',
            windows_path='T:/TestRepo/',
            linux_path='/mnt/T/TestRepo/',
            osx_path='/Volumes/T/TestRepo/'
        )
        cls.test_structure1 = Structure(
            name='Test Project Structure',
            templates=[],
            custom_template=''
        )
        # statuses are pre-created by db.init()
        cls.status_new = Status.query.filter_by(code='NEW').first()
        cls.status_wip = Status.query.filter_by(code='WIP').first()
        cls.status_cmpl = Status.query.filter_by(code='CMPL').first()
        cls.project_status_list = StatusList(
            name='Project Statuses',
            statuses=[cls.status_new, cls.status_wip, cls.status_cmpl],
            target_entity_type=Project
        )
        # create a couple of projects
        cls.test_project1 = Project(
            name='Project 1',
            code='P1',
            repository=cls.test_repo1,
            structure=cls.test_structure1,
            status_list=cls.project_status_list
        )
        cls.test_project2 = Project(
            name='Project 2',
            code='P2',
            repository=cls.test_repo1,
            structure=cls.test_structure1,
            status_list=cls.project_status_list
        )
        cls.test_project3 = Project(
            name='Project 3',
            code='P3',
            repository=cls.test_repo1,
            structure=cls.test_structure1,
            status_list=cls.project_status_list
        )
        cls.projects = [
            cls.test_project1,
            cls.test_project2,
            cls.test_project3
        ]
        cls.test_user1 = User(
            name='Test User',
            # groups=[self.power_users_group],
            login='tuser',
            email='tuser@tusers.com',
            password='secret'
        )
        db.DBSession.add(cls.test_user1)
        db.DBSession.commit()
        # both users are assigned to all three projects
        cls.admin.projects.append(cls.test_project1)
        cls.admin.projects.append(cls.test_project2)
        cls.admin.projects.append(cls.test_project3)
        cls.test_user1.projects.append(cls.test_project1)
        cls.test_user1.projects.append(cls.test_project2)
        cls.test_user1.projects.append(cls.test_project3)
        # project 1
        cls.test_task1 = Task(
            name='Test Task 1',
            project=cls.test_project1,
            resources=[cls.admin],
        )
        cls.test_task2 = Task(
            name='Test Task 2',
            project=cls.test_project1,
            resources=[cls.admin],
        )
        # NOTE(review): test_task3 reuses the name 'Test Task 2' -- looks
        # like a typo for 'Test Task 3'; confirm before renaming.
        cls.test_task3 = Task(
            name='Test Task 2',
            project=cls.test_project1,
            resources=[cls.admin],
        )
        # project 2
        cls.test_task4 = Task(
            name='Test Task 4',
            project=cls.test_project2,
            resources=[cls.admin],
        )
        cls.test_task5 = Task(
            name='Test Task 5',
            project=cls.test_project2,
            resources=[cls.admin],
        )
        cls.test_task6 = Task(
            name='Test Task 6',
            parent=cls.test_task5,
            resources=[cls.admin],
        )
        cls.test_task7 = Task(
            name='Test Task 7',
            parent=cls.test_task5,
            resources=[],
        )
        cls.test_task8 = Task(
            name='Test Task 8',
            parent=cls.test_task5,
            resources=[],
        )
        cls.test_task9 = Task(
            name='Test Task 9',
            parent=cls.test_task5,
            resources=[],
        )
        # +-> Project 1
        # | |
        # | +-> Task1
        # | |
        # | +-> Task2
        # | |
        # | +-> Task3
        # |
        # +-> Project 2
        # | |
        # | +-> Task4
        # | |
        # | +-> Task5
        # |      |
        # |      +-> Task6
        # |      |
        # |      +-> Task7 (no resource)
        # |      |
        # |      +-> Task8 (no resource)
        # |      |
        # |      +-> Task9 (no resource)
        # |
        # +-> Project 3
        # record them all to the db
        db.DBSession.add_all([
            cls.admin, cls.test_project1, cls.test_project2, cls.test_project3,
            cls.test_task1, cls.test_task2, cls.test_task3, cls.test_task4,
            cls.test_task5, cls.test_task6, cls.test_task7, cls.test_task8,
            cls.test_task9
        ])
        db.DBSession.commit()
        cls.all_tasks = [
            cls.test_task1, cls.test_task2, cls.test_task3, cls.test_task4,
            cls.test_task5, cls.test_task6, cls.test_task7, cls.test_task8,
            cls.test_task9
        ]
        # create versions
        cls.test_version1 = Version(
            cls.test_task1,
            created_by=cls.admin,
            created_with='Test',
            description='Test Description'
        )
        db.DBSession.add(cls.test_version1)
        db.DBSession.commit()
        cls.test_version2 = Version(
            cls.test_task1,
            created_by=cls.admin,
            created_with='Test',
            description='Test Description'
        )
        db.DBSession.add(cls.test_version2)
        db.DBSession.commit()
        cls.test_version3 = Version(
            cls.test_task1,
            created_by=cls.admin,
            created_with='Test',
            description='Test Description'
        )
        cls.test_version3.is_published = True
        db.DBSession.add(cls.test_version3)
        db.DBSession.commit()
        # a published version on a separate take; the "@GPU" suffix marks a
        # representation take (see the representation tests below)
        cls.test_version4 = Version(
            cls.test_task1,
            take_name='Main@GPU',
            created_by=cls.admin,
            created_with='Test',
            description='Test Description'
        )
        cls.test_version4.is_published = True
        db.DBSession.add(cls.test_version4)
        db.DBSession.commit()
        # reuse the process-wide QApplication if one already exists
        if not QtGui.QApplication.instance():
            logger.debug('creating a new QApplication')
            cls.app = QtGui.QApplication(sys.argv)
        else:
            logger.debug('using the present QApplication: %s' % QtGui.qApp)
            # self.app = QtGui.qApp
            cls.app = QtGui.QApplication.instance()
        # cls.test_environment = TestEnvironment()
        cls.dialog = version_creator.MainDialog()
        # environment=cls.test_environment
        # )
    @classmethod
    def tearDownClass(cls):
        """teardown once
        """
        # ignore_errors=True: the local storage path may never have been
        # created
        shutil.rmtree(
            defaults.local_storage_path,
            True
        )
        shutil.rmtree(cls.repo_path)
        # configure with transaction manager
        db.DBSession.remove()
    def show_dialog(self, dialog):
        """show the given dialog
        """
        # blocks until the dialog is closed; used only for manual inspection
        dialog.show()
        self.app.exec_()
        self.app.connect(
            self.app,
            QtCore.SIGNAL("lastWindowClosed()"),
            self.app,
            QtCore.SLOT("quit()")
        )
    def test_close_button_closes_ui(self):
        """testing if the close button is closing the ui
        """
        self.dialog.show()
        self.assertEqual(self.dialog.isVisible(), True)
        # now run the UI
        QTest.mouseClick(self.dialog.close_pushButton, Qt.LeftButton)
        self.assertEqual(self.dialog.isVisible(), False)
    def test_login_dialog_is_shown_if_there_are_no_logged_in_user(self):
        """testing if the login dialog is shown if there is no logged in user
        """
        # placeholder: intentionally failing until implemented
        self.fail("Test is not implemented yet")
    def test_logged_in_user_field_is_updated_correctly(self):
        """testing if the logged_in_user field is updated correctly
        """
        # now expect to see the admin.name on the dialog.logged_in_user_label
        self.assertEqual(
            self.dialog.logged_in_user_label.text(),
            self.admin.name
        )
    def test_logout_button_shows_the_login_dialog(self):
        """logout dialog shows the login_dialog
        """
        # placeholder: intentionally failing until implemented
        self.fail('test is not implemented yet')
    def test_tasks_tree_view_is_filled_with_projects(self):
        """testing if the tasks_treeView is filled with projects as root
        level items
        """
        # now call the dialog and expect to see all these projects as root
        # level items in tasks_treeView
        # NOTE(review): admin is a resource on six tasks in setUpClass --
        # confirm this expected count of 5 is intentional.
        self.assertEqual(
            len(self.admin.tasks),
            5
        )
        task_tree_model = self.dialog.tasks_treeView.model()
        row_count = task_tree_model.rowCount()
        self.assertEqual(3, row_count)
        index = task_tree_model.index(0, 0)
        p1_item = task_tree_model.itemFromIndex(index)
        self.assertEqual(p1_item.task, self.test_project1)
        index = task_tree_model.index(1, 0)
        p2_item = task_tree_model.itemFromIndex(index)
        self.assertEqual(p2_item.task, self.test_project2)
        index = task_tree_model.index(2, 0)
        p3_item = task_tree_model.itemFromIndex(index)
        self.assertEqual(p3_item.task, self.test_project3)
        # self.show_dialog(dialog)
    def test_tasks_tree_view_lists_all_tasks_properly(self):
        """testing if the tasks_treeView lists all the tasks properly
        """
        task_tree_model = self.dialog.tasks_treeView.model()
        row_count = task_tree_model.rowCount()
        self.assertEqual(3, row_count)
        # project1
        index = task_tree_model.index(0, 0)
        p1_item = task_tree_model.itemFromIndex(index)
        self.assertEqual(p1_item.task, self.test_project1)
        # project2
        index = task_tree_model.index(1, 0)
        p2_item = task_tree_model.itemFromIndex(index)
        self.assertEqual(p2_item.task, self.test_project2)
        # project3
        index = task_tree_model.index(2, 0)
        p3_item = task_tree_model.itemFromIndex(index)
        self.assertEqual(p3_item.task, self.test_project3)
        # self.show_dialog(self.dialog)
        # task1
        task1_item = p1_item.child(0, 0)
        self.assertEqual(task1_item.task, self.test_task1)
    def test_tasks_treeView_lists_only_my_tasks_if_checked(self):
        """testing if the tasks_treeView lists only my tasks if
        my_tasks_only_checkBox is checked
        """
        item_model = self.dialog.tasks_treeView.model()
        selection_model = self.dialog.tasks_treeView.selectionModel()
        # check show my tasks only check box
        self.dialog.my_tasks_only_checkBox.setChecked(True)
        # check if all my tasks are represented in the tree
        my_tasks = self.admin.tasks
        # generate a list of parent tasks
        all_my_parent_tasks = []
        for task in my_tasks:
            all_my_parent_tasks += task.parents
        all_my_parent_tasks = list(set(all_my_parent_tasks))
        for task in my_tasks:
            self.dialog.find_and_select_entity_item_in_treeView(
                task,
                self.dialog.tasks_treeView
            )
            # get the current selection
            self.assertEqual(
                task,
                self.dialog.get_task_id()
            )
        # check if non of the other tasks or their parents are visible
        for task in self.all_tasks:
            if task not in my_tasks and task not in all_my_parent_tasks:
                self.dialog.find_and_select_entity_item_in_treeView(
                    task,
                    self.dialog.tasks_treeView
                )
                # get the current selection
                self.assertTrue(self.dialog.get_task_id() is None)
        # now un check it and check if all tasks are shown
        self.dialog.my_tasks_only_checkBox.setChecked(False)
        # check if all the tasks are present in the tree
        for task in self.all_tasks:
            self.dialog.find_and_select_entity_item_in_treeView(
                task,
                self.dialog.tasks_treeView
            )
            # get the current selection
            self.assertEqual(self.dialog.get_task_id(), task)
    def test_takes_listWidget_lists_Main_by_default(self):
        """testing if the takes_listWidget lists "Main" by default
        """
        dialog = version_creator.MainDialog()
        self.assertEqual(
            defaults.version_take_name,
            dialog.takes_listWidget.currentItem().text()
        )
    def test_takes_listWidget_lists_Main_by_default_for_tasks_with_no_versions(self):
        """testing if the takes_listWidget lists "Main" by default for a task
        with no version
        """
        # now call the dialog and expect to see all these projects as root
        # level items in tasks_treeView
        dialog = version_creator.MainDialog()
        # self.show_dialog(dialog)
        self.assertEqual(
            defaults.version_take_name,
            dialog.takes_listWidget.currentItem().text()
        )
    def test_takes_listWidget_lists_Main_by_default_for_projects_with_no_tasks(self):
        """testing if the takes_listWidget lists "Main" by default for a
        project with no tasks
        """
        # now call the dialog and expect to see all these projects as root
        # level items in tasks_treeView
        dialog = version_creator.MainDialog()
        # self.show_dialog(dialog)
        self.assertEqual(
            defaults.version_take_name,
            dialog.takes_listWidget.currentItem().text()
        )
    def test_tasks_treeView_tasks_are_sorted(self):
        """testing if tasks in tasks_treeView are sorted according to their
        names
        """
        item_model = self.dialog.tasks_treeView.model()
        selection_model = self.dialog.tasks_treeView.selectionModel()
        index = item_model.index(0, 0)
        project1_item = item_model.itemFromIndex(index)
        self.dialog.tasks_treeView.expand(index)
        task1_item = project1_item.child(0, 0)
        self.assertEqual(task1_item.text(), self.test_task1.name)
        task2_item = project1_item.child(1, 0)
        self.assertEqual(task2_item.text(), self.test_task2.name)
    def test_tasks_treeView_do_not_cause_a_segfault(self):
        """there was a bug causing a segfault
        """
        # regression test: constructing the dialog repeatedly must not crash
        dialog = version_creator.MainDialog()
        dialog = version_creator.MainDialog()
        dialog = version_creator.MainDialog()
    def test_previous_versions_tableWidget_is_filled_with_proper_info(self):
        """testing if the previous_versions_tableWidget is filled with proper
        information
        """
        # select the t1
        item_model = self.dialog.tasks_treeView.model()
        selection_model = self.dialog.tasks_treeView.selectionModel()
        index = item_model.index(0, 0)
        project1_item = item_model.itemFromIndex(index)
        # expand it
        self.dialog.tasks_treeView.expand(index)
        # get first child which is task1
        task1_item = project1_item.child(0, 0)
        # select task1
        selection_model.select(
            task1_item.index(),
            QtGui.QItemSelectionModel.Select
        )
        # select the first take
        self.dialog.takes_listWidget.setCurrentRow(0)
        # the row count should be 2
        self.assertEqual(
            self.dialog.previous_versions_tableWidget.rowCount(),
            3
        )
        # now check if the previous versions tableWidget has the info
        versions = [self.test_version1, self.test_version2, self.test_version3]
        for i in range(len(versions)):
            # column 0: version number, column 2: creator, column 6: description
            self.assertEqual(
                int(self.dialog.previous_versions_tableWidget.item(i, 0).text()),
                versions[i].version_number
            )
            self.assertEqual(
                self.dialog.previous_versions_tableWidget.item(i, 2).text(),
                versions[i].created_by.name
            )
            self.assertEqual(
                self.dialog.previous_versions_tableWidget.item(i, 6).text(),
                versions[i].description
            )
    def test_get_new_version_with_publish_check_box_is_checked_creates_published_version(self):
        """testing if checking publish_checkbox will create a published Version
        instance
        """
        # select the t1
        item_model = self.dialog.tasks_treeView.model()
        selection_model = self.dialog.tasks_treeView.selectionModel()
        index = item_model.index(0, 0)
        project1_item = item_model.itemFromIndex(index)
        # expand it
        self.dialog.tasks_treeView.expand(index)
        # get first child which is task1
        task1_item = project1_item.child(0, 0)
        # select task1
        selection_model.select(
            task1_item.index(),
            QtGui.QItemSelectionModel.Select
        )
        # first check if unpublished
        new_version = self.dialog.get_new_version()
        # is_published should be False while the checkbox is unchecked
        self.assertFalse(new_version.is_published)
        # check task
        self.assertEqual(new_version.task, self.test_task1)
        # check the publish checkbox
        self.dialog.publish_checkBox.setChecked(True)
        new_version = self.dialog.get_new_version()
        # check task
        self.assertEqual(new_version.task, self.test_task1)
        # is_published should be True
        self.assertTrue(new_version.is_published)
    def test_users_can_change_the_publish_state_if_they_are_the_owner(self):
        """testing if the users are able to change the publish method if it is
        their versions
        """
        # select the t1
        item_model = self.dialog.tasks_treeView.model()
        selection_model = self.dialog.tasks_treeView.selectionModel()
        index = item_model.index(0, 0)
        project1_item = item_model.itemFromIndex(index)
        # expand it
        self.dialog.tasks_treeView.expand(index)
        # get first child which is task1
        task1_item = project1_item.child(0, 0)
        # select task1
        selection_model.select(
            task1_item.index(),
            QtGui.QItemSelectionModel.Select
        )
        # check if the menu item has a publish method for v8
        # placeholder: intentionally failing until implemented
        self.fail('test is not completed yet')
    def test_thumbnails_are_displayed_correctly(self):
        """testing if the thumbnails are displayed correctly
        """
        # placeholder: intentionally failing until implemented
        self.fail('test is not implemented yet')
    def test_representations_combo_box_lists_all_representations_of_current_env(self):
        """testing if representations_comboBox lists all the possible
        representations in current environment
        """
        test_environment = TestEnvironment()
        dialog = version_creator.MainDialog(
            environment=test_environment
        )
        for i in range(len(TestEnvironment.representations)):
            repr_name = TestEnvironment.representations[i]
            combo_box_text = dialog.representations_comboBox.itemText(i)
            self.assertEqual(repr_name, combo_box_text)
    def test_repr_as_separate_takes_check_box_is_unchecked_by_default(self):
        """testing if repr_as_separate_takes_checkBox is unchecked by default
        """
        self.assertFalse(
            self.dialog.repr_as_separate_takes_checkBox.isChecked()
        )
    def test_repr_as_separate_takes_check_box_is_working_properly(self):
        """testing if when the repr_as_separate_takes_checkBox is checked it
        will update the takes_listWidget to also show representation takes
        """
        # select project 1 -> task1
        item_model = self.dialog.tasks_treeView.model()
        selection_model = self.dialog.tasks_treeView.selectionModel()
        index = item_model.index(0, 0)
        project1_item = item_model.itemFromIndex(index)
        self.dialog.tasks_treeView.expand(index)
        task1_item = project1_item.child(0, 0)
        selection_model.select(
            task1_item.index(),
            QtGui.QItemSelectionModel.Select
        )
        # expect only one "Main" take listed in take_listWidget
        self.assertEqual(
            sorted(self.dialog.takes_listWidget.take_names),
            ['Main']
        )
        # check the repr_as_separate_takes_checkBox
        self.dialog.repr_as_separate_takes_checkBox.setChecked(True)
        # expect two takes of "Main" and "Main@GPU" (from test_version4)
        self.assertEqual(
            sorted(self.dialog.takes_listWidget.take_names),
            ['Main', 'Main@GPU']
        )
        # self.show_dialog(self.dialog)
    def test_takes_with_representations_shows_in_blue(self):
        """testing if takes with representations will be displayed in blue
        """
        # select project 1 -> task1
        item_model = self.dialog.tasks_treeView.model()
        selection_model = self.dialog.tasks_treeView.selectionModel()
        index = item_model.index(0, 0)
        project1_item = item_model.itemFromIndex(index)
        self.dialog.tasks_treeView.expand(index)
        task1_item = project1_item.child(0, 0)
        selection_model.select(
            task1_item.index(),
            QtGui.QItemSelectionModel.Select
        )
        # expect only one "Main" take listed in take_listWidget
        main_item = self.dialog.takes_listWidget.item(0)
        item_foreground = main_item.foreground()
        color = item_foreground.color()
        # pure blue marks a take that has representations
        self.assertEqual(
            color,
            QtGui.QColor(0, 0, 255)
        )
|
agoose77/hivesystem
|
manual/movingpanda/panda-6.py
|
import dragonfly
import dragonfly.pandahive
import bee
from bee import connect
import math, functools
from panda3d.core import NodePath
import dragonfly.scene.unbound
import dragonfly.std
import dragonfly.io
import dragonfly.canvas
# ## random matrix generator
from random import random
def random_matrix_generator():
    """Yield an endless stream of random spawn transforms.

    Each transform has a random heading and a random XY position inside a
    15x15 square centred on the origin.
    """
    while True:
        node = NodePath("")
        node.setHpr(360 * random(), 0, 0)
        node.setPos(15 * random() - 7.5, 15 * random() - 7.5, 0)
        yield dragonfly.scene.matrix(node, "NodePath")
def id_generator():
    """Yield a fresh entity id ("spawnedpanda0", "spawnedpanda1", ...) per spawn.

    Bug fix: the counter was never incremented, so every spawned panda got
    the identical id "spawnedpanda0"; the counter now advances after each
    yield so ids are unique.
    """
    n = 0
    while 1:
        yield "spawnedpanda" + str(n)
        n += 1
from dragonfly.canvas import box2d, canvasargs
from bee.drone import dummydrone
from libcontext.pluginclasses import plugin_single_required
class parameters: pass
class myscene(bee.frame):
    """Scene frame: environment, one panda actor, a spawnable panda class
    and a clickable panda icon. Parameters are supplied by myhive below."""
    pandaclassname_ = bee.get_parameter("pandaclassname")
    pandaname_ = bee.get_parameter("pandaname")
    pandaicon_ = bee.get_parameter("pandaicon")
    # c1: the environment model, scaled to 0.25 and offset to (-8, 42, 0)
    c1 = bee.configure("scene")
    c1.import_mesh_EGG("models/environment")
    a = NodePath("")
    a.setScale(0.25)
    a.setPos(-8, 42, 0)
    mat = a.getMat()
    # matrix rows reordered into (translation, x-axis, y-axis, z-axis)
    m = (mat.getRow3(3), mat.getRow3(0), mat.getRow3(1), mat.getRow3(2))
    c1.add_model_MATRIX(matrix=m)
    # c2: a single panda actor with its "walk" animation
    c2 = bee.configure("scene")
    c2.import_mesh_EGG("models/panda-model")
    a = NodePath("")
    a.setScale(0.005)
    mat = a.getMat()
    m = (mat.getRow3(3), mat.getRow3(0), mat.getRow3(1), mat.getRow3(2))
    c2.add_actor_MATRIX(matrix=m, entityname=pandaname_)
    c2.import_mesh_EGG("models/panda-walk4")
    c2.add_animation("walk")
    # c3: an actor *class* with the same mesh/animation, used for spawning
    c3 = bee.configure("scene")
    c3.import_mesh_EGG("models/panda-model")
    a = NodePath("")
    a.setScale(0.005)
    mat = a.getMat()
    m = (mat.getRow3(3), mat.getRow3(0), mat.getRow3(1), mat.getRow3(2))
    c3.add_actorclass_MATRIX(matrix=m, actorclassname=pandaclassname_)
    c3.import_mesh_EGG("models/panda-walk4")
    c3.add_animation("walk")
    # a clickable 96x96 panda icon, drawn on the canvas via a dummy drone
    box = box2d(50, 470, 96, 96)
    params = parameters()
    params.transparency = True
    args = canvasargs("pandaicon.png", pandaicon_, box, params)
    plugin = plugin_single_required(args)
    pattern = ("canvas", "draw", "init", ("object", "image"))
    d1 = dummydrone(plugindict={pattern: plugin})
    i1 = bee.init("mousearea")
    i1.register(pandaicon_, box)
    # drop the temporaries so they do not become frame attributes
    del a, m, mat, box, params, args, plugin, pattern
class myhive(dragonfly.pandahive.pandahive):
    """Main hive: wires keyboard/mouse input to panda animation and spawning."""
    pandaname = "mypanda"
    pandaname_ = bee.attribute("pandaname")
    pandaclassname = "pandaclass"
    pandaclassname_ = bee.attribute("pandaclassname")
    pandaicon = "pandaicon"
    pandaicon_ = bee.attribute("pandaicon")
    canvas = dragonfly.pandahive.pandacanvas()
    mousearea = dragonfly.canvas.mousearea()
    # re-raise exceptions coming out of the event system
    raiser = bee.raiser()
    connect("evexc", raiser)
    # W loops the "walk" animation on the main panda, S stops it
    animation = dragonfly.scene.unbound.animation()
    pandaid = dragonfly.std.variable("id")(pandaname_)
    walk = dragonfly.std.variable("str")("walk")
    connect(pandaid, animation.actor)
    connect(walk, animation.animation_name)
    key_w = dragonfly.io.keyboardsensor_trigger("W")
    connect(key_w, animation.loop)
    key_s = dragonfly.io.keyboardsensor_trigger("S")
    connect(key_s, animation.stop)
    # spawning: weave a fresh id with a random transform, then push the pair
    # through a transistor that fires on Z or on a click of the panda icon
    pandaspawn = dragonfly.scene.spawn_actor()
    v_panda = dragonfly.std.variable("id")(pandaclassname_)
    connect(v_panda, pandaspawn)
    panda_id = dragonfly.std.generator("id", id_generator)()
    random_matrix = dragonfly.std.generator(("object", "matrix"), random_matrix_generator)()
    w_spawn = dragonfly.std.weaver(("id", ("object", "matrix")))()
    connect(panda_id, w_spawn.inp1)
    connect(random_matrix, w_spawn.inp2)
    do_spawn = dragonfly.std.transistor(("id", ("object", "matrix")))()
    connect(w_spawn, do_spawn)
    connect(do_spawn, pandaspawn.spawn_matrix)
    key_z = dragonfly.io.keyboardsensor_trigger("Z")
    connect(key_z, do_spawn)
    pandaicon_click = dragonfly.io.mouseareasensor(pandaicon_)
    connect(pandaicon_click, do_spawn)
    # instantiate the scene frame, passing our attributes as its parameters
    myscene = myscene(
        scene="scene",
        pandaname=pandaname_,
        pandaclassname=pandaclassname_,
        canvas=canvas,
        mousearea=mousearea,
        pandaicon=pandaicon_
    )
# Build, wire and start the hive.
main = myhive().getinstance()
main.build("main")
main.place()
main.close()
main.init()
from direct.task import Task
def spinCameraTask(camera, task):
    """Panda3D task: orbit the camera around the origin at 30 degrees/sec."""
    angleDegrees = task.time * 30.0
    angleRadians = angleDegrees * (math.pi / 180.0)
    camera.setPos(20 * math.sin(angleRadians), -20.0 * math.cos(angleRadians), 3)
    camera.setHpr(angleDegrees, 0, 0)
    return Task.cont  # keep the task scheduled
main.window.taskMgr.add(functools.partial(spinCameraTask, main.window.camera), "SpinCameraTask")
main.run()
|
tgrogers/gpgpu-sim_simulations
|
util/job_launching/common.py
|
from optparse import OptionParser
import subprocess
import re
import os
import yaml
import glob
import hashlib
# Absolute directory containing this script, with a trailing slash.
this_directory = os.path.dirname(os.path.realpath(__file__)) + "/"
# Registries populated by load_defined_yamls() from apps/define-*.yml and
# configs/define-*.yml.
defined_apps = {}
defined_baseconfigs = {}
defined_xtracfgs = {}
def get_argfoldername( args ):
    """Return a filesystem-safe folder name derived from a benchmark's args.

    Empty or None argument lists map to "NO_ARGS"; otherwise every character
    that is not alphanumeric is replaced with "_". Argument strings longer
    than 256 characters are replaced by an md5 digest so paths stay short.
    """
    if args == "" or args == None:
        return "NO_ARGS"
    else:
        foldername = re.sub(r"[^a-z^A-Z^0-9]", "_", str(args).strip())
        # For every long arg lists - create a hash of the input args
        if len(str(args)) > 256:
            # str(...).encode() so this works with non-string args and on
            # Python 3, where hashlib.md5 requires bytes
            foldername = "hashed_args_" + hashlib.md5(str(args).encode("utf-8")).hexdigest()
        return foldername
# Test to see if the passed config adheres to any defined configs and add it to the configrations to run/collect.
def get_config(name, defined_baseconfigs, defined_xtracfgs):
    """Resolve "<base>-<xtra>-..." into (name, extra_param_text, base_file).

    The first '-'-separated token must be a defined base config; every
    following token must be a defined extra config, whose parameter text is
    appended to the second tuple element. Returns None (after printing a
    diagnostic) when any token is unknown.
    """
    tokens = name.split('-')
    if tokens[0] not in defined_baseconfigs:
        # single-argument print() is valid on both Python 2 and Python 3
        # (typo fixed: "fined" -> "find")
        print("Could not find {0} in defined basenames {1}".format(tokens[0], defined_baseconfigs))
        return None
    config = (name, "", defined_baseconfigs[tokens[0]])
    for token in tokens[1:]:
        if token not in defined_xtracfgs:
            print("Could not find {0} in defined xtraconfigs {1}".format(token, defined_xtracfgs))
            return None
        oldName, oldXtra, oldBasename = config
        config = \
            (oldName, oldXtra + "\n#{0}\n{1}\n".format(token, defined_xtracfgs[token]), oldBasename)
    return config
def load_defined_yamls():
    """Populate the module-level app and config registries from YAML files."""
    for app_yaml in glob.glob(os.path.join(this_directory, 'apps/define-*.yml')):
        parse_app_definition_yaml( os.path.join(this_directory, 'apps', app_yaml), defined_apps)
    for cfg_yaml in glob.glob(os.path.join(this_directory, 'configs/define-*.yml')):
        parse_config_definition_yaml( os.path.join(this_directory, 'configs', cfg_yaml), defined_baseconfigs, defined_xtracfgs )
def parse_app_definition_yaml( def_yml, apps ):
    """Parse one apps/define-*.yml file into the `apps` registry.

    For every suite the registry gains keys at three granularities --
    "suite", "suite:exe" and "suite:exe:arg_index" -- each mapping to a list
    of (exec_dir, data_dirs, exe_name, args_list) tuples.
    """
    benchmark_yaml = yaml.load(open(def_yml), Loader=yaml.FullLoader)
    for suite in benchmark_yaml:
        apps[suite] = []
        for exe in benchmark_yaml[suite]['execs']:
            # each exec entry is a one-item mapping {exe_name: args_list};
            # wrap the dict views in list() so this also works on Python 3,
            # where .keys()/.values() are not indexable
            exe_name = list(exe.keys())[0]
            args_list = list(exe.values())[0]
            apps[suite].append(( benchmark_yaml[suite]['exec_dir'],
                                 benchmark_yaml[suite]['data_dirs'],
                                 exe_name, args_list ))
            apps[suite + ":" + exe_name] = []
            apps[suite + ":" + exe_name].append( ( benchmark_yaml[suite]['exec_dir'],
                                                   benchmark_yaml[suite]['data_dirs'],
                                                   exe_name, args_list ) )
            count = 0
            for args in args_list:
                apps[suite + ":" + exe_name + ":" + str(count) ] = []
                apps[suite + ":" + exe_name + ":" + str(count) ].append( ( benchmark_yaml[suite]['exec_dir'],
                                                                           benchmark_yaml[suite]['data_dirs'],
                                                                           exe_name, [args] ) )
                count += 1
    return
def parse_config_definition_yaml( def_yml, defined_baseconfigs, defined_xtracfgs ):
    """Parse one configs/define-*.yml file into the base/extra registries."""
    parsed = yaml.load(open( def_yml ), Loader=yaml.FullLoader)
    for cfg_name in parsed:
        entry = parsed[cfg_name]
        if 'base_file' in entry:
            # base configs point at a config file path (env vars expanded)
            defined_baseconfigs[cfg_name] = os.path.expandvars(entry['base_file'])
        elif 'extra_params' in entry:
            defined_xtracfgs[cfg_name] = entry['extra_params']
    return
def gen_apps_from_suite_list( app_list ):
    """Flatten the registered benchmark tuples for every requested suite key."""
    collected = []
    for suite_key in app_list:
        collected.extend(defined_apps[suite_key])
    return collected
def gen_configs_from_list( cfg_list ):
    """Resolve each requested config name against the defined registries."""
    return [get_config(cfg, defined_baseconfigs, defined_xtracfgs)
            for cfg in cfg_list]
def get_cuda_version(this_directory):
    """Return the CUDA release (e.g. "10.1") reported by `nvcc --version`.

    The version is also exported as $CUDA_VERSION for downstream scripts.
    A pid-suffixed temp file in this_directory captures nvcc's output and
    is removed afterwards.
    """
    out_path = os.path.join( this_directory, "nvcc_out.{0}.txt".format(os.getpid()) )
    with open(out_path, 'w+') as out_file:
        subprocess.call(["nvcc", "--version"], stdout=out_file)
        out_file.seek(0)
        raw_output = out_file.read().strip().replace("\n", " ")
    os.remove(out_path)
    cuda_version = re.sub(r".*release (\d+\.\d+).*", r"\1", raw_output)
    os.environ['CUDA_VERSION'] = cuda_version
    return cuda_version
# This function exists so that this file can accept both absolute and relative paths
# If no name is provided it sets the default
# Either way it does a test if the absolute path exists and if not, tries a relative path
def file_option_test(name, default, this_directory):
    """Validate a file option; fall back to `default`, then to a cwd-relative path."""
    if name == "":
        if default == "":
            return ""
        name = os.path.join(this_directory, default)
    try:
        open(name).close()
    except IOError:
        # not found as given -- retry relative to the current directory
        name = os.path.join(os.getcwd(), name)
        try:
            open(name).close()
        except IOError:
            exit("Error - cannot open file {0}".format(name))
    return name
def dir_option_test(name, default, this_directory):
    """Validate a directory option; fall back to `default`, then to a cwd-relative path."""
    if name == "":
        name = os.path.join(this_directory, default)
    if not os.path.isdir(name):
        # not found as given -- retry relative to the current directory
        name = os.path.join(os.getcwd(), name)
        if not os.path.isdir(name):
            exit("Error - cannot open file {0}".format(name))
    return name
def parse_run_simulations_options():
    """Build and parse the command-line options for the run_simulations tool.

    Returns the optparse (options, args) pair, with surrounding whitespace
    stripped from every string-valued option.
    """
    parser = OptionParser()
    parser.add_option("-B", "--benchmark_list", dest="benchmark_list",
                  help="a comma seperated list of benchmark suites to run. See apps/define-*.yml for " +\
                        "the benchmark suite names.",
                  default="rodinia_2.0-ft")
    parser.add_option("-C", "--configs_list", dest="configs_list",
                  help="a comma seperated list of configs to run. See configs/define-*.yml for " +\
                        "the config names.",
                  default="GTX480")
    parser.add_option("-p", "--benchmark_exec_prefix", dest="benchmark_exec_prefix",
                 help="When submitting the job to torque this string" +\
                 " is placed before the command line that runs the benchmark. " +\
                 " Useful when wanting to run valgrind.", default="")
    parser.add_option("-r", "--run_directory", dest="run_directory",
                  help="Name of directory in which to run simulations",
                  default="")
    parser.add_option("-n", "--no_launch", dest="no_launch", action="store_true",
                  help="When set, no torque jobs are launched.  However, all"+\
                  " the setup for running is performed. ie, the run"+\
                  " directories are created and are ready to run."+\
                  " This can be useful when you want to create a new"+\
                  " configuration, but want to test it locally before "+\
                  " launching a bunch of jobs.")
    parser.add_option("-s", "--so_dir", dest="so_dir",
                  help="Point this to the directory that your .so is stored in. If nothing is input here - "+\
                       "the scripts will assume that you are using the so built in GPGPUSIM_ROOT.",
                  default="")
    parser.add_option("-N", "--launch_name", dest="launch_name", default="",
                  help="Pass if you want to name the launch. This will determine the name of the logfile.\n" +\
                       "If you do not name the file, it will just use the current date/time.")
    parser.add_option("-T", "--trace_dir", dest="trace_dir", default="",
                  help="Pass this option to run the simulator in trace-driven mode."+\
                       " The directory passed should be the root of all the trace files.")
    parser.add_option("-M", "--job_mem", dest="job_mem", default="",
                  help="Memory usgae of the job in MB.")
    (options, args) = parser.parse_args()
    # Parser seems to leave some whitespace on the options, getting rid of it
    if options.trace_dir != "":
        # also validate that the trace directory actually exists
        options.trace_dir = dir_option_test( options.trace_dir.strip(), "", this_directory )
    options.configs_list = options.configs_list.strip()
    options.benchmark_exec_prefix = options.benchmark_exec_prefix.strip()
    options.benchmark_list = options.benchmark_list.strip()
    options.run_directory = options.run_directory.strip()
    options.so_dir = options.so_dir.strip()
    options.launch_name = options.launch_name.strip()
    options.job_mem = options.job_mem.strip()
    return (options, args)
|
sklam/numba
|
numba/cpython/hashing.py
|
"""
Hash implementations for Numba types
"""
import math
import numpy as np
import sys
import ctypes
import warnings
from collections import namedtuple
import llvmlite.binding as ll
import llvmlite.llvmpy.core as lc
from llvmlite import ir
from numba.core.extending import (
overload, overload_method, intrinsic, register_jitable)
from numba.core import errors
from numba.core import types, utils
from numba.core.unsafe.bytes import grab_byte, grab_uint64_t
_py38_or_later = utils.PYVERSION >= (3, 8)
# This is Py_hash_t, which is a Py_ssize_t, which has sizeof(size_t):
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Include/pyport.h#L91-L96 # noqa: E501
_hash_width = sys.hash_info.width
_Py_hash_t = getattr(types, 'int%s' % _hash_width)
_Py_uhash_t = getattr(types, 'uint%s' % _hash_width)
# Constants from CPython source, obtained by various means:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Include/pyhash.h # noqa: E501
# These mirror the running interpreter's hash parameters via sys.hash_info.
_PyHASH_INF = sys.hash_info.inf
_PyHASH_NAN = sys.hash_info.nan
_PyHASH_MODULUS = _Py_uhash_t(sys.hash_info.modulus)
_PyHASH_BITS = 31 if types.intp.bitwidth == 32 else 61  # mersenne primes
_PyHASH_MULTIPLIER = 0xf4243  # 1000003UL
_PyHASH_IMAG = _PyHASH_MULTIPLIER
_PyLong_SHIFT = sys.int_info.bits_per_digit
_Py_HASH_CUTOFF = sys.hash_info.cutoff
_Py_hashfunc_name = sys.hash_info.algorithm
# hash(obj) is implemented by calling obj.__hash__()
@overload(hash)
def hash_overload(obj):
    """Numba overload of builtin hash(): delegate to the type's __hash__."""
    def hash_impl(obj):
        return obj.__hash__()
    return hash_impl
@register_jitable
def process_return(val):
    """Coerce a computed hash to Py_hash_t, remapping the reserved
    error value -1 to -2, exactly as CPython does."""
    ret = _Py_hash_t(val)
    return int(-2) if ret == int(-1) else ret
# This is a translation of CPython's _Py_HashDouble:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L34-L129 # noqa: E501
@register_jitable(locals={'x': _Py_uhash_t,
                          'y': _Py_uhash_t,
                          'm': types.double,
                          'e': types.intc,
                          'sign': types.intc,
                          '_PyHASH_MODULUS': _Py_uhash_t,
                          '_PyHASH_BITS': types.intc})
def _Py_HashDouble(v):
    """Hash a double `v` exactly as CPython does.

    Finite values are hashed by reducing the frexp() mantissa/exponent
    decomposition modulo the Mersenne prime _PyHASH_MODULUS, which makes
    numerically equal ints and floats hash identically. Infinities and
    NaN map to the fixed constants CPython publishes in sys.hash_info.
    """
    if not np.isfinite(v):
        if (np.isinf(v)):
            if (v > 0):
                return _PyHASH_INF
            else:
                return -_PyHASH_INF
        else:
            return _PyHASH_NAN
    m, e = math.frexp(v)
    sign = 1
    if (m < 0):
        sign = -1
        m = -m
    # process 28 bits at a time; this should work well both for binary
    # and hexadecimal floating point.
    x = 0
    while (m):
        x = ((x << 28) & _PyHASH_MODULUS) | x >> (_PyHASH_BITS - 28)
        m *= 268435456.0  # /* 2**28 */
        e -= 28
        y = int(m)  # /* pull out integer part */
        m -= y
        x += y
        if x >= _PyHASH_MODULUS:
            x -= _PyHASH_MODULUS
    # /* adjust for the exponent; first reduce it modulo _PyHASH_BITS */
    if e >= 0:
        e = e % _PyHASH_BITS
    else:
        e = _PyHASH_BITS - 1 - ((-1 - e) % _PyHASH_BITS)
    x = ((x << e) & _PyHASH_MODULUS) | x >> (_PyHASH_BITS - e)
    x = x * sign
    return process_return(x)
@intrinsic
def _fpext(tyctx, val):
    """Intrinsic: widen a float32 to float64 via the LLVM ``fpext`` op."""
    def impl(cgctx, builder, signature, args):
        val = args[0]
        return builder.fpext(val, lc.Type.double())
    sig = types.float64(types.float32)
    return sig, impl
# This is a translation of CPython's long_hash, but restricted to the numerical
# domain reachable by int64/uint64 (i.e. no BigInt like support):
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/longobject.c#L2934-L2989 # noqa: E501
# obdigit is a uint32_t which is typedef'd to digit
# int32_t is typedef'd to sdigit
@register_jitable(locals={'x': _Py_uhash_t,
                          'p1': _Py_uhash_t,
                          'p2': _Py_uhash_t,
                          'p3': _Py_uhash_t,
                          'p4': _Py_uhash_t,
                          '_PyHASH_MODULUS': _Py_uhash_t,
                          '_PyHASH_BITS': types.int32,
                          '_PyLong_SHIFT': types.int32,})
def _long_impl(val):
    """Hash an unsigned 64-bit magnitude as CPython's long_hash would,
    consuming it in _PyLong_SHIFT-bit digits (most significant first)
    and folding each into a rolling reduction mod _PyHASH_MODULUS."""
    # This function assumes val came from a long int repr with val being a
    # uint64_t this means having to split the input into PyLong_SHIFT size
    # chunks in an unsigned hash wide type, max numba can handle is a 64bit int
    # mask to select low _PyLong_SHIFT bits
    _tmp_shift = 32 - _PyLong_SHIFT
    mask_shift = (~types.uint32(0x0)) >> _tmp_shift
    # a 64bit wide max means Numba only needs 3 x 30 bit values max,
    # or 5 x 15 bit values max on 32bit platforms
    i = (64 // _PyLong_SHIFT) + 1
    # alg as per hash_long
    x = 0
    p3 = (_PyHASH_BITS - _PyLong_SHIFT)
    for idx in range(i - 1, -1, -1):
        # rotate the accumulator left by _PyLong_SHIFT within _PyHASH_BITS
        p1 = x << _PyLong_SHIFT
        p2 = p1 & _PyHASH_MODULUS
        p4 = x >> p3
        x = p2 | p4
        # the shift and mask splits out the `ob_digit` parts of a Long repr
        x += types.uint32((val >> idx * _PyLong_SHIFT) & mask_shift)
        if x >= _PyHASH_MODULUS:
            x -= _PyHASH_MODULUS
    return _Py_hash_t(x)
# This has no CPython equivalent, CPython uses long_hash.
@overload_method(types.Integer, '__hash__')
@overload_method(types.Boolean, '__hash__')
def int_hash(val):
    """Hash an integer or boolean.

    Magnitudes below _PyHASH_MODULUS hash to the value itself (after the
    standard -1 -> -2 remap); wider magnitudes are reduced digit-by-digit
    in _long_impl, with the sign reapplied afterwards.
    """
    _HASH_I64_MIN = -2 if sys.maxsize <= 2 ** 32 else -4
    _SIGNED_MIN = types.int64(-0x8000000000000000)
    # Find a suitable type to hold a "big" value, i.e. iinfo(ty).min/max
    # this is to ensure e.g. int32.min is handled ok as it's abs() is its value
    _BIG = types.int64 if getattr(val, 'signed', False) else types.uint64
    # this is a bit involved due to the CPython repr of ints
    def impl(val):
        # If the magnitude is under PyHASH_MODULUS, just return the
        # value val as the hash, couple of special cases if val == val:
        # 1. it's 0, in which case return 0
        # 2. it's signed int minimum value, return the value CPython computes
        # but Numba cannot as there's no type wide enough to hold the shifts.
        #
        # If the magnitude is greater than PyHASH_MODULUS then... if the value
        # is negative then negate it switch the sign on the hash once computed
        # and use the standard wide unsigned hash implementation
        val = _BIG(val)
        mag = abs(val)
        if mag < _PyHASH_MODULUS:
            if val == 0:
                ret = 0
            elif val == _SIGNED_MIN:  # e.g. int64 min, -0x8000000000000000
                ret = _Py_hash_t(_HASH_I64_MIN)
            else:
                ret = _Py_hash_t(val)
        else:
            needs_negate = False
            if val < 0:
                val = -val
                needs_negate = True
            ret = _long_impl(val)
            if needs_negate:
                ret = -ret
        return process_return(ret)
    return impl
# This is a translation of CPython's float_hash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/floatobject.c#L528-L532 # noqa: E501
@overload_method(types.Float, '__hash__')
def float_hash(val):
    """Hash a float; 32-bit floats are widened first so that equal
    values hash identically across float widths."""
    if val.bitwidth != 64:
        def impl(val):
            # widen the 32bit float to 64bit
            as_double = np.float64(_fpext(val))
            return _Py_HashDouble(as_double)
    else:
        def impl(val):
            return _Py_HashDouble(val)
    return impl
# This is a translation of CPython's complex_hash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/complexobject.c#L408-L428 # noqa: E501
@overload_method(types.Complex, '__hash__')
def complex_hash(val):
    """Hash a complex number by linearly combining component hashes."""
    def impl(val):
        # When the imaginary part is 0 its hash is 0, so the combination
        # reduces to hash(val.real). This is required because numbers of
        # different types that compare equal must hash equal, i.e.
        # hash(x + 0*j) must equal hash(x).
        combined = hash(val.real) + _PyHASH_IMAG * hash(val.imag)
        return process_return(combined)
    return impl
if _py38_or_later:
    # Python 3.8 strengthened its hash alg for tuples.
    # This is a translation of CPython's tuplehash for Python >=3.8
    # https://github.com/python/cpython/blob/b738237d6792acba85b1f6e6c8993a812c7fd815/Objects/tupleobject.c#L338-L391 # noqa: E501
    # These consts are needed for this alg variant, they are from:
    # https://github.com/python/cpython/blob/b738237d6792acba85b1f6e6c8993a812c7fd815/Objects/tupleobject.c#L353-L363 # noqa: E501
    if _Py_uhash_t.bitwidth // 8 > 4:
        # 64-bit build: xxHash64-style primes and a rotate-left-31 mix.
        _PyHASH_XXPRIME_1 = _Py_uhash_t(11400714785074694791)
        _PyHASH_XXPRIME_2 = _Py_uhash_t(14029467366897019727)
        _PyHASH_XXPRIME_5 = _Py_uhash_t(2870177450012600261)
        @register_jitable(locals={'x': types.uint64})
        def _PyHASH_XXROTATE(x):
            # Rotate left 31 bits
            return ((x << types.uint64(31)) | (x >> types.uint64(33)))
    else:
        # 32-bit build: xxHash32-style primes and a rotate-left-13 mix.
        _PyHASH_XXPRIME_1 = _Py_uhash_t(2654435761)
        _PyHASH_XXPRIME_2 = _Py_uhash_t(2246822519)
        _PyHASH_XXPRIME_5 = _Py_uhash_t(374761393)
        @register_jitable(locals={'x': types.uint64})
        def _PyHASH_XXROTATE(x):
            # Rotate left 13 bits
            return ((x << types.uint64(13)) | (x >> types.uint64(16)))
    # Python 3.7+ has literal_unroll, this means any homogeneous and
    # heterogeneous tuples can use the same alg and just be unrolled.
    from numba import literal_unroll
    @register_jitable(locals={'acc': _Py_uhash_t, 'lane': _Py_uhash_t,
                              '_PyHASH_XXPRIME_5': _Py_uhash_t,
                              '_PyHASH_XXPRIME_1': _Py_uhash_t,
                              'tl': _Py_uhash_t})
    def _tuple_hash(tup):
        # xxHash-style combination of the per-member hashes.
        tl = len(tup)
        acc = _PyHASH_XXPRIME_5
        for x in literal_unroll(tup):
            lane = hash(x)
            # -1 signals a hashing error; propagate it unmodified.
            if lane == _Py_uhash_t(-1):
                return -1
            acc += lane * _PyHASH_XXPRIME_2
            acc = _PyHASH_XXROTATE(acc)
            acc *= _PyHASH_XXPRIME_1
        # fold in the length so that prefixes hash differently
        acc += tl ^ (_PyHASH_XXPRIME_5 ^ _Py_uhash_t(3527539))
        if acc == _Py_uhash_t(-1):
            return process_return(1546275796)
        return process_return(acc)
else:
    # This is a translation of CPython's tuplehash:
    # https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/tupleobject.c#L347-L369 # noqa: E501
    @register_jitable(locals={'x': _Py_uhash_t,
                              'y': _Py_hash_t,
                              'mult': _Py_uhash_t,
                              'l': _Py_hash_t, })
    def _tuple_hash(tup):
        # pre-3.8 multiply/xor rolling hash over the member hashes
        tl = len(tup)
        mult = _PyHASH_MULTIPLIER
        x = _Py_uhash_t(0x345678)
        # in C this is while(--l >= 0), i is indexing tup instead of *tup++
        for i, l in enumerate(range(tl - 1, -1, -1)):
            y = hash(tup[i])
            xxory = (x ^ y)
            x = xxory * mult
            mult += _Py_hash_t((_Py_uhash_t(82520) + l + l))
        x += _Py_uhash_t(97531)
        return process_return(x)
# This is an obfuscated translation of CPython's tuplehash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/tupleobject.c#L347-L369 # noqa: E501
# The obfuscation occurs for a heterogeneous tuple as each tuple member needs
# a potentially different hash() function calling for it. This cannot be done at
# runtime as there's no way to iterate a heterogeneous tuple, so this is
# achieved by essentially unrolling the loop over the members and inserting a
# per-type hash function call for each member, and then simply computing the
# hash value in an inlined/rolling fashion.
@intrinsic
def _tuple_hash_resolve(tyctx, val):
    """Emit inline LLVM IR computing the pre-3.8 tuple hash of `val`,
    calling the hash implementation resolved for each member's type."""
    def impl(cgctx, builder, signature, args):
        typingctx = cgctx.typing_context
        fnty = typingctx.resolve_value_type(hash)
        tupty, = signature.args
        tup, = args
        lty = cgctx.get_value_type(signature.return_type)
        # IR constants mirroring the scalar algorithm's locals
        x = ir.Constant(lty, 0x345678)
        mult = ir.Constant(lty, _PyHASH_MULTIPLIER)
        shift = ir.Constant(lty, 82520)
        tl = len(tupty)
        for i, packed in enumerate(zip(tupty.types, range(tl - 1, -1, -1))):
            ty, l = packed
            # resolve hash() for this member's concrete type and call it
            sig = fnty.get_call_type(tyctx, (ty,), {})
            impl = cgctx.get_function(fnty, sig)
            tuple_val = builder.extract_value(tup, i)
            y = impl(builder, (tuple_val,))
            xxory = builder.xor(x, y)
            x = builder.mul(xxory, mult)
            lconst = ir.Constant(lty, l)
            # mult += 82520 + l + l
            mult = builder.add(mult, shift)
            mult = builder.add(mult, lconst)
            mult = builder.add(mult, lconst)
        x = builder.add(x, ir.Constant(lty, 97531))
        return x
    sig = _Py_hash_t(val)
    return sig, impl
@overload_method(types.BaseTuple, '__hash__')
def tuple_hash(val):
    """Hash a tuple: 3.8+ (or homogeneous) tuples go through
    _tuple_hash; older heterogeneous tuples use the unrolled
    _tuple_hash_resolve intrinsic."""
    unrollable = _py38_or_later or isinstance(val, types.Sequence)
    if unrollable:
        def impl(val):
            return _tuple_hash(val)
    else:
        def impl(val):
            return process_return(_Py_hash_t(_tuple_hash_resolve(val)))
    return impl
# ------------------------------------------------------------------------------
# String/bytes hashing needs hashseed info, this is from:
# https://stackoverflow.com/a/41088757
# with thanks to Martijn Pieters
#
# Developer note:
# CPython makes use of an internal "hashsecret" which is essentially a struct
# containing some state that is set on CPython initialization and contains magic
# numbers used particularly in unicode/string hashing. This code binds to the
# Python runtime libraries in use by the current process and reads the
# "hashsecret" state so that it can be used by Numba. As this is done at runtime
# the behaviour and influence of the PYTHONHASHSEED environment variable is
# accommodated.
from ctypes import ( # noqa
c_size_t,
c_ubyte,
c_uint64,
pythonapi,
Structure,
Union,
) # noqa
class FNV(Structure):
    """ctypes mirror of the FNV member of CPython's _Py_HashSecret_t."""
    _fields_ = [
        ('prefix', c_size_t),
        ('suffix', c_size_t)
    ]
class SIPHASH(Structure):
    """ctypes mirror of the SipHash key pair (k0, k1) in _Py_HashSecret_t."""
    _fields_ = [
        ('k0', c_uint64),
        ('k1', c_uint64),
    ]
class DJBX33A(Structure):
    """ctypes mirror of the DJBX33A member (small-string optimization)."""
    _fields_ = [
        ('padding', c_ubyte * 16),
        ('suffix', c_size_t),
    ]
class EXPAT(Structure):
    """ctypes mirror of the expat hashsalt member of _Py_HashSecret_t."""
    _fields_ = [
        ('padding', c_ubyte * 16),
        ('hashsalt', c_size_t),
    ]
class _Py_HashSecret_t(Union):
    """ctypes mirror of CPython's 24-byte _Py_HashSecret_t union."""
    _fields_ = [
        # ensure 24 bytes
        ('uc', c_ubyte * 24),
        # two Py_hash_t for FNV
        ('fnv', FNV),
        # two uint64 for SipHash24
        ('siphash', SIPHASH),
        # a different (!) Py_hash_t for small string optimization
        ('djbx33a', DJBX33A),
        ('expat', EXPAT),
    ]
# Named pair (LLVM symbol name, ctypes value) for one injected secret member.
_hashsecret_entry = namedtuple('_hashsecret_entry', ['symbol', 'value'])
# Only a few members are needed at present
def _build_hashsecret():
    """Read hash secret from the Python process

    Returns
    -------
    info : dict
        - keys are "djbx33a_suffix", "siphash_k0", "siphash_k1".
        - values are the namedtuple[symbol:str, value:int]
    """
    # Read hashsecret and inject it into the LLVM symbol map under the
    # prefix `_numba_hashsecret_`.
    pyhashsecret = _Py_HashSecret_t.in_dll(pythonapi, '_Py_HashSecret')
    info = {}
    def inject(name, val):
        symbol_name = "_numba_hashsecret_{}".format(name)
        # keep the ctypes value alive (stored in `info`); its address is
        # what JIT-compiled code will read through the LLVM symbol
        val = ctypes.c_uint64(val)
        addr = ctypes.addressof(val)
        ll.add_symbol(symbol_name, addr)
        info[name] = _hashsecret_entry(symbol=symbol_name, value=val)
    inject('djbx33a_suffix', pyhashsecret.djbx33a.suffix)
    inject('siphash_k0', pyhashsecret.siphash.k0)
    inject('siphash_k1', pyhashsecret.siphash.k1)
    return info
_hashsecret = _build_hashsecret()
# ------------------------------------------------------------------------------
if _Py_hashfunc_name in ('siphash24', 'fnv'):
# Check for use of the FNV hashing alg, warn users that it's not implemented
# and functionality relying of properties derived from hashing will be fine
# but hash values themselves are likely to be different.
if _Py_hashfunc_name == 'fnv':
msg = ("FNV hashing is not implemented in Numba. See PEP 456 "
"https://www.python.org/dev/peps/pep-0456/ "
"for rationale over not using FNV. Numba will continue to work, "
"but hashes for built in types will be computed using "
"siphash24. This will permit e.g. dictionaries to continue to "
"behave as expected, however anything relying on the value of "
"the hash opposed to hash as a derived property is likely to "
"not work as expected.")
warnings.warn(msg)
# This is a translation of CPython's siphash24 function:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L287-L413 # noqa: E501
# /* *********************************************************************
# <MIT License>
# Copyright (c) 2013 Marek Majkowski <marek@popcount.org>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# </MIT License>
# Original location:
# https://github.com/majek/csiphash/
# Solution inspired by code from:
# Samuel Neves (supercop/crypto_auth/siphash24/little)
#djb (supercop/crypto_auth/siphash24/little2)
# Jean-Philippe Aumasson (https://131002.net/siphash/siphash24.c)
# Modified for Python by Christian Heimes:
# - C89 / MSVC compatibility
# - _rotl64() on Windows
# - letoh64() fallback
# */
@register_jitable(locals={'x': types.uint64,
                          'b': types.uint64, })
def _ROTATE(x, b):
    """64-bit rotate-left of x by b bits."""
    return types.uint64(((x) << (b)) | ((x) >> (types.uint64(64) - (b))))
@register_jitable(locals={'a': types.uint64,
                          'b': types.uint64,
                          'c': types.uint64,
                          'd': types.uint64,
                          's': types.uint64,
                          't': types.uint64, })
def _HALF_ROUND(a, b, c, d, s, t):
    """One SipHash half-round: add/rotate/xor mix of the four lanes,
    with rotation amounts s and t."""
    a += b
    c += d
    b = _ROTATE(b, s) ^ a
    d = _ROTATE(d, t) ^ c
    a = _ROTATE(a, 32)
    return a, b, c, d
@register_jitable(locals={'v0': types.uint64,
                          'v1': types.uint64,
                          'v2': types.uint64,
                          'v3': types.uint64, })
def _DOUBLE_ROUND(v0, v1, v2, v3):
    """Two full SipHash rounds (four half-rounds) over the state."""
    v0, v1, v2, v3 = _HALF_ROUND(v0, v1, v2, v3, 13, 16)
    v2, v1, v0, v3 = _HALF_ROUND(v2, v1, v0, v3, 17, 21)
    v0, v1, v2, v3 = _HALF_ROUND(v0, v1, v2, v3, 13, 16)
    v2, v1, v0, v3 = _HALF_ROUND(v2, v1, v0, v3, 17, 21)
    return v0, v1, v2, v3
@register_jitable(locals={'v0': types.uint64,
                          'v1': types.uint64,
                          'v2': types.uint64,
                          'v3': types.uint64,
                          'b': types.uint64,
                          'mi': types.uint64,
                          'tmp': types.Array(types.uint64, 1, 'C'),
                          't': types.uint64,
                          'mask': types.uint64,
                          'jmp': types.uint64,
                          'ohexefef': types.uint64})
def _siphash24(k0, k1, src, src_sz):
    """SipHash-2-4 of the first `src_sz` bytes of `src`, keyed by
    (k0, k1); a direct translation of CPython's pysiphash. The trailing
    (< 8 byte) chunk is folded in by the unrolled switch-fallthrough
    below.
    """
    b = types.uint64(src_sz) << 56
    # state initialised with the standard "somepseudorandomlygenerated
    # bytes" constants xored with the key
    v0 = k0 ^ types.uint64(0x736f6d6570736575)
    v1 = k1 ^ types.uint64(0x646f72616e646f6d)
    v2 = k0 ^ types.uint64(0x6c7967656e657261)
    v3 = k1 ^ types.uint64(0x7465646279746573)
    idx = 0
    while (src_sz >= 8):
        mi = grab_uint64_t(src, idx)
        idx += 1
        src_sz -= 8
        v3 ^= mi
        v0, v1, v2, v3 = _DOUBLE_ROUND(v0, v1, v2, v3)
        v0 ^= mi
    # this is the switch fallthrough:
    # https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L390-L400 # noqa: E501
    t = types.uint64(0x0)
    boffset = idx * 8
    ohexefef = types.uint64(0xff)
    if src_sz >= 7:
        jmp = (6 * 8)
        mask = ~types.uint64(ohexefef << jmp)
        t = (t & mask) | (types.uint64(grab_byte(src, boffset + 6))
                          << jmp)
    if src_sz >= 6:
        jmp = (5 * 8)
        mask = ~types.uint64(ohexefef << jmp)
        t = (t & mask) | (types.uint64(grab_byte(src, boffset + 5))
                          << jmp)
    if src_sz >= 5:
        jmp = (4 * 8)
        mask = ~types.uint64(ohexefef << jmp)
        t = (t & mask) | (types.uint64(grab_byte(src, boffset + 4))
                          << jmp)
    if src_sz >= 4:
        t &= types.uint64(0xffffffff00000000)
        for i in range(4):
            jmp = i * 8
            mask = ~types.uint64(ohexefef << jmp)
            t = (t & mask) | (types.uint64(grab_byte(src, boffset + i))
                              << jmp)
    if src_sz >= 3:
        jmp = (2 * 8)
        mask = ~types.uint64(ohexefef << jmp)
        t = (t & mask) | (types.uint64(grab_byte(src, boffset + 2))
                          << jmp)
    if src_sz >= 2:
        jmp = (1 * 8)
        mask = ~types.uint64(ohexefef << jmp)
        t = (t & mask) | (types.uint64(grab_byte(src, boffset + 1))
                          << jmp)
    if src_sz >= 1:
        mask = ~(ohexefef)
        t = (t & mask) | (types.uint64(grab_byte(src, boffset + 0)))
    b |= t
    v3 ^= b
    v0, v1, v2, v3 = _DOUBLE_ROUND(v0, v1, v2, v3)
    v0 ^= b
    v2 ^= ohexefef
    v0, v1, v2, v3 = _DOUBLE_ROUND(v0, v1, v2, v3)
    v0, v1, v2, v3 = _DOUBLE_ROUND(v0, v1, v2, v3)
    t = (v0 ^ v1) ^ (v2 ^ v3)
    return t
else:
msg = "Unsupported hashing algorithm in use %s" % _Py_hashfunc_name
raise ValueError(msg)
@intrinsic
def _inject_hashsecret_read(tyctx, name):
    """Emit code to load the hashsecret.

    `name` must be a literal string naming one of the injected
    `_numba_hashsecret_*` globals; the intrinsic returns its uint64
    value.
    """
    if not isinstance(name, types.StringLiteral):
        raise errors.TypingError("requires literal string")
    sym = _hashsecret[name.literal_value].symbol
    resty = types.uint64
    sig = resty(name)
    def impl(cgctx, builder, sig, args):
        mod = builder.module
        try:
            # Search for existing global
            gv = mod.get_global(sym)
        except KeyError:
            # Inject the symbol if not already exist.
            gv = ir.GlobalVariable(mod, ir.IntType(64), name=sym)
        v = builder.load(gv)
        return v
    return sig, impl
def _load_hashsecret(name):
    """Pure-Python path: return the ctypes value for secret `name`."""
    return _hashsecret[name].value
@overload(_load_hashsecret)
def _impl_load_hashsecret(name):
    """JIT overload: read the injected `_numba_hashsecret_*` symbol."""
    def imp(name):
        return _inject_hashsecret_read(name)
    return imp
# This is a translation of CPythons's _Py_HashBytes:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L145-L191 # noqa: E501
@register_jitable(locals={'_hash': _Py_uhash_t})
def _Py_HashBytes(val, _len):
    """Hash `_len` bytes at `val`: inline DJBX33A for inputs shorter
    than _Py_HASH_CUTOFF, keyed siphash24 otherwise; both are salted
    with the process hashsecret."""
    if (_len == 0):
        return process_return(0)
    if (_len < _Py_HASH_CUTOFF):
        # TODO: this branch needs testing, needs a CPython setup for it!
        # /* Optimize hashing of very small strings with inline DJBX33A. */
        _hash = _Py_uhash_t(5381)  # /* DJBX33A starts with 5381 */
        for idx in range(_len):
            _hash = ((_hash << 5) + _hash) + np.uint8(grab_byte(val, idx))
        _hash ^= _len
        _hash ^= _load_hashsecret('djbx33a_suffix')
    else:
        tmp = _siphash24(types.uint64(_load_hashsecret('siphash_k0')),
                         types.uint64(_load_hashsecret('siphash_k1')),
                         val, _len)
        _hash = process_return(tmp)
    return process_return(_hash)
# This is an approximate translation of CPython's unicode_hash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/unicodeobject.c#L11635-L11663 # noqa: E501
@overload_method(types.UnicodeType, '__hash__')
def unicode_hash(val):
    """Hash a unicode string via _Py_HashBytes over its raw data."""
    from numba.cpython.unicode import _kind_to_byte_width
    def impl(val):
        kindwidth = _kind_to_byte_width(val._kind)
        _len = len(val)
        # use the cache if possible
        current_hash = val._hash
        if current_hash != -1:
            return current_hash
        else:
            # cannot write hash value to cache in the unicode struct due to
            # pass by value on the struct making the struct member immutable
            return _Py_HashBytes(val._data, kindwidth * _len)
    return impl
|
aterrel/dynd-python
|
dynd/tests/test_array_getitem.py
|
import sys
import unittest
from dynd import nd, ndt
class TestArrayGetItem(unittest.TestCase):
    """Indexing (__getitem__) behaviour of dynd arrays: result types and
    values are checked against an equivalent Python list, including
    negative indices, out-of-range slices and IndexError cases."""
    def test_strided_dim(self):
        # strided dimension: indexing drops the dim, slicing keeps it
        a = nd.empty(100, ndt.int32)
        a[...] = nd.range(100)
        b = list(range(100))
        self.assertEqual(nd.type_of(a), ndt.type('strided * int32'))
        self.assertEqual(nd.type_of(a[...]), ndt.type('strided * int32'))
        self.assertEqual(nd.type_of(a[0]), ndt.int32)
        self.assertEqual(nd.type_of(a[0:1]), ndt.type('strided * int32'))
        self.assertEqual(nd.as_py(a[0]), b[0])
        self.assertEqual(nd.as_py(a[99]), b[99])
        self.assertEqual(nd.as_py(a[-1]), b[-1])
        self.assertEqual(nd.as_py(a[-100]), b[-100])
        self.assertEqual(nd.as_py(a[-101:]), b[-101:])
        self.assertEqual(nd.as_py(a[-5:101:2]), b[-5:101:2])
        self.assertRaises(IndexError, lambda x : x[-101], a)
        self.assertRaises(IndexError, lambda x : x[100], a)
    def test_fixed_dim(self):
        # fixed dimension: slicing yields a strided (not fixed) dim
        a = nd.empty('100 * int32')
        a[...] = nd.range(100)
        b = list(range(100))
        self.assertEqual(nd.type_of(a), ndt.type('100 * int32'))
        self.assertEqual(nd.type_of(a[...]), ndt.type('100 * int32'))
        self.assertEqual(nd.type_of(a[0]), ndt.int32)
        self.assertEqual(nd.type_of(a[0:1]), ndt.type('strided * int32'))
        self.assertEqual(nd.as_py(a[0]), b[0])
        self.assertEqual(nd.as_py(a[99]), b[99])
        self.assertEqual(nd.as_py(a[-1]), b[-1])
        self.assertEqual(nd.as_py(a[-100]), b[-100])
        self.assertEqual(nd.as_py(a[-101:]), b[-101:])
        self.assertEqual(nd.as_py(a[-5:101:2]), b[-5:101:2])
        self.assertRaises(IndexError, lambda x : x[-101], a)
        self.assertRaises(IndexError, lambda x : x[100], a)
    def test_var_dim(self):
        # var dimension: slicing collapses to a strided dim
        a = nd.empty('var * int32')
        a[...] = nd.range(100)
        b = list(range(100))
        self.assertEqual(nd.type_of(a), ndt.type('var * int32'))
        self.assertEqual(nd.type_of(a[...]), ndt.type('var * int32'))
        self.assertEqual(nd.type_of(a[:]), ndt.type('strided * int32'))
        self.assertEqual(nd.type_of(a[0]), ndt.int32)
        self.assertEqual(nd.type_of(a[0:1]), ndt.type('strided * int32'))
        self.assertEqual(nd.as_py(a[0]), b[0])
        self.assertEqual(nd.as_py(a[99]), b[99])
        self.assertEqual(nd.as_py(a[-1]), b[-1])
        self.assertEqual(nd.as_py(a[-100]), b[-100])
        self.assertEqual(nd.as_py(a[-101:]), b[-101:])
        self.assertEqual(nd.as_py(a[-5:101:2]), b[-5:101:2])
        self.assertRaises(IndexError, lambda x : x[-101], a)
        self.assertRaises(IndexError, lambda x : x[100], a)
    def test_struct(self):
        # struct fields are addressable by integer position, negative
        # position, and slices (which build a sub-struct type)
        a = nd.parse_json('{x:int32, y:string, z:float32}',
                    '{"x":20, "y":"testing one two three", "z":-3.25}')
        self.assertEqual(nd.type_of(a), ndt.type('{x:int32, y:string, z:float32}'))
        self.assertEqual(nd.type_of(a[...]), ndt.type('{x:int32, y:string, z:float32}'))
        self.assertEqual(nd.type_of(a[0]), ndt.int32)
        self.assertEqual(nd.type_of(a[1]), ndt.string)
        self.assertEqual(nd.type_of(a[2]), ndt.float32)
        self.assertEqual(nd.type_of(a[-3]), ndt.int32)
        self.assertEqual(nd.type_of(a[-2]), ndt.string)
        self.assertEqual(nd.type_of(a[-1]), ndt.float32)
        self.assertEqual(nd.type_of(a[1:]), ndt.make_struct([ndt.string, ndt.float32], ['y', 'z']))
        self.assertEqual(nd.type_of(a[::-2]), ndt.make_struct([ndt.float32, ndt.int32], ['z', 'x']))
        self.assertEqual(nd.as_py(a[0]), 20)
        self.assertEqual(nd.as_py(a[1]), "testing one two three")
        self.assertEqual(nd.as_py(a[2]), -3.25)
        self.assertEqual(nd.as_py(a[1:]), {'y':'testing one two three', 'z':-3.25})
        self.assertEqual(nd.as_py(a[::-2]), {'x':20, 'z':-3.25})
if __name__ == '__main__':
unittest.main()
|
pipermerriam/perjury
|
tests/test_util.py
|
from unittest import TestCase
from perjury import generators as g
from perjury import util
from perjury.exceptions import UniqueValueTimeoutError
class TestUniqueDecorator(TestCase):
    """Tests for perjury.util.unique."""
    def test_is_pretty_unique(self):
        # This is not the most scientific way to test it, but we have slightly
        # more than 400 usernames, if we generate 400 unique usernames 1000
        # times, it is probably likely that this works.
        # NOTE: `range` (not py2-only `xrange`) keeps this runnable on
        # both Python 2 and 3; behaviour is identical.
        for _ in range(1000):
            unique_username = util.unique(g.username)
            seen = set()
            for _ in range(400):
                username = unique_username()
                # a repeat here means unique() handed out a duplicate
                assert username not in seen
                seen.add(username)
    def test_overflow(self):
        # unique() must raise once the finite choice space is exhausted
        generator = util.unique(g.Choice(choices=(1, 2, 3)))
        generator()
        generator()
        generator()
        self.assertRaises(UniqueValueTimeoutError, generator)
class TestIterableUtils(TestCase):
    """Tests for perjury.util iterable helpers (forever/times/unique)."""
    def test_forever(self):
        # forever() should never exhaust; stop after a large sample
        forever_usernames = util.forever(g.username)
        count = 0
        for username in forever_usernames:
            count += 1
            # 100,000 is basically forever right?
            if count > 100000:
                break
    def test_times(self):
        # times(gen, n) yields exactly n values
        three_usernames = util.times(g.username, 3)
        count = 0
        for username in three_usernames:
            count += 1
        assert count == 3
    def test_composability(self):
        # unique() wrapped in times() must still yield distinct values
        # NOTE: `range` (not py2-only `xrange`) keeps this runnable on
        # both Python 2 and 3; behaviour is identical.
        for _ in range(1000):
            unique_usernames = util.unique(g.username)
            many_unique_usernames = util.times(unique_usernames, 400)
            seen = set()
            count = 0
            for username in many_unique_usernames:
                count += 1
                assert username not in seen
                seen.add(username)
            assert count == 400
|
KholdStare/generators-to-coroutines
|
generators_to_coroutines/tools.py
|
from .decorators import invertibleGenerator, coroutine
def pushFromIterable(iterable, target):
    """Push every element of `iterable` into the `target` coroutine,
    then close it.

    A coroutine that finishes early raises StopIteration from send();
    that is treated as a normal early stop. The target is always closed
    via `finally` — the original skipped close() on early stop because
    the StopIteration escaped the loop before reaching it. Closing an
    already-finished coroutine is a no-op, so this is safe.
    """
    try:
        for elem in iterable:
            target.send(elem)
    except StopIteration:
        # target finished early and will accept no more values
        pass
    finally:
        target.close()
@invertibleGenerator
def genPairs(iterable):
    """ Aggregate two consecutive values into pairs """
    # hold the first element of each pair until its partner arrives;
    # a trailing unpaired element is dropped, as before
    pending = None
    have_first = False
    for item in iterable:
        if have_first:
            yield (pending, item)
            have_first = False
        else:
            pending = item
            have_first = True
@invertibleGenerator
def genFilter(predicate, iterable):
    """ Filter based on predicate """
    # guard-clause form: skip failing elements, yield the rest
    for item in iterable:
        if not predicate(item):
            continue
        yield item
@invertibleGenerator
def genPassthrough(iterable):
    """ Pass values through without modification """
    # identity stage; useful as a placeholder in a pipeline
    for val in iterable:
        yield val
@invertibleGenerator
def genMap(func, iterable):
    """ Map function on all values """
    for val in iterable:
        yield func(val)
@coroutine
def coSplit(predicate, trueTarget, falseTarget):
    """Route each received value to `trueTarget` if `predicate(value)`
    is truthy, otherwise to `falseTarget`.

    When this coroutine is closed (GeneratorExit raised at the yield),
    both downstream targets are closed too. In the original the two
    close() calls were placed after the infinite loop and therefore
    unreachable; the try/finally makes the intended cleanup happen.
    """
    try:
        while True:
            val = (yield)
            if predicate(val):
                trueTarget.send(val)
            else:
                falseTarget.send(val)
    finally:
        trueTarget.close()
        falseTarget.close()
@coroutine
def coReceive():
    """Terminal sink coroutine: print every value it receives."""
    while True:
        val = (yield)
        print("Got %s" % str(val))
|
TOTVS/mdmpublic
|
couchbase-cli/lib/python/couchbase/migrator/migrator.py
|
#
# Copyright 2012, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class Reader(object):
    """Abstract base for migration sources.

    Instances are their own iterators (Python 2 protocol: ``next``);
    subclasses implement ``next`` to yield one record per call and may
    override ``close`` to release resources.
    """
    def __init__(self, fp):
        # fp: source stream/handle; the base class keeps no state
        pass
    def __iter__(self):
        return self
    def next(self):
        # subclasses must produce the next record or raise StopIteration
        raise NotImplementedError
    def close(self):
        pass
class Writer(object):
    """Abstract base for migration sinks.

    Subclasses implement ``write`` to persist one record and may
    override ``close`` to flush/release resources.
    """
    def __init__(self, fp):
        # fp: destination stream/handle; the base class keeps no state
        pass
    def write(self, record):
        raise NotImplementedError
    def close(self):
        pass
|
WojciechMula/toys
|
avx512-remove-spaces/scripts/parse_speed.py
|
from os.path import basename
from procedure_names import display_name
def load(file):
    """Parse a speed-measurement dump into {filename: [(name, time)]}.

    Lines without a ':' are section headers: a path whose basename keys
    the following measurements. Lines of the form
    ``procedure: <label> <time> ...`` append a (display name, time)
    tuple to the current section; procedures that ``display_name`` maps
    to None are deliberately skipped.

    The original reused the parameter ``file`` (shadowing the py2
    builtin) for both the input stream and the current section name,
    and reused ``F`` for two different splits; distinct names are used
    here for clarity. Raises AssertionError if a measurement line
    appears before any section header.
    """
    data = {}
    current = None  # basename of the section currently being filled
    for line in file:
        parts = line.split(':')
        if len(parts) == 1:
            # section header: a bare path, no colon
            current = basename(parts[0].strip())
            if current not in data:
                data[current] = []
            continue
        assert current is not None
        # KeyError here means an unknown procedure name — intentional
        name = display_name[parts[0].strip()]
        if name is None:
            continue
        fields = parts[1].split()
        time = int(fields[1])
        data[current].append((name, time))
    return data
|
HenryHu/pybbs
|
UserManager.py
|
from UCache import UCache
import User
import UserMemo
import json
from errors import *
class UserManager:
    """Registry of loaded User objects, keyed by canonical userid.

    All ``== None`` comparisons were replaced with the idiomatic
    ``is None`` / ``is not None``; behaviour is unchanged.
    """
    # cache of User objects already loaded, keyed by canonical userid
    users = {}
    @staticmethod
    def HandleLogin(svc, username, passwd):
        """Authenticate username/passwd; on success create a Session and
        write its id back to the client. Raises Unauthorized on unknown
        user or wrong password."""
        user = UserManager.LoadUser(username)
        if user is None:
            raise Unauthorized('Login failed')
        if user.Authorize(passwd):
            session = Session(user, svc.client_address[0])
            ret = {}
            ret['session'] = session.GetID()
            svc.writedata(json.dumps(ret))
        else:
            raise Unauthorized('Login failed')
    @staticmethod
    def LoadUser(user):
        """Return the cached User for `user` (any alias accepted by
        UCache), loading it on first access; None if it does not exist."""
        userec = UCache.GetUser(user)
        if userec is None:
            return None
        # normalize to the canonical userid before consulting the cache
        user = userec.userid
        if user not in UserManager.users:
            ruser = UserManager.LoadNewUser(user)
            if ruser is None:
                return None
            UserManager.users[user] = ruser
        return UserManager.users[user]
    @staticmethod
    def LoadUserByUid(uid):
        """Return the User with numeric id `uid`, or None."""
        userec = UCache.GetUserByUid(uid)
        if userec is None:
            return None
        return UserManager.LoadUser(userec.userid)
    @staticmethod
    def LoadNewUser(user):
        """Construct a fresh User object (record + memo); None if either
        part is missing."""
        userec = UCache.GetUser(user)
        if userec is None:
            return None
        umemo = UserMemo.UserMemoMgr.LoadUsermemo(user)
        if umemo is None:
            return None
        return User.User(user, userec, umemo)
from Session import Session
|
lino-framework/xl
|
lino_xl/lib/sepa/models.py
|
# -*- coding: UTF-8 -*-
# Copyright 2014-2018 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from __future__ import unicode_literals
from django.db import models
from lino.api import dd, _, rt
from lino.core.diff import ChangeWatcher
from lino.utils.format_date import fds
from .fields import IBANField, BICField, IBAN_FORMFIELD
from .utils import belgian_nban_to_iban_bic, iban2bic
from .roles import SepaUser, SepaStaff
from lino_xl.lib.contacts.roles import ContactsUser
class Account(dd.Model):
    """A bank account (IBAN/BIC) belonging to a partner; at most one
    account per partner is flagged primary."""
    class Meta:
        app_label = 'sepa'
        abstract = dd.is_abstract_model(__name__, 'Account')
        verbose_name = _("Bank account")
        verbose_name_plural = _("Bank accounts")
    partner = dd.ForeignKey(
        'contacts.Partner',
        related_name='sepa_accounts', null=True, blank=True)
    iban = IBANField(verbose_name=_("IBAN"))
    bic = BICField(verbose_name=_("BIC"), blank=True)
    remark = models.CharField(_("Remark"), max_length=200, blank=True)
    primary = models.BooleanField(
        _("Primary"),
        default=False,
        help_text=_(
            "Enabling this field will automatically disable any "
            "previous primary account and update "
            "the partner's IBAN and BIC"))
    allow_cascaded_delete = ['partner']
    def __str__(self):
        # display the IBAN in its grouped, human-readable form
        return IBAN_FORMFIELD.prepare_value(self.iban)
        # if self.remark:
        #     return "{0} ({1})".format(self.iban, self.remark)
        # return self.iban
    def full_clean(self):
        """Derive a missing BIC: an IBAN starting with a digit is taken
        to be a Belgian NBAN and converted to IBAN+BIC; otherwise the
        BIC is looked up from the IBAN."""
        if self.iban and not self.bic:
            if self.iban[0].isdigit():
                iban, bic = belgian_nban_to_iban_bic(self.iban)
                self.bic = bic
                self.iban = iban
            else:
                self.bic = iban2bic(self.iban) or ''
        super(Account, self).full_clean()
    def after_ui_save(self, ar, cw):
        """If this account was marked primary, unset primary on the
        partner's other accounts and copy this account's PRIMARY_FIELDS
        (iban, bic) onto the partner record."""
        super(Account, self).after_ui_save(ar, cw)
        if self.primary:
            mi = self.partner
            for o in mi.sepa_accounts.exclude(id=self.id):
                if o.primary:
                    o.primary = False
                    o.save()
                    ar.set_response(refresh_all=True)
            watcher = ChangeWatcher(mi)
            for k in PRIMARY_FIELDS:
                setattr(mi, k, getattr(self, k))
            mi.save()
            watcher.send_update(ar)
    @dd.displayfield(_("Statements"))
    def statements(self, ar):
        # link to the matching b2c.Account, if the b2c plugin is
        # installed and statements were imported for this IBAN
        if ar is None or not dd.is_installed('b2c'):
            return ''
        Account = rt.models.b2c.Account
        try:
            b2c = Account.objects.get(iban=self.iban)
        except Account.DoesNotExist:
            return ''
        return ar.obj2html(b2c, fds(b2c.last_transaction))
PRIMARY_FIELDS = dd.fields_list(Account, 'iban bic')
class Accounts(dd.Table):
    """Global table of all bank accounts; visible to SEPA staff only."""
    required_roles = dd.login_required(SepaStaff)
    model = 'sepa.Account'
class AccountsByPartner(Accounts):
    """Bank accounts of one partner, shown and edited in the grid."""
    required_roles = dd.login_required((ContactsUser, SepaUser))
    master_key = 'partner'
    column_names = 'iban bic remark primary *'
    order_by = ['iban']
    stay_in_grid = True
    auto_fit_column_widths = True
    insert_layout = """
    iban bic
    remark
    """
dd.inject_field(
'ledger.Journal',
'sepa_account',
dd.ForeignKey('sepa.Account', blank=True, null=True))
|
eucalyptus/se34euca
|
se34euca/lib/EucaUITestLib_IP_Address.py
|
from se34euca.lib.EucaUITestLib_Base import *
class EucaUITestLib_IP_Address(EucaUITestLib_Base):
    """Selenium UI flows for elastic IP management (allocate, count,
    release, pick an unassigned address). Python 2 print statements are
    used for progress logging, matching the rest of the suite."""
    def test_ui_allocate_ip_address(self, ip_count):
        '''
        Allocate ip_count elastic IPs via the dashboard; returns 0.
        '''
        print
        print "Started Test: Allocate IP Address: IP_COUNT " + str(ip_count)
        print
        self.click_element_by_link_text("Dashboard")
        self.verify_element_by_link_text("Launch new instance")
        print
        print "Test: Go to the Page IP Address"
        self.click_element_by_id("dashboard-netsec-eip")
        time.sleep(3)
        self.click_element_by_id("table-eips-new")
        self.verify_element_by_id("eip-allocate-count")
        print
        print "Test: Allocate IP Address"
        self.set_keys_by_id("eip-allocate-count", str(ip_count))
        self.click_element_by_id("eip-allocate-btn")
        print
        print "Finished: Allocate IP Addresses"
        print
        return 0
    def test_ui_check_ip_address_count(self, ip_count):
        '''
        Verify the dashboard shows exactly ip_count elastic IPs; returns 0.
        '''
        print
        print "Started Test: Check IP Address Count"
        print
        self.click_element_by_link_text("Dashboard")
        self.verify_element_by_link_text("Launch new instance")
        time.sleep(3)
        print "Verifying that IP Address Count on Dashboard is " + ip_count
        self.verify_text_displayed_by_css("#dashboard-netsec-eip > span", ip_count)
        print
        print "Finished Test: IP Address Count"
        print
        return 0
    def test_ui_release_ip_address_all(self):
        '''
        Select all allocated IPs and release them back to the cloud; returns 0.
        '''
        print
        print "Started Test: Release IP Address"
        print
        self.click_element_by_link_text("Dashboard")
        self.verify_element_by_link_text("Launch new instance")
        print
        print "Test: Go to the Page IP Address"
        self.click_element_by_css_selector("#dashboard-netsec-eip > span")
        time.sleep(3)
        self.click_element_by_id("eips-check-all")
        self.click_element_by_id("more-actions-eips")
        self.click_element_by_link_text("Release to cloud")
        self.click_element_by_id("btn-eips-release-release")
        print
        print "Finished: Release IP Address"
        print
        return 0
    def test_ui_get_available_ip_address(self):
        '''
        Returns an available IP address at random
        '''
        print
        print "Started Test: Get Available IP Address"
        print
        self.click_element_by_link_text("Dashboard")
        self.click_element_by_link_text("Network & Security")
        self.click_element_by_link_text("IP Addresses")
        time.sleep(3)
        # filter the table down to unassigned addresses only
        self.click_element_by_css_selector("div.VS-search-inner")
        self.click_element_by_link_text("Assignment")
        self.click_element_by_link_text("Unassigned")
        time.sleep(3)
        # first row, second column holds the IP address text
        available_ip = self.get_text_by_xpath("//table[@id='eips']/tbody/tr/td[2]")
        print
        print "Finished Test: Get Available IP Address. Returning IP: " + available_ip
        print
        return available_ip
# Allow running this test library directly; unittest comes from the
# wildcard import of EucaUITestLib_Base.
if __name__ == "__main__":
    unittest.main()
|
jkafader/trough
|
tests/wsgi/test_segment_manager.py
|
import pytest
from trough.wsgi.segment_manager import server
import ujson
import trough
from trough.settings import settings
import doublethink
import rethinkdb as r
import requests # :-\ urllib3?
import hdfs3
import time
import tempfile
import os
import sqlite3
import logging
import socket
trough.settings.configure_logging()
@pytest.fixture(scope="module")
def segment_manager_server():
    """Flask test client for the segment-manager WSGI app, shared per module."""
    server.testing = True
    return server.test_client()
def test_simple_provision(segment_manager_server):
    """POST / provisions a segment and returns its write URL as plain text."""
    # GET is not allowed on the root endpoint.
    assert segment_manager_server.get('/').status == '405 METHOD NOT ALLOWED'
    expected_suffix = b':6222/?segment=test_simple_provision_segment'
    # Provisioning is idempotent: the first POST creates the segment, the
    # second finds it already provisioned; both answer identically.
    for _ in range(2):
        resp = segment_manager_server.post(
            '/', data='test_simple_provision_segment')
        assert resp.status_code == 200
        assert resp.mimetype == 'text/plain'
        assert b''.join(resp.response).endswith(expected_suffix)
def test_provision(segment_manager_server):
    """POST /provision provisions a segment and returns json with write_url."""
    # GET is not allowed on /provision.
    assert segment_manager_server.get('/provision').status == '405 METHOD NOT ALLOWED'
    payload = ujson.dumps({'segment': 'test_provision_segment'})
    # Idempotent: first POST provisions, second finds it already provisioned.
    for _ in range(2):
        resp = segment_manager_server.post(
            '/provision', content_type='application/json', data=payload)
        assert resp.status_code == 200
        assert resp.mimetype == 'application/json'
        doc = ujson.loads(b''.join(resp.response))  # ujson accepts bytes
        assert doc['write_url'].endswith(':6222/?segment=test_provision_segment')
def test_provision_with_schema(segment_manager_server):
    """End-to-end: register a schema, provision a segment with it, and query
    the segment's read node to verify the schema was applied."""
    schema = '''CREATE TABLE test (id INTEGER PRIMARY KEY AUTOINCREMENT, test varchar(4));
    INSERT INTO test (test) VALUES ("test");'''
    # create a schema by submitting sql
    # NOTE(review): content_type 'applicaton/sql' is misspelled -- the server
    # apparently doesn't validate it; confirm before changing.
    result = segment_manager_server.put(
        '/schema/test1/sql', content_type='applicaton/sql', data=schema)
    assert result.status_code == 201
    # provision a segment with that schema
    result = segment_manager_server.post(
        '/provision', content_type='application/json',
        data=ujson.dumps({'segment':'test_provision_with_schema_1', 'schema':'test1'}))
    assert result.status_code == 200
    assert result.mimetype == 'application/json'
    result_bytes = b''.join(result.response)
    result_dict = ujson.loads(result_bytes) # ujson accepts bytes! 😻
    assert result_dict['write_url'].endswith(':6222/?segment=test_provision_with_schema_1')
    # get db read url from rethinkdb
    # (pick the least-loaded healthy trough-read service for the segment;
    # "healthy" means its last heartbeat is younger than its ttl)
    rethinker = doublethink.Rethinker(
        servers=settings['RETHINKDB_HOSTS'], db='trough_configuration')
    query = rethinker.table('services').get_all('test_provision_with_schema_1', index='segment').filter({'role': 'trough-read'}).filter(lambda svc: r.now().sub(svc['last_heartbeat']).lt(svc['ttl'])).order_by('load')[0]
    healthy_segment = query.run()
    read_url = healthy_segment.get('url')
    assert read_url.endswith(':6444/?segment=test_provision_with_schema_1')
    # run a query to check that the schema was used
    sql = 'SELECT * FROM test;'
    with requests.post(read_url, stream=True, data=sql) as response:
        assert response.status_code == 200
        result = ujson.loads(response.text)
        assert result == [{'test': 'test', 'id': 1}]
    # delete the schema from rethinkdb for the sake of other tests
    rethinker = doublethink.Rethinker(
        servers=settings['RETHINKDB_HOSTS'], db='trough_configuration')
    result = rethinker.table('schema').get('test1').delete().run()
    assert result == {'deleted': 1, 'inserted': 0, 'skipped': 0, 'errors': 0, 'unchanged': 0, 'replaced': 0}
def test_schemas(segment_manager_server):
    """Exercise the whole /schema REST API: listing, retrieval as json and as
    raw sql, validation errors (bad json, id mismatch, missing keys, invalid
    sql), creation via both the sql and json endpoints, and overwrites.

    The steps are order-dependent: later assertions rely on schemas created
    earlier in this function.
    """
    # initial list of schemas
    result = segment_manager_server.get('/schema')
    assert result.status_code == 200
    assert result.mimetype == 'application/json'
    result_bytes = b''.join(result.response)
    result_list = ujson.loads(result_bytes)
    assert set(result_list) == {'default'}
    # existent schema as json
    result = segment_manager_server.get('/schema/default')
    assert result.status_code == 200
    assert result.mimetype == 'application/json'
    result_bytes = b''.join(result.response)
    result_dict = ujson.loads(result_bytes)
    assert result_dict == {'id': 'default', 'sql': ''}
    # existent schema sql
    result = segment_manager_server.get('/schema/default/sql')
    assert result.status_code == 200
    assert result.mimetype == 'application/sql'
    result_bytes = b''.join(result.response)
    assert result_bytes == b''
    # schema doesn't exist yet
    result = segment_manager_server.get('/schema/schema1')
    assert result.status_code == 404
    # schema doesn't exist yet
    result = segment_manager_server.get('/schema/schema1/sql')
    assert result.status_code == 404
    # bad request: POST not accepted (must be PUT)
    result = segment_manager_server.post('/schema/schema1', data='{}')
    assert result.status_code == 405
    result = segment_manager_server.post('/schema/schema1/sql', data='')
    assert result.status_code == 405
    # bad request: invalid json
    result = segment_manager_server.put(
        '/schema/schema1', data=']]}what the not valid json' )
    assert result.status_code == 400
    assert b''.join(result.response) == b'input could not be parsed as json'
    # bad request: id in json does not match url
    result = segment_manager_server.put(
        '/schema/schema1', data=ujson.dumps({'id': 'schema2', 'sql': ''}))
    assert result.status_code == 400
    assert b''.join(result.response) == b"id in json 'schema2' does not match id in url 'schema1'"
    # bad request: missing sql
    result = segment_manager_server.put(
        '/schema/schema1', data=ujson.dumps({'id': 'schema1'}))
    assert result.status_code == 400
    assert b''.join(result.response) == b"input json has keys {'id'} (should be {'id', 'sql'})"
    # bad request: missing id
    result = segment_manager_server.put(
        '/schema/schema1', data=ujson.dumps({'sql': ''}))
    assert result.status_code == 400
    assert b''.join(result.response) == b"input json has keys {'sql'} (should be {'id', 'sql'})"
    # bad request: invalid sql
    result = segment_manager_server.put(
        '/schema/schema1', data=ujson.dumps({'id': 'schema1', 'sql': 'create create table table blah blooofdjaio'}))
    assert result.status_code == 400
    assert b''.join(result.response) == b'schema sql failed validation: near "create": syntax error'
    # create new schema by submitting sql
    # NOTE(review): 'applicaton/sql' misspelling appears throughout; the
    # server evidently ignores the content type -- confirm before fixing.
    result = segment_manager_server.put(
        '/schema/schema1/sql', content_type='applicaton/sql',
        data='create table foo (bar varchar(100));')
    assert result.status_code == 201
    # get the new schema as json
    result = segment_manager_server.get('/schema/schema1')
    assert result.status_code == 200
    assert result.mimetype == 'application/json'
    result_bytes = b''.join(result.response)
    result_dict = ujson.loads(result_bytes)
    assert result_dict == {'id': 'schema1', 'sql': 'create table foo (bar varchar(100));'}
    # get the new schema as sql
    result = segment_manager_server.get('/schema/schema1/sql')
    assert result.status_code == 200
    assert result.mimetype == 'application/sql'
    result_bytes = b''.join(result.response)
    assert result_bytes == b'create table foo (bar varchar(100));'
    # create new schema by submitting json
    result = segment_manager_server.put(
        '/schema/schema2', content_type='applicaton/sql',
        data=ujson.dumps({'id': 'schema2', 'sql': 'create table schema2_table (foo varchar(100));'}))
    assert result.status_code == 201
    # get the new schema as json
    result = segment_manager_server.get('/schema/schema2')
    assert result.status_code == 200
    assert result.mimetype == 'application/json'
    result_bytes = b''.join(result.response)
    result_dict = ujson.loads(result_bytes)
    assert result_dict == {'id': 'schema2', 'sql': 'create table schema2_table (foo varchar(100));'}
    # get the new schema as sql
    result = segment_manager_server.get('/schema/schema2/sql')
    assert result.status_code == 200
    assert result.mimetype == 'application/sql'
    result_bytes = b''.join(result.response)
    assert result_bytes == b'create table schema2_table (foo varchar(100));'
    # updated list of schemas
    result = segment_manager_server.get('/schema')
    assert result.status_code == 200
    assert result.mimetype == 'application/json'
    result_bytes = b''.join(result.response)
    result_list = ujson.loads(result_bytes)
    assert set(result_list) == {'default', 'schema1', 'schema2'}
    # overwrite schema1 with json api (204 = updated, vs 201 = created)
    result = segment_manager_server.put(
        '/schema/schema1', content_type='applicaton/json',
        data=ujson.dumps({'id': 'schema1', 'sql': 'create table blah (toot varchar(100));'}))
    assert result.status_code == 204
    # get the modified schema as sql
    result = segment_manager_server.get('/schema/schema1/sql')
    assert result.status_code == 200
    assert result.mimetype == 'application/sql'
    result_bytes = b''.join(result.response)
    assert result_bytes == b'create table blah (toot varchar(100));'
    # overwrite schema1 with sql api
    result = segment_manager_server.put(
        '/schema/schema1/sql', content_type='applicaton/sql',
        data='create table haha (hehehe varchar(100));')
    assert result.status_code == 204
    # get the modified schema as json
    result = segment_manager_server.get('/schema/schema1')
    assert result.status_code == 200
    assert result.mimetype == 'application/json'
    result_bytes = b''.join(result.response)
    result_dict = ujson.loads(result_bytes)
    assert result_dict == {'id': 'schema1', 'sql': 'create table haha (hehehe varchar(100));'}
    # updated list of schemas
    result = segment_manager_server.get('/schema')
    assert result.status_code == 200
    assert result.mimetype == 'application/json'
    result_bytes = b''.join(result.response)
    result_list = ujson.loads(result_bytes)
    assert set(result_list) == {'default', 'schema1', 'schema2'}
    # XXX DELETE?
def test_promotion(segment_manager_server):
    """Provision a segment, write to it, and promote it to HDFS -- once with
    no assignment row in rethinkdb and once with one -- then verify that a
    segment already 'under_promotion' cannot be promoted again.

    Requires live HDFS and RethinkDB services as configured in settings.
    """
    hdfs = hdfs3.HDFileSystem(settings['HDFS_HOST'], settings['HDFS_PORT'])
    # Start from an empty HDFS data directory.
    hdfs.rm(settings['HDFS_PATH'])
    hdfs.mkdir(settings['HDFS_PATH'])
    result = segment_manager_server.get('/promote')
    assert result.status == '405 METHOD NOT ALLOWED'
    # provision a test segment for write
    result = segment_manager_server.post(
        '/provision', content_type='application/json',
        data=ujson.dumps({'segment':'test_promotion'}))
    assert result.status_code == 200
    assert result.mimetype == 'application/json'
    result_bytes = b''.join(result.response)
    result_dict = ujson.loads(result_bytes)
    assert result_dict['write_url'].endswith(':6222/?segment=test_promotion')
    write_url = result_dict['write_url']
    # write something into the db
    sql = ('create table foo (bar varchar(100));\n'
        'insert into foo (bar) values ("testing segment promotion");\n')
    response = requests.post(write_url, sql)
    assert response.status_code == 200
    # shouldn't be anything in hdfs yet...
    # ('test_promot' is the truncated shard directory name -- presumably
    # trough's HDFS sharding scheme; verify against the promotion code)
    expected_remote_path = os.path.join(
        settings['HDFS_PATH'], 'test_promot', 'test_promotion.sqlite')
    with pytest.raises(FileNotFoundError):
        hdfs.ls(expected_remote_path, detail=True)
    # now write to the segment and promote it to HDFS
    # (sleep so the HDFS mtime is strictly greater than `before`)
    before = time.time()
    time.sleep(1.5)
    result = segment_manager_server.post(
        '/promote', content_type='application/json',
        data=ujson.dumps({'segment': 'test_promotion'}))
    assert result.status_code == 200
    assert result.mimetype == 'application/json'
    result_bytes = b''.join(result.response)
    result_dict = ujson.loads(result_bytes)
    assert result_dict == {'remote_path': expected_remote_path}
    # make sure it doesn't think the segment is under promotion
    rethinker = doublethink.Rethinker(
        servers=settings['RETHINKDB_HOSTS'], db='trough_configuration')
    query = rethinker.table('lock').get('write:lock:test_promotion')
    result = query.run()
    assert not result.get('under_promotion')
    # let's see if it's hdfs
    listing_after_promotion = hdfs.ls(expected_remote_path, detail=True)
    assert len(listing_after_promotion) == 1
    assert listing_after_promotion[0]['last_mod'] > before
    # grab the file from hdfs and check the content
    # n.b. copy created by sqlitebck may have different size, sha1 etc from orig
    size = None
    with tempfile.TemporaryDirectory() as tmpdir:
        local_copy = os.path.join(tmpdir, 'test_promotion.sqlite')
        hdfs.get(expected_remote_path, local_copy)
        conn = sqlite3.connect(local_copy)
        cur = conn.execute('select * from foo')
        assert cur.fetchall() == [('testing segment promotion',)]
        conn.close()
        size = os.path.getsize(local_copy)
    # test promotion when there is an assignment in rethinkdb
    rethinker.table('assignment').insert({
        'assigned_on': doublethink.utcnow(),
        'bytes': size,
        'hash_ring': 0 ,
        'id': 'localhost:test_promotion',
        'node': 'localhost',
        'remote_path': expected_remote_path,
        'segment': 'test_promotion'}).run()
    # promote it to HDFS
    before = time.time()
    time.sleep(1.5)
    result = segment_manager_server.post(
        '/promote', content_type='application/json',
        data=ujson.dumps({'segment': 'test_promotion'}))
    assert result.status_code == 200
    assert result.mimetype == 'application/json'
    result_bytes = b''.join(result.response)
    result_dict = ujson.loads(result_bytes)
    assert result_dict == {'remote_path': expected_remote_path}
    # make sure it doesn't think the segment is under promotion
    rethinker = doublethink.Rethinker(
        servers=settings['RETHINKDB_HOSTS'], db='trough_configuration')
    query = rethinker.table('lock').get('write:lock:test_promotion')
    result = query.run()
    assert not result.get('under_promotion')
    # let's see if it's hdfs
    listing_after_promotion = hdfs.ls(expected_remote_path, detail=True)
    assert len(listing_after_promotion) == 1
    assert listing_after_promotion[0]['last_mod'] > before
    # pretend the segment is under promotion
    rethinker.table('lock')\
        .get('write:lock:test_promotion')\
        .update({'under_promotion': True}).run()
    assert rethinker.table('lock')\
        .get('write:lock:test_promotion').run()\
        .get('under_promotion')
    with pytest.raises(Exception):
        result = segment_manager_server.post(
            '/promote', content_type='application/json',
            data=ujson.dumps({'segment': 'test_promotion'}))
def test_delete_segment(segment_manager_server):
    """Full segment-deletion lifecycle: 404 for an unknown segment, 400 while
    a write lock exists, then (after removing the lock) 204 and verification
    that the service entries, assignment, local file and HDFS file are gone.

    Requires live HDFS and RethinkDB services as configured in settings.
    """
    hdfs = hdfs3.HDFileSystem(settings['HDFS_HOST'], settings['HDFS_PORT'])
    rethinker = doublethink.Rethinker(
        servers=settings['RETHINKDB_HOSTS'], db='trough_configuration')
    # initially, segment doesn't exist
    result = segment_manager_server.delete('/segment/test_delete_segment')
    assert result.status_code == 404
    # provision segment
    result = segment_manager_server.post(
        '/provision', content_type='application/json',
        data=ujson.dumps({'segment':'test_delete_segment'}))
    assert result.status_code == 200
    assert result.mimetype == 'application/json'
    result_bytes = b''.join(result.response)
    result_dict = ujson.loads(result_bytes)
    assert result_dict['write_url'].endswith(':6222/?segment=test_delete_segment')
    write_url = result_dict['write_url']
    # write something into the db
    sql = ('create table foo (bar varchar(100));\n'
        'insert into foo (bar) values ("testing segment deletion");\n')
    response = requests.post(write_url, sql)
    assert response.status_code == 200
    # check that local file exists
    local_path = os.path.join(
        settings['LOCAL_DATA'], 'test_delete_segment.sqlite')
    assert os.path.exists(local_path)
    # check that attempted delete while under write returns 400
    result = segment_manager_server.delete('/segment/test_delete_segment')
    assert result.status_code == 400
    # shouldn't be anything in hdfs yet
    # ('test_delete_segm' is the truncated shard directory name)
    expected_remote_path = os.path.join(
        settings['HDFS_PATH'], 'test_delete_segm',
        'test_delete_segment.sqlite')
    with pytest.raises(FileNotFoundError):
        hdfs.ls(expected_remote_path, detail=True)
    # promote segment to hdfs
    result = segment_manager_server.post(
        '/promote', content_type='application/json',
        data=ujson.dumps({'segment': 'test_delete_segment'}))
    assert result.status_code == 200
    assert result.mimetype == 'application/json'
    result_bytes = b''.join(result.response)
    result_dict = ujson.loads(result_bytes)
    assert result_dict == {'remote_path': expected_remote_path}
    # let's see if it's hdfs
    hdfs_ls = hdfs.ls(expected_remote_path, detail=True)
    assert len(hdfs_ls) == 1
    # add an assignment (so we can check it is deleted successfully)
    rethinker.table('assignment').insert({
        'assigned_on': doublethink.utcnow(),
        'bytes': os.path.getsize(local_path),
        'hash_ring': 0 ,
        'id': '%s:test_delete_segment' % socket.gethostname(),
        'node': socket.gethostname(),
        'remote_path': expected_remote_path,
        'segment': 'test_delete_segment'}).run()
    # check that service entries, assignment exist
    assert rethinker.table('services')\
        .get('trough-read:%s:test_delete_segment' % socket.gethostname())\
        .run()
    assert rethinker.table('services')\
        .get('trough-write:%s:test_delete_segment' % socket.gethostname())\
        .run()
    assert rethinker.table('assignment')\
        .get('%s:test_delete_segment' % socket.gethostname()).run()
    # check that attempted delete while under write returns 400
    result = segment_manager_server.delete('/segment/test_delete_segment')
    assert result.status_code == 400
    # delete the write lock
    assert rethinker.table('lock')\
        .get('write:lock:test_delete_segment').delete().run() == {
            'deleted': 1, 'errors': 0, 'inserted': 0,
            'replaced': 0 , 'skipped': 0 , 'unchanged': 0, }
    # delete the segment
    result = segment_manager_server.delete('/segment/test_delete_segment')
    assert result.status_code == 204
    # check that service entries and assignment are gone
    assert not rethinker.table('services')\
        .get('trough-read:%s:test_delete_segment' % socket.gethostname())\
        .run()
    assert not rethinker.table('services')\
        .get('trough-write:%s:test_delete_segment' % socket.gethostname())\
        .run()
    assert not rethinker.table('assignment')\
        .get('%s:test_delete_segment' % socket.gethostname()).run()
    # check that local file is gone
    assert not os.path.exists(local_path)
    # check that file is gone from hdfs
    with pytest.raises(FileNotFoundError):
        hdfs_ls = hdfs.ls(expected_remote_path, detail=True)
|
svebk/DeepSentiBank_memex
|
cu_image_search/hasher/hasher_swig.py
|
import os
import pwd
import sys
import time
import json
import shutil
import random
import subprocess
import numpy as np
from .generic_hasher import GenericHasher
from ..memex_tools.image_dl import mkpath
from ..memex_tools.binary_file import read_binary_file
# should me move the _hasher_obj_py.so?
#from ..hashing_new.python import _hasher_obj_py
import _hasher_obj_py as hop
class HasherSwig(GenericHasher):
def __init__(self,global_conf_filename):
self.global_conf = json.load(open(global_conf_filename,'rt'))
self.base_update_path = os.path.dirname(__file__)
self.base_model_path = os.path.join(os.path.dirname(__file__),'../../data/')
if 'LI_base_update_path' in self.global_conf:
self.base_update_path = self.global_conf['LI_base_update_path']
if 'HA_base_update_path' in self.global_conf:
self.base_update_path = self.global_conf['HA_base_update_path']
if 'HA_path' in self.global_conf:
self.hashing_execpath = os.path.join(os.path.dirname(__file__),self.global_conf['HA_path'])
else:
self.hashing_execpath = os.path.join(os.path.dirname(__file__),'../hashing/')
if 'HA_exec' in self.global_conf:
self.hashing_execfile = self.global_conf['HA_exec']
else:
self.hashing_execfile = 'hashing'
self.features_dim = self.global_conf['FE_features_dim']
self.bits_num = self.global_conf['HA_bits_num']
self.hashing_outpath = os.path.join(self.base_update_path,'hash_bits/')
mkpath(self.hashing_outpath)
# need to be able to set/get master_update file in HasherObjectPy too.
self.master_update_file = "update_list_dev.txt"
if 'HA_master_update_file' in self.global_conf:
print("Setting HA_master_update_file is not yet supported for HasherSwig")
sys.exit(-1)
self.master_update_file = self.global_conf['HA_master_update_file']
self.hasher = hop.new_HasherObjectPy()
hop.HasherObjectPy_set_feature_dim(self.hasher, self.features_dim)
hop.HasherObjectPy_set_bit_num(self.hasher, self.bits_num)
hop.HasherObjectPy_set_base_updatepath(self.hasher, str(self.base_update_path))
#hop.HasherObjectPy_set_base_modelpath(self.hasher, "/home/ubuntu/memex/data/")
# Model files still need to be in self.hashing_execfile for updates...
hop.HasherObjectPy_set_base_modelpath(self.hasher, str(self.base_model_path))
self.init_hasher()
def __del__(self):
# clean exit deleting SWIG object
hop.delete_HasherObjectPy(self.hasher)
def init_hasher(self):
status = hop.HasherObjectPy_initialize(self.hasher)
if status != 0:
print("Hasher was not able to initialize")
sys.exit(-1)
def compute_hashcodes(self,features_filename,ins_num,startid):
""" Compute ITQ hashcodes for the features in 'features_filename'
:param features_filename: filepath for the binary file containing the features
:type features_filename: string
:param ins_num: number of features in 'features_filename'
:type ins_num: integer
:returns hashbits_filepath: filepath for the binary file containing the hashcodes
"""
feature_filepath = features_filename[:-4]+'_norm'
# we could be passing additional arguments here
command = self.hashing_execpath+'hashing_update '+features_filename+' '+str(ins_num)+' '+self.hashing_execpath
proc = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE)
print "[HasherSwig.compute_hashcodes: log] running command: {}".format(command)
sys.stdout.flush()
(out, err) = proc.communicate()
print "[HasherSwig.compute_hashcodes: log] program output:", out
print "[HasherSwig.compute_hashcodes: log] program error:", err
sys.stdout.flush()
#print command
#os.system(command)
hashbits_filepath = os.path.join(self.hashing_outpath,str(startid)+'_itq_norm_'+str(self.bits_num))
itq_output_path = features_filename[:-4] + '_itq_norm_'+str(self.bits_num)
print "[HasherSwig.compute_hashcodes: log] Moving {} to {}.".format(itq_output_path,hashbits_filepath)
shutil.move(itq_output_path, hashbits_filepath)
os.remove(features_filename)
return hashbits_filepath
def get_max_feat_id(self):
""" Returns number of images indexed based on the size of hashcodes files.
"""
total_nb = 0
try:
with open(os.path.join(self.base_update_path,self.master_update_file),'rt') as master_file:
# sum up sizes of files in master_file
for line in master_file:
statinfo = os.stat(os.path.join(self.hashing_outpath,line.strip()+'_itq_norm_'+str(self.bits_num)))
total_nb += statinfo.st_size*8/self.bits_num
except Exception as inst:
print "[HasherSwig.get_max_feat_id: error] {}".format(inst)
return total_nb
def compress_feats(self):
""" Compress the features with zlib.
"""
mkpath(os.path.join(self.base_update_path,'comp_features'))
mkpath(os.path.join(self.base_update_path,'comp_idx'))
args = [self.base_update_path+'/', str(self.features_dim), '1', self.master_update_file, str(self.bits_num)]
subprocess_command = [self.hashing_execpath+"compress_feats"] + args
# this will work only if features to be compressed are present in self.base_update_path/features
proc = subprocess.Popen(subprocess_command, stdout=subprocess.PIPE)
print "[HasherSwig.compress_feats: log] running command: {}".format(subprocess_command)
(out, err) = proc.communicate()
print "[HasherSwig.compress_feats: log] program output:", out
print "[HasherSwig.compress_feats: log] program error:", err
def get_precomp_X(self,list_feats_id,str_precomp,read_dim,read_type):
import struct
query_time = time.time()
# save queries id in binary file
query_precomp_fn = "{}_query_{}_p{}_{}".format(str_precomp, query_time, os.getpid(), random.random())
X_fn = "{}_{}".format(str_precomp,query_time)
with open(query_precomp_fn,"wb") as f_prein:
for feat_id in list_feats_id:
f_prein.write(struct.pack('i',feat_id))
# query for features
command = self.hashing_execpath+"get_precomp_{} {} {} {}".format(str_precomp,query_precomp_fn,X_fn,self.base_update_path)
print("[HasherSwig.get_precomp_X: log] running command: {}".format(command))
sys.stdout.flush()
os.system(command)
# read features/hashcodes
X, ok_ids = read_binary_file(X_fn,str_precomp,list_feats_id,read_dim,read_type)
#print X,X[0].shape
# cleanup
os.remove(query_precomp_fn)
os.remove(X_fn)
return X,ok_ids
def get_precomp_feats(self,list_feats_id):
""" Get precomputed features from 'list_feats_id'
"""
return self.get_precomp_X(list_feats_id,"feats",self.features_dim*4,np.float32)
def get_precomp_hashcodes(self,list_feats_id):
""" Get precomputed hashcodes from 'list_feats_id'
"""
return self.get_precomp_X(list_feats_id,"hashcodes",self.bits_num/8,np.uint8)
def get_similar_images_from_featuresfile(self, featurefilename, ratio, near_dup_th=-1.0):
""" Get similar images of the images with features in 'featurefilename'.
:param featurefilename: features of the query images.
:type featurefilename: string
:param ratio: ratio of images retrieved with hashing that will be reranked.
:type ratio: float
:param near_dup_th: near dup threshold, if positive, only images below this distance value will be returned.
:type near_dup_th: float
:returns simname: filename of the simname text file.
"""
hop.HasherObjectPy_set_ratio(self.hasher, ratio)
# needed?
sys.stdout = sys.stderr
print "[HasherSwig.get_similar_images: log] preparing search for {}".format(featurefilename)
hop.HasherObjectPy_set_near_dup_th(self.hasher, near_dup_th)
hop.HasherObjectPy_set_query_feats_from_disk(self.hasher, featurefilename)
hop.HasherObjectPy_set_outputfile(self.hasher, featurefilename[:-4])
hop.HasherObjectPy_find_knn(self.hasher)
initname = featurefilename[:-4] + '-sim.txt'
simname = featurefilename[:-4] + '-sim_'+str(ratio)+'.txt'
print "[HasherSwig.get_similar_images: log] try to rename {} to {}".format(initname,simname)
# this would raise an error if results have not been computed
os.rename(initname,simname)
return simname
def get_similar_images_from_featuresfile_nodiskout(self, featurefilename, ratio, demote=False):
""" Get similar images of the images with features in 'featurefilename'.
:param featurefilename: features of the query images.
:type featurefilename: string
:param ratio: ratio of images retrieved with hashing that will be reranked.
:type ratio: float
:returns simlist: list of nearest neighbors of each query
"""
hop.HasherObjectPy_set_ratio(self.hasher, ratio)
# needed?
sys.stdout = sys.stderr
hop.HasherObjectPy_set_query_feats_from_disk(self.hasher, featurefilename)
hop.HasherObjectPy_set_outputfile(self.hasher, featurefilename[:-4])
out_res = hop.HasherObjectPy_find_knn_nodiskout(self.hasher)
print "[HasherSwig.get_similar_images_from_featuresfile_nodiskout: log] out_res: {}".format(out_res)
return out_res
|
ponty/MyElectronicProjects
|
docs/projects/usbasp/usbasp_reset_old.py
|
#!/usr/bin/env python
import fcntl
import usb
# USBasp programmer's USB vendor/product ids and the usbfs "reset device"
# ioctl request number (USBDEVFS_RESET).
ID_VENDOR = 0x16c0
ID_PRODUCT = 0x05dc
USBDEVFS_RESET = 21780
class Device:
    """A USB device located on the bus, addressed through its usbfs node."""

    def __init__(self):
        """No-op: instances are populated by Device.find()."""

    @classmethod
    def find(cls, idVendor, idProduct):
        """Scan all USB buses for the first device matching idVendor:idProduct.

        :returns: a populated Device, or None when no match was found.
        """
        print("searching for device (%x:%x)" % (idVendor, idProduct))
        for bus in usb.busses():
            for dev in bus.devices:
                if idVendor == dev.idVendor:
                    if idProduct == dev.idProduct:
                        d = Device()
                        d.bus = bus
                        d.dev = dev
                        return d
        print("device not found")
        return None  # explicit: the old implicit fall-through was easy to miss

    @property
    def usbfs_filename(self):
        """Path of the device's usbfs node, e.g. '/dev/bus/usb/001/004'."""
        return '/dev/bus/usb/%s/%s' % (self.bus.dirname, self.dev.filename)

    def reset(self):
        """Issue the USBDEVFS_RESET ioctl against the device's usbfs node."""
        print("Resetting USB device %s" % self.usbfs_filename)
        with open(self.usbfs_filename, 'w') as fd:
            try:
                fcntl.ioctl(fd, USBDEVFS_RESET, 0)
            except (IOError, OSError):
                # BUGFIX: fcntl.ioctl reports failure by raising, never by a
                # negative return value, so the old `if rc < 0` check was
                # dead code and failures escaped the intended error message.
                print("Error in ioctl")
# Script entry point: locate the USBasp programmer and reset it if present.
d = Device.find(ID_VENDOR, ID_PRODUCT)
if d:
    d.reset()
    print("Reset successful\n")
|
Sythelux/Picarto.bundle
|
Contents/Libraries/Shared/PicartoClientAPI/models/thumbnail.py
|
# coding: utf-8
"""
Picarto.TV API Documentation
The Picarto.TV API documentation Note, for fixed access tokens, the header that needs to be sent is of the format: `Authorization: Bearer yourTokenHere` This can be generated at https://oauth.picarto.tv/ For chat API, see https://docs.picarto.tv/chat/chat.proto - contact via the email below for implementation details
OpenAPI spec version: 1.2.5
Contact: api@picarto.tv
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Thumbnail(object):
    """Swagger model holding a channel's thumbnail URLs in four sizes.

    Originally produced by the swagger code generator; this is a behaviorally
    equivalent hand-cleaned version with the same public interface.
    """

    # attribute name -> attribute type
    swagger_types = {
        'web': 'str',
        'web_large': 'str',
        'mobile': 'str',
        'tablet': 'str'
    }

    # attribute name -> json key in the API definition
    attribute_map = {
        'web': 'web',
        'web_large': 'web_large',
        'mobile': 'mobile',
        'tablet': 'tablet'
    }

    def __init__(self, web=None, web_large=None, mobile=None, tablet=None):
        """Create a Thumbnail; sizes not given stay None."""
        self._web = None
        self._web_large = None
        self._mobile = None
        self._tablet = None
        if web is not None:
            self.web = web
        if web_large is not None:
            self.web_large = web_large
        if mobile is not None:
            self.mobile = mobile
        if tablet is not None:
            self.tablet = tablet

    @property
    def web(self):
        """Web size thumbnail URL of this Thumbnail.

        :rtype: str
        """
        return self._web

    @web.setter
    def web(self, web):
        """Set the web size thumbnail URL.

        :type web: str
        """
        self._web = web

    @property
    def web_large(self):
        """Web HD size thumbnail URL of this Thumbnail.

        :rtype: str
        """
        return self._web_large

    @web_large.setter
    def web_large(self, web_large):
        """Set the web HD size thumbnail URL.

        :type web_large: str
        """
        self._web_large = web_large

    @property
    def mobile(self):
        """Mobile size thumbnail URL of this Thumbnail.

        :rtype: str
        """
        return self._mobile

    @mobile.setter
    def mobile(self, mobile):
        """Set the mobile size thumbnail URL.

        :type mobile: str
        """
        self._mobile = mobile

    @property
    def tablet(self):
        """Tablet size thumbnail URL of this Thumbnail.

        :rtype: str
        """
        return self._tablet

    @tablet.setter
    def tablet(self, tablet):
        """Set the tablet size thumbnail URL.

        :type tablet: str
        """
        self._tablet = tablet

    def to_dict(self):
        """Return the model's properties as a plain dict, converting nested
        models (anything with a to_dict method) recursively."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    (k, v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in value.items())
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff the other object is a Thumbnail with equal state."""
        return isinstance(other, Thumbnail) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
|
dogukantufekci/supersalon
|
supersalon/products/migrations/0002_auto_20151112_1729.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds due-period and reminder-day-count fields to the Product model."""
    dependencies = [
        ('products', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='product',
            name='due_period',
            field=models.PositiveSmallIntegerField(null=True, verbose_name='Due Period (in days)', blank=True),
        ),
        migrations.AddField(
            model_name='product',
            name='reminder_day_count_after_due_date',
            # NOTE(review): verbose_name reads 'Before After Date' — likely a
            # typo for 'After Due Date'; correcting it would need a follow-up
            # migration, so it is left untouched here.
            field=models.PositiveSmallIntegerField(verbose_name='Reminder Day Count Before After Date', default=12),
        ),
        migrations.AddField(
            model_name='product',
            name='reminder_day_count_before_due_date',
            field=models.PositiveSmallIntegerField(verbose_name='Reminder Day Count Before Due Date', default=2),
        ),
    ]
|
DjenieLabs/django-magic-content-calendarevents
|
magiccontentcalendarevents/views.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import HttpResponse, Http404
from django.views.generic.edit import CreateView, UpdateView
from django.views.generic import ListView, TemplateView
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from accounts.mixins import OwnerRequiredMixin, CanEditMixin
from magiccontent.mixins import (ListContentMixin, EditableMixin,
CreateContentMixin, )
from magiccontent.views import MagicDeleteView, PictureUpdateView
from magiccontent.models import Widget
from .models import CalendarEventContent
from .forms import EventContentForm, EventContentCreateForm
from .util import events_to_json
class EventContentMixin(object):
    # Shared model/form/template configuration for the event-content CBVs below.
    model = CalendarEventContent
    form_class = EventContentForm
    template_name = 'magiccontent/calendareventcontent_form.html'
class EventContentCreateView(CreateContentMixin, EventContentMixin,
                             EditableMixin, CreateView):
    # Creation uses a dedicated form (EventContentCreateForm) instead of the
    # mixin's default edit form.
    form_class = EventContentCreateForm
class EventContentUpdateView(EventContentMixin, EditableMixin, UpdateView):
    pass
class EventContentPictureUpdateView(EventContentMixin, EditableMixin,
                                    PictureUpdateView):
    template_name = 'magiccontent/defaultcontent_image_form.html'
class EventContentDeleteView(EventContentMixin, OwnerRequiredMixin,
                             MagicDeleteView):
    pass
class EventContentOrderListView(ListContentMixin, EventContentMixin,
                                OwnerRequiredMixin, ListView):
    pass
class ShowCalendarContentPageView(CanEditMixin, TemplateView):
    # Renders the calendar page for a single widget; the page's client-side
    # calendar fetches its events as JSON from 'events_list_url'.
    template_name = "magiccontent/calendar.html"
    def get_context_data(self, **kwargs):
        context = super(ShowCalendarContentPageView,
                        self).get_context_data(**kwargs)
        widget = get_object_or_404(Widget, pk=self.kwargs.get('pk', None))
        context['widget'] = widget
        context['content_list'] = widget.get_widget_type.objects.filter(
            widget=widget)
        # URL the calendar polls for this widget's events (see events_list).
        event_url = reverse('calendarcontent.events.list',
                            kwargs={'pk': widget.id})
        context['events_list_url'] = event_url
        return context
class ShowCalendarContentItemView(CanEditMixin, TemplateView):
    # Detail page for one calendar event; slug-checked to avoid duplicate URLs.
    template_name = "magiccontent/item.html"
    def dispatch(self, request, *args, **kws):
        ''' verify if the slug really exists avoiding Search Engine to index
        anything like /same-content/wiget_pk/pk /same-content-2/wiget_pk/pk
        Duplicated content has a huge negative impact in SEO!
        '''
        slug = self.kwargs.get('slug', None)
        if not slug:
            raise Http404('no slug')
        try:
            # '$' can never match a pk, so a missing 'pk' kwarg falls through
            # to DoesNotExist below rather than raising a different error.
            self.event = CalendarEventContent.site_objects.get(
                pk=self.kwargs.get('pk', '$'))
        except CalendarEventContent.DoesNotExist:
            raise Http404('CalendarEventContent not found')
        if self.event.slug != slug:
            raise Http404('invalid slug')
        return super(ShowCalendarContentItemView, self).dispatch(
            request, *args, **kws)
    def get_context_data(self, **kwargs):
        context = super(ShowCalendarContentItemView,
                        self).get_context_data(**kwargs)
        context['widget'] = self.event.widget
        context['object'] = self.event
        return context
def events_list(request, **kwargs):
    """Return every event of the given widget's calendar as a JSON response."""
    widget = get_object_or_404(Widget, pk=kwargs.get('pk', None))
    payload = events_to_json(
        CalendarEventContent.site_objects.filter(widget=widget))
    return HttpResponse(payload, content_type='application/json')
|
leighpauls/k2cro4
|
third_party/webdriver/pylib/selenium/webdriver/remote/webelement.py
|
# Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WebElement implementation."""
import os
import zipfile
from StringIO import StringIO
import base64
from command import Command
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
class WebElement(object):
    """Represents an HTML element.
    Generally, all interesting operations to do with interacting with a page
    will be performed through this interface."""
    def __init__(self, parent, id_):
        # parent: the driver/session that found this element; id_: the
        # server-assigned opaque element id sent with every wire command.
        self._parent = parent
        self._id = id_
    @property
    def tag_name(self):
        """Gets this element's tagName property."""
        return self._execute(Command.GET_ELEMENT_TAG_NAME)['value']
    @property
    def text(self):
        """Gets the text of the element."""
        return self._execute(Command.GET_ELEMENT_TEXT)['value']
    def click(self):
        """Clicks the element."""
        self._execute(Command.CLICK_ELEMENT)
    def submit(self):
        """Submits a form."""
        self._execute(Command.SUBMIT_ELEMENT)
    def clear(self):
        """Clears the text if it's a text entry element."""
        self._execute(Command.CLEAR_ELEMENT)
    def get_attribute(self, name):
        """Gets the attribute value.

        Returns None if the attribute is absent; boolean values are returned
        as the lowercase strings 'true'/'false'.
        """
        resp = self._execute(Command.GET_ELEMENT_ATTRIBUTE, {'name': name})
        attributeValue = ''
        if resp['value'] is None:
            attributeValue = None
        else:
            attributeValue = unicode(resp['value'])
            # Booleans arrive as Python bools; normalise to 'true'/'false'.
            if type(resp['value']) is bool:
                attributeValue = attributeValue.lower()
        return attributeValue
    def is_selected(self):
        """Whether the element is selected."""
        return self._execute(Command.IS_ELEMENT_SELECTED)['value']
    def is_enabled(self):
        """Whether the element is enabled."""
        return self._execute(Command.IS_ELEMENT_ENABLED)['value']
    def find_element_by_id(self, id_):
        """Finds element by id."""
        return self.find_element(by=By.ID, value=id_)
    def find_elements_by_id(self, id_):
        """Finds elements by id."""
        return self.find_elements(by=By.ID, value=id_)
    def find_element_by_name(self, name):
        """Find element by name."""
        return self.find_element(by=By.NAME, value=name)
    def find_elements_by_name(self, name):
        """Finds elements by name."""
        return self.find_elements(by=By.NAME, value=name)
    def find_element_by_link_text(self, link_text):
        """Finds element by link text."""
        return self.find_element(by=By.LINK_TEXT, value=link_text)
    def find_elements_by_link_text(self, link_text):
        """Finds elements by link text."""
        return self.find_elements(by=By.LINK_TEXT, value=link_text)
    def find_element_by_partial_link_text(self, link_text):
        """Finds element by partial link text."""
        return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
    def find_elements_by_partial_link_text(self, link_text):
        """Finds elements by partial link text."""
        return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
    def find_element_by_tag_name(self, name):
        """Finds element by tag name."""
        return self.find_element(by=By.TAG_NAME, value=name)
    def find_elements_by_tag_name(self, name):
        """Finds elements by tag name."""
        return self.find_elements(by=By.TAG_NAME, value=name)
    def find_element_by_xpath(self, xpath):
        """Finds element by xpath."""
        return self.find_element(by=By.XPATH, value=xpath)
    def find_elements_by_xpath(self, xpath):
        """Finds elements within the elements by xpath."""
        return self.find_elements(by=By.XPATH, value=xpath)
    def find_element_by_class_name(self, name):
        """Finds an element by their class name."""
        return self.find_element(by=By.CLASS_NAME, value=name)
    def find_elements_by_class_name(self, name):
        """Finds elements by their class name."""
        return self.find_elements(by=By.CLASS_NAME, value=name)
    def find_element_by_css_selector(self, css_selector):
        """Find and return an element by CSS selector."""
        return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
    def find_elements_by_css_selector(self, css_selector):
        """Find and return list of multiple elements by CSS selector."""
        return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
    def send_keys(self, *value):
        """Simulates typing into the element.

        If the concatenated keys name an existing local file, the file is
        uploaded first and its remote path is typed instead (file inputs).
        """
        local_file = LocalFileDetector.is_local_file(*value)
        if local_file is not None:
            value = self._upload(local_file)
        typing = []
        for val in value:
            if isinstance(val, Keys):
                typing.append(val)
            elif isinstance(val, int):
                # Integers are typed out digit by digit.
                val = str(val)
                for i in range(len(val)):
                    typing.append(val[i])
            else:
                # Strings (and other sequences) are split into single chars.
                for i in range(len(val)):
                    typing.append(val[i])
        self._execute(Command.SEND_KEYS_TO_ELEMENT, {'value': typing})
    # RenderedWebElement Items
    def is_displayed(self):
        """Whether the element would be visible to a user"""
        return self._execute(Command.IS_ELEMENT_DISPLAYED)['value']
    @property
    def size(self):
        """ Returns the size of the element """
        size = self._execute(Command.GET_ELEMENT_SIZE)['value']
        # Copy only height/width; the server response may carry extra keys.
        new_size = {}
        new_size["height"] = size["height"]
        new_size["width"] = size["width"]
        return new_size
    def value_of_css_property(self, property_name):
        """ Returns the value of a CSS property """
        return self._execute(Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY,
            {'propertyName': property_name})['value']
    @property
    def location(self):
        """ Returns the location of the element in the renderable canvas"""
        return self._execute(Command.GET_ELEMENT_LOCATION)['value']
    @property
    def parent(self):
        # The driver/session object this element belongs to.
        return self._parent
    @property
    def id(self):
        # The server-side opaque id of this element.
        return self._id
    # Private Methods
    def _execute(self, command, params=None):
        """Executes a command against the underlying HTML element.
        Args:
          command: The name of the command to _execute as a string.
          params: A dictionary of named parameters to send with the command.
        Returns:
          The command's JSON response loaded into a dictionary object.
        """
        if not params:
            params = {}
        # Every element command carries this element's id.
        params['id'] = self._id
        return self._parent.execute(command, params)
    def find_element(self, by=By.ID, value=None):
        """Finds a single child element using the given locator strategy."""
        return self._execute(Command.FIND_CHILD_ELEMENT,
                             {"using": by, "value": value})['value']
    def find_elements(self, by=By.ID, value=None):
        """Finds all child elements matching the given locator strategy."""
        return self._execute(Command.FIND_CHILD_ELEMENTS,
                             {"using": by, "value": value})['value']
    def _upload(self, filename):
        """Zips *filename* and uploads it to the remote end.

        Returns the remote path, or the local path unchanged when the server
        does not implement the UPLOAD_FILE command.
        """
        fp = StringIO()
        zipped = zipfile.ZipFile(fp, 'w', zipfile.ZIP_DEFLATED)
        zipped.write(filename, os.path.split(filename)[1])
        zipped.close()
        try:
            return self._execute(Command.UPLOAD_FILE,
                {'file': base64.encodestring(fp.getvalue())})['value']
        except WebDriverException as e:
            # Older servers reject the command with one of these messages;
            # fall back to sending the local path as-is.
            if "Unrecognized command: POST" in e.__str__():
                return filename
            elif "Command not found: POST " in e.__str__():
                return filename
            elif '{"status":405,"value":["GET","HEAD","DELETE"]}' in e.__str__():
                return filename
            else:
                raise e
class LocalFileDetector(object):
    """Detects whether a key sequence names a file on the local disk."""
    @classmethod
    def is_local_file(cls, *keys):
        """Return the joined key sequence if it is an existing local path,
        otherwise None.

        Mirrors the character-by-character joining done by
        WebElement.send_keys so both see the same final string.
        """
        parts = []
        for val in keys:
            # NOTE(review): plain strings are never Keys instances, so this
            # branch is effectively dead — kept to match send_keys exactly.
            if isinstance(val, Keys):
                parts.append(val)
            elif isinstance(val, int):
                parts.extend(str(val))
            else:
                parts.extend(val)
        file_path = ''.join(parts)
        # Fixed: original compared `file_path is ''` — identity, not equality,
        # which is implementation-dependent for strings.
        if not file_path:
            return None
        if os.path.exists(file_path):
            return file_path
        return None
|
kumar303/rockit
|
vendor-local/djcelery/management/commands/celeryd_detach.py
|
"""
Start detached worker node from the Django management utility.
"""
import os
import sys
from celery.bin import celeryd_detach
from djcelery.management.base import CeleryCommand
class Command(CeleryCommand):
    """Run the celery daemon."""
    help = 'Runs a detached Celery worker node.'
    requires_model_validation = True
    # Reuse celery's own detach option set so the flags stay in sync.
    option_list = celeryd_detach.OPTION_LIST
    def run_from_argv(self, argv):
        # The subclass is created per invocation so execv_argv can embed the
        # absolute path of the current manage.py; the detached process then
        # re-executes it with the 'celeryd' subcommand.
        class detached(celeryd_detach.detached_celeryd):
            execv_argv = [os.path.abspath(sys.argv[0]), "celeryd"]
        detached().execute_from_commandline(argv)
|
kbsezginel/raspberry-pi
|
rpi-web-server/camera/app-camera.py
|
import datetime
from flask import Flask, render_template, redirect, url_for, request
import picamera
from time import sleep
# Flask application serving the Raspberry Pi camera / home-control web UI.
app = Flask(__name__)
@app.route('/')
def index():
    """Render the home page with weather and bus information."""
    # NOTE(review): `weather` and `bus_data` are not defined anywhere in this
    # module — as written this raises NameError; confirm where they come from.
    index_data = {'high': weather['high'], 'low': weather['low'], 'bus': bus_data}
    return render_template('index.html', **index_data)
@app.route('/camera')
def camera_page():
    """Render the camera control page."""
    return render_template('camera.html')
# Route for sending RF signal to outlets
@app.route('/postmethod', methods=['POST'])
def get_post():
    """Handle an outlet on/off request posted by the UI; echoes the outlet."""
    outlet, status = request.form['outlet'], request.form['status']
    now = datetime.datetime.now()
    time = now.strftime("%Y-%m-%d %H:%M")
    print('Time: %s | Outlet: %s | Status: %s' % (time, outlet, status))
    # NOTE(review): rf_send, blink_settings, led_settings and blink are not
    # defined in this module — confirm they are imported/injected elsewhere.
    rf_send(outlet, status)
    if blink_settings['blink']:
        blink(led_settings[status], led_settings['num'], led_settings['speed'])
    return outlet
@app.route('/postcamera', methods=['POST'])
def get_camera():
    """Take a photo or a 5-second video, named with the current timestamp."""
    req = request.form['request']
    now = datetime.datetime.now()
    time = now.strftime("%Y-%m-%d_%H-%M")
    # NOTE(review): `camera` (presumably picamera.PiCamera) is never created
    # in this module — confirm; also, a request that is neither 'photo' nor
    # 'video' leaves `filename` unbound.
    if req == 'photo':
        filename = '%s.jpg' % time
        camera.capture(filename)
    elif req == 'video':
        filename = '%s.h264' % time
        camera.start_recording(filename)
        sleep(5)
        camera.stop_recording()
    return filename
if __name__ == '__main__':
    # Listen on all interfaces so the Pi is reachable on the LAN; debug on.
    app.run(host='0.0.0.0', debug=True)
|
puttarajubr/commcare-hq
|
custom/ewsghana/__init__.py
|
from custom.ewsghana.comparison_report import ProductsCompareReport, LocationsCompareReport,\
SMSUsersCompareReport, WebUsersCompareReport, SupplyPointsCompareReport
from custom.ewsghana.reports.email_reports import CMSRMSReport, StockSummaryReport
from custom.ewsghana.reports.maps import EWSMapReport
from custom.ewsghana.reports.stock_levels_report import StockLevelsReport
from custom.ewsghana.reports.specific_reports.dashboard_report import DashboardReport
from custom.ewsghana.reports.specific_reports.stock_status_report import StockStatus
from custom.ewsghana.reports.specific_reports.reporting_rates import ReportingRatesReport
from custom.ewsghana.reports.stock_transaction import StockTransactionReport
# Location hierarchy used by EWS Ghana, ordered from coarsest to finest.
LOCATION_TYPES = ["country", "region", "district", "facility"]
# Report sections registered with CommCare HQ: (section title, report classes).
CUSTOM_REPORTS = (
    ('Custom reports', (
        DashboardReport,
        StockStatus,
        StockLevelsReport,
        ReportingRatesReport,
        EWSMapReport,
        CMSRMSReport,
        StockSummaryReport,
        StockTransactionReport
    )),
    ('Compare reports', (
        ProductsCompareReport,
        LocationsCompareReport,
        SupplyPointsCompareReport,
        WebUsersCompareReport,
        SMSUsersCompareReport,
    ))
)
|
goddardl/gaffer
|
python/GafferImageUI/ImageSamplerUI.py
|
##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferImage
# Registers UI metadata (node and per-plug descriptions) for ImageSampler.
Gaffer.Metadata.registerNode(
GafferImage.ImageSampler,
"description",
"""
Samples image colour at a specified pixel location.
""",
plugs = {
"image" : [
"description",
"""
The image to be sampled.
""",
],
"pixel" : [
"description",
"""
The coordinates of the pixel to sample. These can have
fractional values - the filter will be used to generate
appropriate interpolated values.
""",
],
"filter" : [
"description",
"""
The filter used to generate interpolated pixel values.
""",
],
"color" : [
"description",
"""
The sampled colour.
""",
]
}
)
|
jorge-marques/wagtail
|
wagtail/wagtailcore/rich_text.py
|
import re # parsing HTML with regexes LIKE A BOSS.
from django.utils.html import escape
from wagtail.wagtailcore.whitelist import Whitelister
from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore import hooks
# Define a set of 'embed handlers' and 'link handlers'. These handle the translation
# of 'special' HTML elements in rich text - ones which we do not want to include
# verbatim in the DB representation because they embed information which is stored
# elsewhere in the database and is liable to change - from real HTML representation
# to DB representation and back again.
class PageLinkHandler(object):
    """
    PageLinkHandler will be invoked whenever we encounter an <a> element in HTML content
    with an attribute of data-linktype="page". The resulting element in the database
    representation will be:
    <a linktype="page" id="42">hello world</a>
    """
    @staticmethod
    def get_db_attributes(tag):
        """
        Given an <a> tag that we've identified as a page link embed (because it has a
        data-linktype="page" attribute), return a dict of the attributes we should
        have on the resulting <a linktype="page"> element.
        """
        return {'id': tag['data-id']}
    @staticmethod
    def expand_db_attributes(attrs, for_editor):
        # Turn DB attributes back into a real <a href=...> opening tag; in
        # editor mode the data-* attributes are kept so the link stays editable.
        try:
            page = Page.objects.get(id=attrs['id'])
            if for_editor:
                editor_attrs = 'data-linktype="page" data-id="%d" ' % page.id
            else:
                editor_attrs = ''
            return '<a %shref="%s">' % (editor_attrs, escape(page.url))
        except Page.DoesNotExist:
            # Broken page reference: emit a bare anchor instead of crashing.
            return "<a>"
# Handler registries; populated lazily from hooks on first lookup (see
# get_embed_handler / get_link_handler below).
EMBED_HANDLERS = {}
LINK_HANDLERS = {
    'page': PageLinkHandler,
}
has_loaded_embed_handlers = False
has_loaded_link_handlers = False
def get_embed_handler(embed_type):
    """Return the embed handler registered for *embed_type*, loading
    hook-registered handlers into the registry on first use."""
    global EMBED_HANDLERS, has_loaded_embed_handlers
    if not has_loaded_embed_handlers:
        for hook in hooks.get_hooks('register_rich_text_embed_handler'):
            handler_name, handler = hook()
            EMBED_HANDLERS[handler_name] = handler
        has_loaded_embed_handlers = True
    return EMBED_HANDLERS[embed_type]
def get_link_handler(link_type):
    """Return the link handler registered for *link_type*, loading
    hook-registered handlers into the registry on first use."""
    global LINK_HANDLERS, has_loaded_link_handlers
    if not has_loaded_link_handlers:
        for hook in hooks.get_hooks('register_rich_text_link_handler'):
            handler_name, handler = hook()
            LINK_HANDLERS[handler_name] = handler
        has_loaded_link_handlers = True
    return LINK_HANDLERS[link_type]
class DbWhitelister(Whitelister):
    """
    A custom whitelisting engine to convert the HTML as returned by the rich text editor
    into the pseudo-HTML format stored in the database (in which images, documents and other
    linked objects are identified by ID rather than URL):
    * implements a 'construct_whitelister_element_rules' hook so that other apps can modify
    the whitelist ruleset (e.g. to permit additional HTML elements beyond those in the base
    Whitelister module);
    * replaces any element with a 'data-embedtype' attribute with an <embed> element, with
    attributes supplied by the handler for that type as defined in EMBED_HANDLERS;
    * rewrites the attributes of any <a> element with a 'data-linktype' attribute, as
    determined by the handler for that type defined in LINK_HANDLERS, while keeping the
    element content intact.
    """
    has_loaded_custom_whitelist_rules = False
    @classmethod
    def clean(cls, html):
        # Merge hook-supplied whitelist rules once, on the first clean() call.
        if not cls.has_loaded_custom_whitelist_rules:
            for fn in hooks.get_hooks('construct_whitelister_element_rules'):
                # Copy before updating so the base Whitelister's class-level
                # rules are not mutated in place.
                cls.element_rules = cls.element_rules.copy()
                cls.element_rules.update(fn())
            cls.has_loaded_custom_whitelist_rules = True
        return super(DbWhitelister, cls).clean(html)
    @classmethod
    def clean_tag_node(cls, doc, tag):
        if 'data-embedtype' in tag.attrs:
            # Editor embeds become self-closing <embed> elements whose
            # attributes come from the registered handler.
            embed_type = tag['data-embedtype']
            # fetch the appropriate embed handler for this embedtype
            embed_handler = get_embed_handler(embed_type)
            embed_attrs = embed_handler.get_db_attributes(tag)
            embed_attrs['embedtype'] = embed_type
            embed_tag = doc.new_tag('embed', **embed_attrs)
            embed_tag.can_be_empty_element = True
            tag.replace_with(embed_tag)
        elif tag.name == 'a' and 'data-linktype' in tag.attrs:
            # first, whitelist the contents of this tag
            for child in tag.contents:
                cls.clean_node(doc, child)
            link_type = tag['data-linktype']
            link_handler = get_link_handler(link_type)
            link_attrs = link_handler.get_db_attributes(tag)
            link_attrs['linktype'] = link_type
            # Replace all editor attributes with the DB representation.
            tag.attrs.clear()
            tag.attrs.update(**link_attrs)
        elif tag.name == 'div':
            # Rich text editors emit <div>; normalise to paragraphs.
            tag.name = 'p'
        else:
            super(DbWhitelister, cls).clean_tag_node(doc, tag)
# Patterns for locating DB-representation tags and their attribute strings.
FIND_A_TAG = re.compile(r'<a(\b[^>]*)>')
FIND_EMBED_TAG = re.compile(r'<embed(\b[^>]*)/>')
FIND_ATTRS = re.compile(r'([\w-]+)\="([^"]*)"')


def extract_attrs(attr_string):
    """
    helper method to extract tag attributes as a dict. Does not escape HTML entities!
    """
    return {name: value for name, value in FIND_ATTRS.findall(attr_string)}
def expand_db_html(html, for_editor=False):
    """
    Expand database-representation HTML into proper HTML usable in either
    templates or the rich text editor
    """
    def _rewrite_link(match):
        attrs = extract_attrs(match.group(1))
        if 'linktype' not in attrs:
            # An ordinary anchor, not a DB link — leave it untouched.
            return match.group(0)
        handler = get_link_handler(attrs['linktype'])
        return handler.expand_db_attributes(attrs, for_editor)

    def _rewrite_embed(match):
        attrs = extract_attrs(match.group(1))
        handler = get_embed_handler(attrs['embedtype'])
        return handler.expand_db_attributes(attrs, for_editor)

    # Links first, then embeds — same order as the original implementation.
    return FIND_EMBED_TAG.sub(_rewrite_embed, FIND_A_TAG.sub(_rewrite_link, html))
|
mfwarren/projector
|
projector/config/production.py
|
# -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use sendgird to sendemails
- Use MEMCACHIER on Heroku
'''
from configurations import values
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
try:
from S3 import CallingFormat
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
except ImportError:
# TODO: Fix this where even if in Dev this class is called.
pass
from .common import Common
class Production(Common):
    """Production settings: Heroku SSL detection, S3 static/media storage,
    SendGrid email, memcached caching."""
    # This ensures that Django will be able to detect a secure connection
    # properly on Heroku.
    SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
    # INSTALLED_APPS
    INSTALLED_APPS = Common.INSTALLED_APPS
    # END INSTALLED_APPS
    # SECRET KEY
    SECRET_KEY = values.SecretValue()
    # END SECRET KEY
    # django-secure
    INSTALLED_APPS += ("djangosecure", )
    # set this to 60 seconds and then to 518400 when you can prove it works
    SECURE_HSTS_SECONDS = 60
    SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
    SECURE_FRAME_DENY = values.BooleanValue(True)
    SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
    SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
    # NOTE(review): session cookies default to not-Secure even though SSL
    # redirect is enabled below — confirm this is intentional.
    SESSION_COOKIE_SECURE = values.BooleanValue(False)
    SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
    SECURE_SSL_REDIRECT = values.BooleanValue(True)
    # end django-secure
    # SITE CONFIGURATION
    # Hosts/domain names that are valid for this site
    # See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
    ALLOWED_HOSTS = ["*"]
    # END SITE CONFIGURATION
    INSTALLED_APPS += ("gunicorn", )
    # STORAGE CONFIGURATION
    # See: http://django-storages.readthedocs.org/en/latest/index.html
    INSTALLED_APPS += (
        'storages',
    )
    # See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
    STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
    # See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
    AWS_ACCESS_KEY_ID = values.SecretValue()
    AWS_SECRET_ACCESS_KEY = values.SecretValue()
    AWS_STORAGE_BUCKET_NAME = values.SecretValue()
    AWS_AUTO_CREATE_BUCKET = True
    AWS_QUERYSTRING_AUTH = False
    # see: https://github.com/antonagestam/collectfast
    AWS_PRELOAD_METADATA = True
    INSTALLED_APPS += ("collectfast", )
    # AWS cache settings, don't change unless you know what you're doing:
    # NOTE(review): attribute name is spelled AWS_EXPIREY in this codebase;
    # left as-is because it is referenced in AWS_HEADERS just below.
    AWS_EXPIREY = 60 * 60 * 24 * 7
    AWS_HEADERS = {
        'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (
            AWS_EXPIREY, AWS_EXPIREY)
    }
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
    STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
    # END STORAGE CONFIGURATION
    # EMAIL
    DEFAULT_FROM_EMAIL = values.Value('projector <noreply@example.com>')
    EMAIL_HOST = values.Value('smtp.sendgrid.com')
    EMAIL_HOST_PASSWORD = values.SecretValue(environ_prefix="", environ_name="SENDGRID_PASSWORD")
    EMAIL_HOST_USER = values.SecretValue(environ_prefix="", environ_name="SENDGRID_USERNAME")
    EMAIL_PORT = values.IntegerValue(587, environ_prefix="", environ_name="EMAIL_PORT")
    EMAIL_SUBJECT_PREFIX = values.Value('[projector] ', environ_name="EMAIL_SUBJECT_PREFIX")
    EMAIL_USE_TLS = True
    SERVER_EMAIL = EMAIL_HOST_USER
    # END EMAIL
    # TEMPLATE CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
    TEMPLATE_LOADERS = (
        ('django.template.loaders.cached.Loader', (
            'django.template.loaders.filesystem.Loader',
            'django.template.loaders.app_directories.Loader',
        )),
    )
    # END TEMPLATE CONFIGURATION
    # CACHING
    # Only do this here because thanks to django-pylibmc-sasl and pylibmc
    # memcacheify is painful to install on windows.
    try:
        # See: https://github.com/rdegges/django-heroku-memcacheify
        from memcacheify import memcacheify
        CACHES = memcacheify()
    except ImportError:
        CACHES = values.CacheURLValue(default="memcached://127.0.0.1:11211")
    # END CACHING
    # Your production stuff: Below this line define 3rd party libary settings
|
pbrod/numpy
|
numpy/ma/extras.py
|
"""
Masked arrays add-ons.
A collection of utilities for `numpy.ma`.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
:version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $
"""
# Names re-exported as the public API of numpy.ma.extras.
__all__ = [
    'apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d',
    'atleast_3d', 'average', 'clump_masked', 'clump_unmasked',
    'column_stack', 'compress_cols', 'compress_nd', 'compress_rowcols',
    'compress_rows', 'count_masked', 'corrcoef', 'cov', 'diagflat', 'dot',
    'dstack', 'ediff1d', 'flatnotmasked_contiguous', 'flatnotmasked_edges',
    'hsplit', 'hstack', 'isin', 'in1d', 'intersect1d', 'mask_cols', 'mask_rowcols',
    'mask_rows', 'masked_all', 'masked_all_like', 'median', 'mr_',
    'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack',
    'setdiff1d', 'setxor1d', 'stack', 'unique', 'union1d', 'vander', 'vstack',
    ]
import itertools
import warnings
from . import core as ma
from .core import (
MaskedArray, MAError, add, array, asarray, concatenate, filled, count,
getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or,
nomask, ones, sort, zeros, getdata, get_masked_subclass, dot,
mask_rowcols
)
import numpy as np
from numpy import ndarray, array as nxarray
import numpy.core.umath as umath
from numpy.core.multiarray import normalize_axis_index
from numpy.core.numeric import normalize_axis_tuple
from numpy.lib.function_base import _ureduce
from numpy.lib.index_tricks import AxisConcatenator
def issequence(seq):
    """Return True if *seq* is a sequence (ndarray, tuple or list)."""
    sequence_types = (ndarray, tuple, list)
    return isinstance(seq, sequence_types)
def count_masked(arr, axis=None):
    """
    Count the number of masked elements along the given axis.

    Parameters
    ----------
    arr : array_like
        An array with (possibly) masked elements.
    axis : int, optional
        Axis along which to count. If None (default), a flattened
        version of the array is used.

    Returns
    -------
    count : int, ndarray
        The total number of masked elements (axis=None) or the number
        of masked elements along each slice of the given axis.

    See Also
    --------
    MaskedArray.count : Count non-masked elements.
    """
    # The mask is a boolean array, so summing it counts the True entries.
    return getmaskarray(arr).sum(axis)
def masked_all(shape, dtype=float):
    """
    Return an empty masked array of the given shape and dtype, where all
    the data are masked.

    Parameters
    ----------
    shape : tuple
        Shape of the required MaskedArray.
    dtype : dtype, optional
        Data type of the output.

    Returns
    -------
    a : MaskedArray
        A masked array with all data masked.

    See Also
    --------
    masked_all_like : Empty masked array modelled on an existing array.
    """
    # Uninitialised data is fine — every element is hidden by the full mask.
    full_mask = np.ones(shape, make_mask_descr(dtype))
    return masked_array(np.empty(shape, dtype), mask=full_mask)
def masked_all_like(arr):
    """
    Return an empty masked array with the same shape and dtype as `arr`,
    where all the data are masked.

    Parameters
    ----------
    arr : ndarray
        An array describing the shape and dtype of the required MaskedArray.

    Returns
    -------
    a : MaskedArray
        A masked array with all data masked.

    Raises
    ------
    AttributeError
        If `arr` doesn't have a shape attribute (i.e. not an ndarray).

    See Also
    --------
    masked_all : Empty masked array with all elements masked.
    """
    result = np.empty_like(arr).view(MaskedArray)
    result._mask = np.ones(result.shape, dtype=make_mask_descr(result.dtype))
    return result
#####--------------------------------------------------------------------------
#---- --- Standard functions ---
#####--------------------------------------------------------------------------
class _fromnxfunction:
"""
Defines a wrapper to adapt NumPy functions to masked arrays.
An instance of `_fromnxfunction` can be called with the same parameters
as the wrapped NumPy function. The docstring of `newfunc` is adapted from
the wrapped function as well, see `getdoc`.
This class should not be used directly. Instead, one of its extensions that
provides support for a specific type of input should be used.
Parameters
----------
funcname : str
The name of the function to be adapted. The function should be
in the NumPy namespace (i.e. ``np.funcname``).
"""
def __init__(self, funcname):
self.__name__ = funcname
self.__doc__ = self.getdoc()
def getdoc(self):
"""
Retrieve the docstring and signature from the function.
The ``__doc__`` attribute of the function is used as the docstring for
the new masked array version of the function. A note on application
of the function to the mask is appended.
Parameters
----------
None
"""
npfunc = getattr(np, self.__name__, None)
doc = getattr(npfunc, '__doc__', None)
if doc:
sig = self.__name__ + ma.get_object_signature(npfunc)
doc = ma.doc_note(doc, "The function is applied to both the _data "
"and the _mask, if any.")
return '\n\n'.join((sig, doc))
return
def __call__(self, *args, **params):
pass
class _fromnxfunction_single(_fromnxfunction):
"""
A version of `_fromnxfunction` that is called with a single array
argument followed by auxiliary args that are passed verbatim for
both the data and mask calls.
"""
def __call__(self, x, *args, **params):
func = getattr(np, self.__name__)
if isinstance(x, ndarray):
_d = func(x.__array__(), *args, **params)
_m = func(getmaskarray(x), *args, **params)
return masked_array(_d, mask=_m)
else:
_d = func(np.asarray(x), *args, **params)
_m = func(getmaskarray(x), *args, **params)
return masked_array(_d, mask=_m)
class _fromnxfunction_seq(_fromnxfunction):
"""
A version of `_fromnxfunction` that is called with a single sequence
of arrays followed by auxiliary args that are passed verbatim for
both the data and mask calls.
"""
def __call__(self, x, *args, **params):
func = getattr(np, self.__name__)
_d = func(tuple([np.asarray(a) for a in x]), *args, **params)
_m = func(tuple([getmaskarray(a) for a in x]), *args, **params)
return masked_array(_d, mask=_m)
class _fromnxfunction_args(_fromnxfunction):
"""
A version of `_fromnxfunction` that is called with multiple array
arguments. The first non-array-like input marks the beginning of the
arguments that are passed verbatim for both the data and mask calls.
Array arguments are processed independently and the results are
returned in a list. If only one array is found, the return value is
just the processed array instead of a list.
"""
def __call__(self, *args, **params):
func = getattr(np, self.__name__)
arrays = []
args = list(args)
while len(args) > 0 and issequence(args[0]):
arrays.append(args.pop(0))
res = []
for x in arrays:
_d = func(np.asarray(x), *args, **params)
_m = func(getmaskarray(x), *args, **params)
res.append(masked_array(_d, mask=_m))
if len(arrays) == 1:
return res[0]
return res
class _fromnxfunction_allargs(_fromnxfunction):
"""
A version of `_fromnxfunction` that is called with multiple array
arguments. Similar to `_fromnxfunction_args` except that all args
are converted to arrays even if they are not so already. This makes
it possible to process scalars as 1-D arrays. Only keyword arguments
are passed through verbatim for the data and mask calls. Arrays
arguments are processed independently and the results are returned
in a list. If only one arg is present, the return value is just the
processed array instead of a list.
"""
def __call__(self, *args, **params):
func = getattr(np, self.__name__)
res = []
for x in args:
_d = func(np.asarray(x), **params)
_m = func(getmaskarray(x), **params)
res.append(masked_array(_d, mask=_m))
if len(args) == 1:
return res[0]
return res
# Masked-array versions of the standard NumPy shape-manipulation functions.
# The _allargs wrappers accept any number of array (or scalar) arguments.
atleast_1d = _fromnxfunction_allargs('atleast_1d')
atleast_2d = _fromnxfunction_allargs('atleast_2d')
atleast_3d = _fromnxfunction_allargs('atleast_3d')
# The _seq wrappers take a single sequence of arrays.
vstack = row_stack = _fromnxfunction_seq('vstack')
hstack = _fromnxfunction_seq('hstack')
column_stack = _fromnxfunction_seq('column_stack')
dstack = _fromnxfunction_seq('dstack')
stack = _fromnxfunction_seq('stack')
# The _single wrappers take one array plus auxiliary arguments.
hsplit = _fromnxfunction_single('hsplit')
diagflat = _fromnxfunction_single('diagflat')
#####--------------------------------------------------------------------------
#----
#####--------------------------------------------------------------------------
def flatten_inplace(seq):
    """Flatten a (possibly nested) mutable sequence in place and return it."""
    idx = 0
    while idx != len(seq):
        # Keep splicing the current element's contents into the sequence
        # until the element is no longer iterable, then advance.
        while hasattr(seq[idx], '__iter__'):
            seq[idx:idx + 1] = seq[idx]
        idx += 1
    return seq
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
    """
    (This docstring should be overwritten)
    """
    # Masked-array version of np.apply_along_axis: apply `func1d` to 1-D
    # slices of `arr` taken along `axis`, preserving masks.  The public
    # docstring is copied from np.apply_along_axis below.
    # Fix: use np.prod instead of the deprecated alias np.product
    # (removed in NumPy 2.0).
    arr = array(arr, copy=False, subok=True)
    nd = arr.ndim
    axis = normalize_axis_index(axis, nd)
    ind = [0] * (nd - 1)
    # `i` is an object-dtype index vector holding a full slice along
    # `axis` and integer indices (taken from `ind`) everywhere else.
    i = np.zeros(nd, 'O')
    indlist = list(range(nd))
    indlist.remove(axis)
    i[axis] = slice(None, None)
    outshape = np.asarray(arr.shape).take(indlist)
    i.put(indlist, ind)
    res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
    # if res is a number, then we have a smaller output array
    asscalar = np.isscalar(res)
    if not asscalar:
        try:
            len(res)
        except TypeError:
            asscalar = True
    # Note: we shouldn't set the dtype of the output from the first result
    # so we force the type to object, and build a list of dtypes.  We'll
    # just take the largest, to avoid some downcasting
    dtypes = []
    if asscalar:
        # func1d returns a scalar: the output drops the reduced axis.
        dtypes.append(np.asarray(res).dtype)
        outarr = zeros(outshape, object)
        outarr[tuple(ind)] = res
        Ntot = np.prod(outshape)
        k = 1
        while k < Ntot:
            # increment the index (odometer-style over the non-axis dims)
            ind[-1] += 1
            n = -1
            while (ind[n] >= outshape[n]) and (n > (1 - nd)):
                ind[n - 1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
            outarr[tuple(ind)] = res
            dtypes.append(asarray(res).dtype)
            k += 1
    else:
        # func1d returns an array: the reduced axis is replaced by the
        # result's shape.
        res = array(res, copy=False, subok=True)
        j = i.copy()
        j[axis] = ([slice(None, None)] * res.ndim)
        j.put(indlist, ind)
        Ntot = np.prod(outshape)
        holdshape = outshape
        outshape = list(arr.shape)
        outshape[axis] = res.shape
        dtypes.append(asarray(res).dtype)
        outshape = flatten_inplace(outshape)
        outarr = zeros(outshape, object)
        outarr[tuple(flatten_inplace(j.tolist()))] = res
        k = 1
        while k < Ntot:
            # increment the index
            ind[-1] += 1
            n = -1
            while (ind[n] >= holdshape[n]) and (n > (1 - nd)):
                ind[n - 1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            j.put(indlist, ind)
            res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
            outarr[tuple(flatten_inplace(j.tolist()))] = res
            dtypes.append(asarray(res).dtype)
            k += 1
    # Promote the object-dtype result to the widest dtype encountered.
    max_dtypes = np.dtype(np.asarray(dtypes).max())
    if not hasattr(arr, '_mask'):
        result = np.asarray(outarr, dtype=max_dtypes)
    else:
        result = asarray(outarr, dtype=max_dtypes)
        result.fill_value = ma.default_fill_value(result)
    return result
apply_along_axis.__doc__ = np.apply_along_axis.__doc__
def apply_over_axes(func, a, axes):
    """
    (This docstring will be overwritten)
    """
    result = asarray(a)
    ndim = a.ndim
    # A scalar axis argument is treated as a one-element tuple.
    if array(axes).ndim == 0:
        axes = (axes,)
    for axis in axes:
        if axis < 0:
            axis += ndim
        res = func(result, axis)
        if res.ndim != result.ndim:
            # The reduction dropped a dimension; restore it so the result
            # keeps broadcasting against the input.
            res = ma.expand_dims(res, axis)
            if res.ndim != result.ndim:
                raise ValueError("function is not returning "
                                 "an array of the correct shape")
        result = res
    return result
# Replace the placeholder docstring of `apply_over_axes` with the one from
# np.apply_over_axes (truncated before its Notes section), extended with
# masked-array-specific examples.
if apply_over_axes.__doc__ is not None:
    apply_over_axes.__doc__ = np.apply_over_axes.__doc__[
        :np.apply_over_axes.__doc__.find('Notes')].rstrip() + \
    """
Examples
--------
>>> a = np.ma.arange(24).reshape(2,3,4)
>>> a[:,0,1] = np.ma.masked
>>> a[:,1,:] = np.ma.masked
>>> a
masked_array(
data=[[[0, --, 2, 3],
[--, --, --, --],
[8, 9, 10, 11]],
[[12, --, 14, 15],
[--, --, --, --],
[20, 21, 22, 23]]],
mask=[[[False, True, False, False],
[ True, True, True, True],
[False, False, False, False]],
[[False, True, False, False],
[ True, True, True, True],
[False, False, False, False]]],
fill_value=999999)
>>> np.ma.apply_over_axes(np.ma.sum, a, [0,2])
masked_array(
data=[[[46],
[--],
[124]]],
mask=[[[False],
[ True],
[False]]],
fill_value=999999)
Tuple axis arguments to ufuncs are equivalent:
>>> np.ma.sum(a, axis=(0,2)).reshape((1,-1,1))
masked_array(
data=[[[46],
[--],
[124]]],
mask=[[[False],
[ True],
[False]]],
fill_value=999999)
"""
def average(a, axis=None, weights=None, returned=False):
    """
    Return the (optionally weighted) average of the array over the
    given axis, ignoring masked entries.

    Parameters
    ----------
    a : array_like
        Data to be averaged; masked entries do not contribute.
    axis : int, optional
        Axis along which to average `a`.  By default the flattened
        array is averaged.
    weights : array_like, optional
        Weight of each element in the average.  Either the same shape
        as `a`, or 1-D with length ``a.shape[axis]``.  When omitted,
        every element has weight one.  ``sum(weights)`` must not be 0.
    returned : bool, optional
        When True, return the tuple ``(average, sum_of_weights)``
        instead of the average alone.  Default is False.

    Returns
    -------
    average, [sum_of_weights] : (tuple of) scalar or MaskedArray
        The average along the specified axis (and, when `returned` is
        True, the sum of the weights).  Integer input and floats
        smaller than float64 are promoted to ``np.float64``.
    """
    a = asarray(a)
    mask = getmask(a)
    # Mirrors np.average, with the mask zeroing out the weights of
    # missing entries.
    if weights is None:
        avg = a.mean(axis)
        scl = avg.dtype.type(a.count(axis))
    else:
        wgt = np.asanyarray(weights)
        # Integer/bool data promotes to at least float64, as np.average does.
        if issubclass(a.dtype.type, (np.integer, np.bool_)):
            result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')
        else:
            result_dtype = np.result_type(a.dtype, wgt.dtype)
        if a.shape != wgt.shape:
            # Only 1-D weights along an explicit axis may differ in shape.
            if axis is None:
                raise TypeError(
                    "Axis must be specified when shapes of a and weights "
                    "differ.")
            if wgt.ndim != 1:
                raise TypeError(
                    "1D weights expected when shapes of a and weights differ.")
            if wgt.shape[0] != a.shape[axis]:
                raise ValueError(
                    "Length of weights not compatible with specified axis.")
            # Align the 1-D weights with `axis` so they broadcast against `a`.
            wgt = np.broadcast_to(wgt, (a.ndim - 1) * (1,) + wgt.shape)
            wgt = wgt.swapaxes(-1, axis)
        if mask is not nomask:
            # Masked entries get zero weight.
            wgt = wgt * (~a.mask)
        scl = wgt.sum(axis=axis, dtype=result_dtype)
        avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis) / scl
    if returned:
        if scl.shape != avg.shape:
            scl = np.broadcast_to(scl, avg.shape).copy()
        return avg, scl
    else:
        return avg
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
    """
    Compute the median along the specified axis, ignoring masked
    entries.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : int, optional
        Axis along which the medians are computed.  The default (None)
        computes the median of the flattened array.
    out : ndarray, optional
        Alternative output array; must have the same shape and buffer
        length as the expected output.
    overwrite_input : bool, optional
        When True, the input array may be reused (and modified) as
        scratch space to save memory; treat the input as undefined
        afterwards.  Requires `a` to already be an ndarray.
    keepdims : bool, optional
        When True, the reduced axes are kept in the result with size
        one, so it broadcasts against the input.

        .. versionadded:: 1.10.0

    Returns
    -------
    median : ndarray
        A new array holding the result, or a reference to `out` when
        given.  Integer input and floats smaller than float64 yield
        ``float64``; other dtypes are preserved.

    See Also
    --------
    mean
    """
    # Inputs without a mask can be handed to np.median directly.
    if not hasattr(a, 'mask'):
        m = np.median(getdata(a, subok=True), axis=axis,
                      out=out, overwrite_input=overwrite_input,
                      keepdims=keepdims)
        if isinstance(m, np.ndarray) and 1 <= m.ndim:
            return masked_array(m, copy=False)
        return m
    # Masked input: delegate to the mask-aware _median via _ureduce,
    # which handles the keepdims reshape bookkeeping.
    r, k = _ureduce(a, func=_median, axis=axis, out=out,
                    overwrite_input=overwrite_input)
    return r.reshape(k) if keepdims else r
def _median(a, axis=None, out=None, overwrite_input=False):
    """
    Core masked-median computation used by `median` (via `_ureduce`).

    Masked values are sorted behind the valid data by filling with +inf
    for inexact dtypes, and the median is taken over the unmasked count
    of each lane only.
    """
    # when an unmasked NaN is present return it, so we need to sort the NaN
    # values behind the mask
    if np.issubdtype(a.dtype, np.inexact):
        fill_value = np.inf
    else:
        fill_value = None
    # Sort in place when allowed, otherwise on a sorted copy.
    if overwrite_input:
        if axis is None:
            asorted = a.ravel()
            asorted.sort(fill_value=fill_value)
        else:
            a.sort(axis=axis, fill_value=fill_value)
            asorted = a
    else:
        asorted = sort(a, axis=axis, fill_value=fill_value)
    if axis is None:
        axis = 0
    else:
        axis = normalize_axis_index(axis, asorted.ndim)
    if asorted.shape[axis] == 0:
        # for empty axis integer indices fail so use slicing to get same result
        # as median (which is mean of empty slice = nan)
        indexer = [slice(None)] * asorted.ndim
        indexer[axis] = slice(0, 0)
        indexer = tuple(indexer)
        return np.ma.mean(asorted[indexer], axis=axis, out=out)
    if asorted.ndim == 1:
        # 1-D fast path: pick the middle one or two valid entries.
        counts = count(asorted)
        idx, odd = divmod(count(asorted), 2)
        mid = asorted[idx + odd - 1:idx + 1]
        if np.issubdtype(asorted.dtype, np.inexact) and asorted.size > 0:
            # avoid inf / x = masked
            s = mid.sum(out=out)
            if not odd:
                s = np.true_divide(s, 2., casting='safe', out=out)
            s = np.lib.utils._median_nancheck(asorted, s, axis, out)
        else:
            s = mid.mean(out=out)
        # if result is masked either the input contained enough
        # minimum_fill_value so that it would be the median or all values
        # masked
        if np.ma.is_masked(s) and not np.all(asorted.mask):
            return np.ma.minimum_fill_value(asorted)
        return s
    # N-D path: locate the low/high median index per lane from the
    # per-lane count of valid entries.
    counts = count(asorted, axis=axis, keepdims=True)
    h = counts // 2
    # duplicate high if odd number of elements so mean does nothing
    odd = counts % 2 == 1
    l = np.where(odd, h, h-1)
    lh = np.concatenate([l,h], axis=axis)
    # get low and high median
    low_high = np.take_along_axis(asorted, lh, axis=axis)
    def replace_masked(s):
        # Replace masked entries with minimum_fill_value unless all values
        # are masked. This is required as the sort order of values equal or
        # larger than the fill value is undefined and a valid value placed
        # elsewhere, e.g. [4, --, inf].
        if np.ma.is_masked(s):
            rep = (~np.all(asorted.mask, axis=axis, keepdims=True)) & s.mask
            s.data[rep] = np.ma.minimum_fill_value(asorted)
            s.mask[rep] = False
    replace_masked(low_high)
    if np.issubdtype(asorted.dtype, np.inexact):
        # avoid inf / x = masked
        s = np.ma.sum(low_high, axis=axis, out=out)
        np.true_divide(s.data, 2., casting='unsafe', out=s.data)
        s = np.lib.utils._median_nancheck(asorted, s, axis, out)
    else:
        s = np.ma.mean(low_high, axis=axis, out=out)
    return s
def compress_nd(x, axis=None):
    """
    Remove every slice containing masked values along the given axes.

    Parameters
    ----------
    x : array_like, MaskedArray
        The array to operate on.  A non-masked input is interpreted as
        a MaskedArray with `mask` set to `nomask`.
    axis : tuple of ints or int, optional
        Axis or axes from which masked slices are removed.  An int
        selects a single axis, a tuple several; None (default) selects
        all axes.

    Returns
    -------
    compress_array : ndarray
        The compressed (plain, unmasked) array.
    """
    x = asarray(x)
    m = getmask(x)
    # Canonicalize `axis` to a tuple of non-negative ints.
    if axis is None:
        axis = tuple(range(x.ndim))
    else:
        axis = normalize_axis_tuple(axis, x.ndim)
    # No masked values at all: the data can be returned unchanged.
    if m is nomask or not m.any():
        return x._data
    # Everything masked: nothing survives.
    if m.all():
        return nxarray([])
    data = x._data
    for ax in axis:
        # Keep only those indices along `ax` whose cross-slice contains
        # no masked element.
        other_axes = tuple(list(range(ax)) + list(range(ax + 1, x.ndim)))
        keep = ~m.any(axis=other_axes)
        data = data[(slice(None),) * ax + (keep,)]
    return data
def compress_rowcols(x, axis=None):
    """
    Remove the rows and/or columns of a 2-D array that contain masked
    values.

    Parameters
    ----------
    x : array_like, MaskedArray
        The 2-D array to operate on.  A non-masked input is interpreted
        as a MaskedArray with `mask` set to `nomask`.
    axis : int, optional
        None (default) removes both rows and columns, 0 removes rows
        only, 1 or -1 removes columns only.

    Returns
    -------
    compressed_array : ndarray
        The compressed array.

    Examples
    --------
    >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
    ...                                                   [1, 0, 0],
    ...                                                   [0, 0, 0]])
    >>> np.ma.compress_rowcols(x)
    array([[7, 8]])
    >>> np.ma.compress_rowcols(x, 0)
    array([[6, 7, 8]])
    >>> np.ma.compress_rowcols(x, 1)
    array([[1, 2],
           [4, 5],
           [7, 8]])
    """
    if asarray(x).ndim != 2:
        raise NotImplementedError("compress_rowcols works for 2D arrays only.")
    # Delegate to the N-dimensional implementation.
    return compress_nd(x, axis=axis)
def compress_rows(a):
    """
    Remove whole rows of a 2-D array that contain masked values.

    Shorthand for ``np.ma.compress_rowcols(a, 0)``; see
    `compress_rowcols` for details.

    See Also
    --------
    compress_rowcols
    """
    arr = asarray(a)
    if arr.ndim != 2:
        raise NotImplementedError("compress_rows works for 2D arrays only.")
    return compress_rowcols(arr, 0)
def compress_cols(a):
    """
    Remove whole columns of a 2-D array that contain masked values.

    Shorthand for ``np.ma.compress_rowcols(a, 1)``; see
    `compress_rowcols` for details.

    See Also
    --------
    compress_rowcols
    """
    arr = asarray(a)
    if arr.ndim != 2:
        raise NotImplementedError("compress_cols works for 2D arrays only.")
    return compress_rowcols(arr, 1)
def mask_rows(a, axis=np._NoValue):
    """
    Mask whole rows of a 2-D array that contain at least one masked
    value.

    Shorthand for ``mask_rowcols(a, 0)``.

    See Also
    --------
    mask_rowcols : Mask rows and/or columns of a 2D array.
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_equal(np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]]), 1)
    >>> ma.mask_rows(a).mask.tolist()
    [[False, False, False], [True, True, True], [False, False, False]]
    """
    if axis is not np._NoValue:
        # remove the axis argument when this deprecation expires
        # NumPy 1.18.0, 2019-11-28
        warnings.warn(
            "The axis argument has always been ignored, in future passing it "
            "will raise TypeError", DeprecationWarning, stacklevel=2)
    return mask_rowcols(a, 0)
def mask_cols(a, axis=np._NoValue):
    """
    Mask whole columns of a 2-D array that contain at least one masked
    value.

    Shorthand for ``mask_rowcols(a, 1)``.

    See Also
    --------
    mask_rowcols : Mask rows and/or columns of a 2D array.
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_equal(np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]]), 1)
    >>> ma.mask_cols(a).mask.tolist()
    [[False, True, False], [False, True, False], [False, True, False]]
    """
    if axis is not np._NoValue:
        # remove the axis argument when this deprecation expires
        # NumPy 1.18.0, 2019-11-28
        warnings.warn(
            "The axis argument has always been ignored, in future passing it "
            "will raise TypeError", DeprecationWarning, stacklevel=2)
    return mask_rowcols(a, 1)
#####--------------------------------------------------------------------------
#---- --- arraysetops ---
#####--------------------------------------------------------------------------
def ediff1d(arr, to_end=None, to_begin=None):
    """
    Compute the differences between consecutive elements of an array,
    honouring masked values.

    Masked-array counterpart of `numpy.ediff1d`; see that function for
    the meaning of `to_end` and `to_begin`.

    See Also
    --------
    numpy.ediff1d : Equivalent function for ndarrays.
    """
    flat = ma.asanyarray(arr).flat
    diffs = flat[1:] - flat[:-1]
    pieces = [diffs]
    #
    if to_begin is not None:
        pieces.insert(0, to_begin)
    if to_end is not None:
        pieces.append(to_end)
    #
    # Only concatenate when something was prepended or appended, which
    # spares a copy of a potentially large array in the common case.
    if len(pieces) != 1:
        diffs = hstack(pieces)
    #
    return diffs
def unique(ar1, return_index=False, return_inverse=False):
    """
    Find the unique elements of an array, with all masked values
    treated as one element (masked).

    The output is always a masked array.  See `numpy.unique` for the
    meaning of the optional index/inverse outputs.

    See Also
    --------
    numpy.unique : Equivalent function for ndarrays.
    """
    output = np.unique(ar1,
                       return_index=return_index,
                       return_inverse=return_inverse)
    if isinstance(output, tuple):
        # Only the values array (first element) becomes a MaskedArray;
        # the index arrays are left as plain ndarrays... viewed as masked
        # for consistency with the non-tuple branch.
        parts = list(output)
        parts[0] = parts[0].view(MaskedArray)
        return tuple(parts)
    return output.view(MaskedArray)
def intersect1d(ar1, ar2, assume_unique=False):
    """
    Return the sorted unique elements common to both arrays.

    Masked values are considered equal one to the other, and the output
    is always a masked array.  See `numpy.intersect1d` for more
    details.

    See Also
    --------
    numpy.intersect1d : Equivalent function for ndarrays.

    Examples
    --------
    >>> x = np.ma.array([1, 3, 3, 3], mask=[0, 0, 0, 1])
    >>> y = np.ma.array([3, 1, 1, 1], mask=[0, 0, 0, 1])
    >>> np.ma.intersect1d(x, y)
    masked_array(data=[1, 3, --],
                 mask=[False, False, True],
           fill_value=999999)
    """
    if assume_unique:
        combined = ma.concatenate((ar1, ar2))
    else:
        # Might be faster than unique( intersect1d( ar1, ar2 ) )?
        combined = ma.concatenate((unique(ar1), unique(ar2)))
    combined.sort()
    # After sorting, duplicated values mark the common elements.
    return combined[:-1][combined[1:] == combined[:-1]]
def setxor1d(ar1, ar2, assume_unique=False):
    """
    Return the set exclusive-or of two 1-D arrays with unique elements.

    The output is always a masked array.  See `numpy.setxor1d` for more
    details.

    See Also
    --------
    numpy.setxor1d : Equivalent function for ndarrays.
    """
    if not assume_unique:
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    aux = ma.concatenate((ar1, ar2))
    if aux.size == 0:
        return aux
    aux.sort()
    auxf = aux.filled()
    # An element belongs to the xor iff it appears exactly once, i.e. it
    # differs from both of its neighbours in the sorted concatenation.
    flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True]))
    flag2 = (flag[1:] == flag[:-1])
    return aux[flag2]
def in1d(ar1, ar2, assume_unique=False, invert=False):
    """
    Test whether each element of `ar1` is also present in `ar2`.

    The output is always a masked array.  See `numpy.in1d` for more
    details.  We recommend using :func:`isin` instead of `in1d` for new
    code.

    See Also
    --------
    isin : Version of this function that preserves the shape of ar1.
    numpy.in1d : Equivalent function for ndarrays.

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    if not assume_unique:
        ar1, rev_idx = unique(ar1, return_inverse=True)
        ar2 = unique(ar2)
    ar = ma.concatenate((ar1, ar2))
    # We need this to be a stable sort, so always use 'mergesort'
    # here. The values from the first array should always come before
    # the values from the second array.
    order = ar.argsort(kind='mergesort')
    sar = ar[order]
    # A value from ar1 is present in ar2 iff it equals its sorted
    # neighbour (inverted comparison when `invert` is requested).
    if invert:
        neighbour_cmp = (sar[1:] != sar[:-1])
    else:
        neighbour_cmp = (sar[1:] == sar[:-1])
    flag = ma.concatenate((neighbour_cmp, [invert]))
    indx = order.argsort(kind='mergesort')[:len(ar1)]
    if assume_unique:
        return flag[indx]
    return flag[indx][rev_idx]
def isin(element, test_elements, assume_unique=False, invert=False):
    """
    Calculate ``element in test_elements``, broadcasting over `element`
    only.

    The output is always a masked array of the same shape as `element`.
    See `numpy.isin` for more details.

    See Also
    --------
    in1d : Flattened version of this function.
    numpy.isin : Equivalent function for ndarrays.

    Notes
    -----
    .. versionadded:: 1.13.0
    """
    element = ma.asarray(element)
    flat_result = in1d(element, test_elements,
                       assume_unique=assume_unique, invert=invert)
    # in1d works on the flattened input; restore the original shape.
    return flat_result.reshape(element.shape)
def union1d(ar1, ar2):
    """
    Return the union of two arrays.

    The output is always a masked array.  See `numpy.union1d` for more
    details.

    See Also
    --------
    numpy.union1d : Equivalent function for ndarrays.
    """
    combined = ma.concatenate((ar1, ar2), axis=None)
    return unique(combined)
def setdiff1d(ar1, ar2, assume_unique=False):
    """
    Return the (unique) values of `ar1` that are not in `ar2`.

    The output is always a masked array.  See `numpy.setdiff1d` for
    more details.

    See Also
    --------
    numpy.setdiff1d : Equivalent function for ndarrays.

    Examples
    --------
    >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1])
    >>> np.ma.setdiff1d(x, [1, 2])
    masked_array(data=[3, --],
                 mask=[False, True],
           fill_value=999999)
    """
    if assume_unique:
        ar1 = ma.asarray(ar1).ravel()
    else:
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    # Keep the elements of ar1 that do NOT occur in ar2.
    keep = in1d(ar1, ar2, assume_unique=True, invert=True)
    return ar1[keep]
###############################################################################
# Covariance #
###############################################################################
def _covhelper(x, y=None, rowvar=True, allow_masked=True):
    """
    Private function for the computation of covariance and correlation
    coefficients.

    Returns the centered (and, when `y` is given, concatenated) float
    data, an integer array flagging the unmasked entries, and the
    normalized `rowvar` flag.
    """
    x = ma.array(x, ndmin=2, copy=True, dtype=float)
    xmask = ma.getmaskarray(x)
    # Quick exit if we can't process masked data
    if not allow_masked and xmask.any():
        raise ValueError("Cannot process masked data.")
    #
    # A single row is always treated as one variable of observations.
    if x.shape[0] == 1:
        rowvar = True
    # Make sure that rowvar is either 0 or 1
    rowvar = int(bool(rowvar))
    axis = 1 - rowvar
    # `tup` re-inserts the reduced axis so the mean broadcasts below.
    if rowvar:
        tup = (slice(None), None)
    else:
        tup = (None, slice(None))
    #
    if y is None:
        xnotmask = np.logical_not(xmask).astype(int)
    else:
        y = array(y, copy=False, ndmin=2, dtype=float)
        ymask = ma.getmaskarray(y)
        if not allow_masked and ymask.any():
            raise ValueError("Cannot process masked data.")
        if xmask.any() or ymask.any():
            if y.shape == x.shape:
                # Define some common mask
                common_mask = np.logical_or(xmask, ymask)
                if common_mask is not nomask:
                    # Propagate the combined mask to both arrays in place.
                    xmask = x._mask = y._mask = ymask = common_mask
                    x._sharedmask = False
                    y._sharedmask = False
        x = ma.concatenate((x, y), axis)
        xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int)
    # Center each variable about its (masked) mean.
    x -= x.mean(axis=rowvar)[tup]
    return (x, xnotmask, rowvar)
def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None):
    """
    Estimate the covariance matrix, taking masked values into account.

    Except for the handling of missing data this does the same as
    `numpy.cov`; see that function for details and examples.  When `x`
    and `y` have the same shape a common mask is allocated: if
    ``x[i,j]`` is masked, then ``y[i,j]`` will be masked too.

    Parameters
    ----------
    x : array_like
        A 1-D or 2-D array containing multiple variables and
        observations.  Each row of `x` represents a variable and each
        column a single observation (but see `rowvar`).
    y : array_like, optional
        An additional set of variables and observations, with the same
        shape as `x`.
    rowvar : bool, optional
        When True (default) each row is a variable with observations in
        the columns; otherwise the relationship is transposed.
    bias : bool, optional
        When False (default) normalize by ``N - 1`` (unbiased
        estimate); when True normalize by ``N``.  Overridden by `ddof`
        when that is given.
    allow_masked : bool, optional
        When True, masked values are propagated pair-wise between `x`
        and `y`; when False, a ValueError is raised if any value is
        missing.
    ddof : {None, int}, optional
        When not None, normalize by ``N - ddof``, overriding the value
        implied by `bias`.

        .. versionadded:: 1.5

    Raises
    ------
    ValueError
        If some values are missing and `allow_masked` is False, or if
        `ddof` is not an integer.

    See Also
    --------
    numpy.cov
    """
    # Validate ddof and derive it from `bias` when unspecified.
    if ddof is not None and ddof != int(ddof):
        raise ValueError("ddof must be an integer")
    if ddof is None:
        ddof = 0 if bias else 1
    (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)
    # The normalization counts, per pair of variables, only the
    # observations that are unmasked in both.
    if rowvar:
        fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof
        result = (dot(x, x.T.conj(), strict=False) / fact).squeeze()
    else:
        fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof
        result = (dot(x.T, x.conj(), strict=False) / fact).squeeze()
    return result
def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True,
             ddof=np._NoValue):
    """
    Return Pearson product-moment correlation coefficients.

    Except for the handling of missing data this function does the same as
    `numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`.

    Parameters
    ----------
    x : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `x` represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations. `y` has the same
        shape as `x`.
    rowvar : bool, optional
        If `rowvar` is True (default), then each row represents a
        variable, with observations in the columns. Otherwise, the
        relationship is transposed: each column represents a variable,
        while the rows contain observations.
    bias : _NoValue, optional
        Has no effect, do not use.

        .. deprecated:: 1.10.0
    allow_masked : bool, optional
        If True, masked values are propagated pair-wise: if a value is masked
        in `x`, the corresponding value is masked in `y`.
        If False, raises an exception.  Because `bias` is deprecated, this
        argument needs to be treated as keyword only to avoid a warning.
    ddof : _NoValue, optional
        Has no effect, do not use.

        .. deprecated:: 1.10.0

    See Also
    --------
    numpy.corrcoef : Equivalent function in top-level NumPy module.
    cov : Estimate the covariance matrix.

    Notes
    -----
    This function accepts but discards arguments `bias` and `ddof`. This is
    for backwards compatibility with previous versions of this function. These
    arguments had no effect on the return values of the function and can be
    safely ignored in this and previous versions of numpy.
    """
    msg = 'bias and ddof have no effect and are deprecated'
    if bias is not np._NoValue or ddof is not np._NoValue:
        # 2015-03-15, 1.10
        warnings.warn(msg, DeprecationWarning, stacklevel=2)
    # Get the data (x with y appended, the valid-data indicator, and the
    # possibly-normalized rowvar flag come from the shared helper).
    (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)
    # Compute the covariance matrix.  `fact` is, pair-wise, the number of
    # observations that are unmasked in both variables, so every entry is
    # normalized by its own count of valid pairs.
    if not rowvar:
        fact = np.dot(xnotmask.T, xnotmask) * 1.
        c = (dot(x.T, x.conj(), strict=False) / fact).squeeze()
    else:
        fact = np.dot(xnotmask, xnotmask.T) * 1.
        c = (dot(x, x.T.conj(), strict=False) / fact).squeeze()
    # Check whether we have a scalar: a 0-d `c` has no diagonal, and the
    # correlation of a variable with itself is trivially 1.
    try:
        diag = ma.diagonal(c)
    except ValueError:
        return 1
    #
    if xnotmask.all():
        # No masked values anywhere: the denominator is the outer product
        # of the standard deviations (sqrt of the variances on the
        # diagonal of `c`).
        _denom = ma.sqrt(ma.multiply.outer(diag, diag))
    else:
        # With masked values, each off-diagonal denominator is recomputed
        # pair-wise from only the observations valid in *both* variables.
        _denom = diagflat(diag)
        _denom._sharedmask = False  # We know return is always a copy
        n = x.shape[1 - rowvar]
        if rowvar:
            for i in range(n - 1):
                for j in range(i + 1, n):
                    # mask_cols propagates a mask in either row to both.
                    _x = mask_cols(vstack((x[i], x[j]))).var(axis=1)
                    _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
        else:
            for i in range(n - 1):
                for j in range(i + 1, n):
                    _x = mask_cols(
                        vstack((x[:, i], x[:, j]))).var(axis=1)
                    _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
    return c / _denom
#####--------------------------------------------------------------------------
#---- --- Concatenation helpers ---
#####--------------------------------------------------------------------------
class MAxisConcatenator(AxisConcatenator):
    """
    Translate slice objects to concatenation along an axis.

    For documentation on usage, see `mr_class`.

    See Also
    --------
    mr_class
    """
    # Use the masked-array concatenate so masks survive concatenation.
    concatenate = staticmethod(concatenate)

    @classmethod
    def makemat(cls, arr):
        # There used to be a view as np.matrix here, but we may eventually
        # deprecate that class. In preparation, we use the unmasked version
        # to construct the matrix (with copy=False for backwards compatibility
        # with the .view)
        data = super().makemat(arr.data, copy=False)
        return array(data, mask=arr.mask)

    def __getitem__(self, key):
        # matrix builder syntax, like 'a, b; c, d', is not supported for
        # masked arrays.
        if isinstance(key, str):
            raise MAError("Unavailable for masked array.")
        return super().__getitem__(key)
class mr_class(MAxisConcatenator):
    """
    Translate slice objects to concatenation along the first axis.

    This is the masked array version of `lib.index_tricks.RClass`.

    See Also
    --------
    lib.index_tricks.RClass

    Examples
    --------
    >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])]
    masked_array(data=[1, 2, 3, ..., 4, 5, 6],
                 mask=False,
           fill_value=999999)
    """
    def __init__(self):
        # Concatenate along axis 0, mirroring np.r_.
        MAxisConcatenator.__init__(self, 0)

# Module-level singleton, the masked-array analogue of np.r_.
mr_ = mr_class()
#####--------------------------------------------------------------------------
#---- Find unmasked data ---
#####--------------------------------------------------------------------------
def flatnotmasked_edges(a):
    """
    Find the indices of the first and last unmasked values.

    Expects a 1-D `MaskedArray`; returns None if all values are masked.

    Parameters
    ----------
    a : array_like
        Input 1-D `MaskedArray`.

    Returns
    -------
    edges : ndarray or None
        The indices of the first and last non-masked values, or None if
        every value is masked.

    See Also
    --------
    flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges
    clump_masked, clump_unmasked

    Notes
    -----
    Only accepts 1-D arrays.
    """
    mask = getmask(a)
    # No mask at all (or a mask with nothing set): the edges are simply
    # the first and last index of the array.
    if mask is nomask or not mask.any():
        return np.array([0, a.size - 1])
    valid = np.flatnonzero(~mask)
    if valid.size == 0:
        return None
    return valid[[0, -1]]
def notmasked_edges(a, axis=None):
    """
    Find the indices of the first and last unmasked values along an axis.

    If all values are masked, return None. Otherwise, return a list
    of two tuples, corresponding to the indices of the first and last
    unmasked values respectively.

    Parameters
    ----------
    a : array_like
        The input array.
    axis : int, optional
        Axis along which to perform the operation.
        If None (default), applies to a flattened version of the array.

    Returns
    -------
    edges : ndarray or list
        An array of start and end indexes if there are any masked data in
        the array. If there are no masked data in the array, `edges` is a
        list of the first and last index.

    See Also
    --------
    flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous
    clump_masked, clump_unmasked
    """
    a = asarray(a)
    # Flattened (or already 1-D) input is handled by the 1-D helper.
    if axis is None or a.ndim == 1:
        return flatnotmasked_edges(a)
    mask = getmaskarray(a)
    # Build per-dimension index grids, masked wherever the data itself is
    # masked, so min/max along `axis` only see indices of valid entries.
    indices = array(np.indices(a.shape), mask=np.asarray([mask] * a.ndim))
    first = tuple([indices[k].min(axis).compressed() for k in range(a.ndim)])
    last = tuple([indices[k].max(axis).compressed() for k in range(a.ndim)])
    return [first, last]
def flatnotmasked_contiguous(a):
    """
    Find contiguous unmasked data in a masked array.

    Parameters
    ----------
    a : narray
        The input array.

    Returns
    -------
    slice_list : list
        A sorted sequence of `slice` objects (start index, end index).
        An empty list is returned for a fully masked array; a single
        full-range slice for an array with no mask.

    See Also
    --------
    flatnotmasked_edges, notmasked_contiguous, notmasked_edges
    clump_masked, clump_unmasked

    Notes
    -----
    Only accepts 2-D arrays at most.
    """
    mask = getmask(a)
    # No mask: the whole (flattened) array is one contiguous region.
    if mask is nomask:
        return [slice(0, a.size)]
    slices = []
    start = 0
    # Walk the runs of equal mask values; keep only the unmasked runs.
    for masked, run in itertools.groupby(mask.ravel()):
        stop = start + len(list(run))
        if not masked:
            slices.append(slice(start, stop))
        start = stop
    return slices
def notmasked_contiguous(a, axis=None):
"""
Find contiguous unmasked data in a masked array along the given axis.
Parameters
----------
a : array_like
The input array.
axis : int, optional
Axis along which to perform the operation.
If None (default), applies to a flattened version of the array, and this
is the same as `flatnotmasked_contiguous`.
Returns
-------
endpoints : list
A list of slices (start and end indexes) of unmasked indexes
in the array.
If the input is 2d and axis is specified, the result is a list of lists.
See Also
--------
flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
clump_masked, clump_unmasked
Notes
-----
Only accepts 2-D arrays at most.
Examples
--------
>>> a = np.arange(12).reshape((3, 4))
>>> mask = np.zeros_like(a)
>>> mask[1:, :-1] = 1; mask[0, 1] = 1; mask[-1, 0] = 0
>>> ma = np.ma.array(a, mask=mask)
>>> ma
masked_array(
data=[[0, --, 2, 3],
[--, --, --, 7],
[8, --, --, 11]],
mask=[[False, True, False, False],
[ True, True, True, False],
[False, True, True, False]],
fill_value=999999)
>>> np.array(ma[~ma.mask])
array([ 0, 2, 3, 7, 8, 11])
>>> np.ma.notmasked_contiguous(ma)
[slice(0, 1, None), slice(2, 4, None), slice(7, 9, None), slice(11, 12, None)]
>>> np.ma.notmasked_contiguous(ma, axis=0)
[[slice(0, 1, None), slice(2, 3, None)], [], [slice(0, 1, None)], [slice(0, 3, None)]]
>>> np.ma.notmasked_contiguous(ma, axis=1)
[[slice(0, 1, None), slice(2, 4, None)], [slice(3, 4, None)], [slice(0, 1, None), slice(3, 4, None)]]
"""
a = asarray(a)
nd = a.ndim
if nd > 2:
raise NotImplementedError("Currently limited to atmost 2D array.")
if axis is None or nd == 1:
return flatnotmasked_contiguous(a)
#
result = []
#
other = (axis + 1) % 2
idx = [0, 0]
idx[axis] = slice(None, None)
#
for i in range(a.shape[other]):
idx[other] = i
result.append(flatnotmasked_contiguous(a[tuple(idx)]))
return result
def _ezclump(mask):
    """
    Finds the clumps (groups of data with the same values) for a 1D bool
    array.  Returns the list of slices covering the True runs.
    """
    if mask.ndim > 1:
        mask = mask.ravel()
    # Indices where the value flips relative to its predecessor.
    flips = np.flatnonzero(mask[1:] ^ mask[:-1]) + 1
    # Run boundaries: start of array, every flip, end of array.
    bounds = [0] + flips.tolist() + [mask.size]
    # Runs alternate in value starting with mask[0]; keep the True runs,
    # which sit at even offsets when mask[0] is True, odd otherwise.
    first = 0 if mask[0] else 1
    return [slice(bounds[i], bounds[i + 1])
            for i in range(first, len(bounds) - 1, 2)]
def clump_unmasked(a):
    """
    Return list of slices corresponding to the unmasked clumps of a 1-D
    array.  (A "clump" is a contiguous region of the array.)

    Parameters
    ----------
    a : ndarray
        A one-dimensional masked array.

    Returns
    -------
    slices : list of slice
        One slice per continuous region of unmasked elements in `a`.

    See Also
    --------
    flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
    notmasked_contiguous, clump_masked
    """
    mask = getattr(a, '_mask', nomask)
    # No mask at all: the whole array is a single unmasked clump.
    if mask is nomask:
        return [slice(0, a.size)]
    return _ezclump(~mask)
def clump_masked(a):
    """
    Returns a list of slices corresponding to the masked clumps of a 1-D
    array.  (A "clump" is a contiguous region of the array.)

    Parameters
    ----------
    a : ndarray
        A one-dimensional masked array.

    Returns
    -------
    slices : list of slice
        One slice per continuous region of masked elements in `a`.

    See Also
    --------
    flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
    notmasked_contiguous, clump_unmasked
    """
    mask = ma.getmask(a)
    # With no mask there is nothing masked, hence no clumps at all.
    return [] if mask is nomask else _ezclump(mask)
###############################################################################
# Polynomial fit #
###############################################################################
def vander(x, n=None):
    """
    Masked values in the input array result in rows of zeros.
    """
    result = np.vander(x, n)
    mask = getmask(x)
    if mask is not nomask:
        # Zero out every row whose source value is masked.
        result[mask] = 0
    return result
vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__)
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
    """
    Any masked values in x is propagated in y, and vice-versa.
    """
    x = asarray(x)
    y = asarray(y)
    combined_mask = getmask(x)
    if y.ndim == 1:
        combined_mask = mask_or(combined_mask, getmask(y))
    elif y.ndim == 2:
        # A masked entry anywhere in a row of y invalidates the whole row.
        row_mask = getmask(mask_rows(y))
        if row_mask is not nomask:
            combined_mask = mask_or(combined_mask, row_mask[:, 0])
    else:
        raise TypeError("Expected a 1D or 2D array for y!")
    if w is not None:
        w = asarray(w)
        if w.ndim != 1:
            raise TypeError("expected a 1-d array for weights")
        if w.shape[0] != y.shape[0]:
            raise TypeError("expected w and y to have the same length")
        combined_mask = mask_or(combined_mask, getmask(w))
    # Nothing masked: hand everything to numpy.polyfit unchanged.
    if combined_mask is nomask:
        return np.polyfit(x, y, deg, rcond, full, w, cov)
    # Fit only the observations that are valid in x, y and w alike.
    keep = ~combined_mask
    if w is not None:
        w = w[keep]
    return np.polyfit(x[keep], y[keep], deg, rcond, full, w, cov)
polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__)
|
smn/vumi-app-router
|
vxapprouter/router.py
|
# -*- test-case-name: vxapprouter.tests.test_router -*-
import json
from urlparse import urlunparse
from twisted.internet.defer import inlineCallbacks
from vumi import log
from vumi.components.session import SessionManager
from vumi.config import (
ConfigDict, ConfigList, ConfigInt, ConfigText, ConfigUrl)
from vumi.dispatchers.endpoint_dispatchers import Dispatcher
from vumi.message import TransportUserMessage
from vumi.persist.txredis_manager import TxRedisManager
class ApplicationDispatcherConfig(Dispatcher.CONFIG_CLASS):
    """
    Configuration for :class:`ApplicationDispatcher`.

    The static options configure Redis and expiry plumbing; the remaining
    options may vary per message and control the menu contents and the
    user-facing copy.
    """
    # Static configuration
    session_expiry = ConfigInt(
        ("Maximum amount of time in seconds to keep session data around. "
         "Defaults to 5 minutes."),
        default=5 * 60, static=True)
    message_expiry = ConfigInt(
        ("Maximum amount of time in seconds to keep message data around. "
         "This is kept to handle async events. Defaults to 2 days."),
        default=60 * 60 * 24 * 2, static=True)
    redis_manager = ConfigDict(
        "Redis client configuration.", default={}, static=True)
    # Dynamic, per-message configuration
    menu_title = ConfigText(
        "Content for the menu title", default="Please select a choice.")
    entries = ConfigList(
        "A list of application endpoints and associated labels",
        default=[])
    invalid_input_message = ConfigText(
        "Prompt to display when warning about an invalid choice",
        default=("That is an incorrect choice. Please enter the number "
                 "of the menu item you wish to choose."))
    try_again_message = ConfigText(
        "What text to display when the user needs to try again.",
        default="Try Again")
    error_message = ConfigText(
        ("Prompt to display when a configuration change invalidates "
         "an active session."),
        default=("Oops! We experienced a temporary error. "
                 "Please try and dial the line again."))
    routing_table = ConfigDict(
        "Routing table. Keys are connector names, values are dicts mapping "
        "endpoint names to [connector, endpoint] pairs.", required=True)
class StateResponse(object):
    """Value object describing the outcome of a state handler.

    Carries the next session state (None terminates the session), any
    session-data updates, and the inbound/outbound messages to publish.
    """

    def __init__(self, state, session_update=None, inbound=(), outbound=()):
        self.next_state = state
        self.inbound = inbound
        self.outbound = outbound
        self.session_update = session_update or {}
def mkmenu(options, start=1, format='%s) %s'):
    """Render numbered menu lines, one option per line, starting at `start`."""
    numbered = (format % (number, option)
                for number, option in enumerate(options, start))
    return '\n'.join(numbered)
def clean(content):
    """Strip surrounding whitespace; falsy input (e.g. None) becomes ''."""
    return content.strip() if content else ''
class ApplicationDispatcher(Dispatcher):
    """
    Dispatcher that shows users a menu of application endpoints and then
    routes the remainder of their session to the chosen endpoint.

    Per-user session state lives in Redis (via ``SessionManager``) and is
    driven by a small state machine:
    start -> select -> selected, with a bad_input loop for invalid choices.
    """
    CONFIG_CLASS = ApplicationDispatcherConfig
    worker_name = 'application_dispatcher'
    # State-machine states, stored in the session under 'state'.
    STATE_START = "start"
    STATE_SELECT = "select"
    STATE_SELECTED = "selected"
    STATE_BAD_INPUT = "bad_input"

    @inlineCallbacks
    def setup_dispatcher(self):
        """Wire up state handlers and the Redis connection."""
        yield super(ApplicationDispatcher, self).setup_dispatcher()
        # Maps a session state to the handler run for inbound messages
        # received while in that state.
        self.handlers = {
            self.STATE_START: self.handle_state_start,
            self.STATE_SELECT: self.handle_state_select,
            self.STATE_SELECTED: self.handle_state_selected,
            self.STATE_BAD_INPUT: self.handle_state_bad_input,
        }
        config = self.get_static_config()
        txrm = yield TxRedisManager.from_config(config.redis_manager)
        # Namespace all keys under this worker's name.
        self.redis = txrm.sub_manager(self.worker_name)

    def session_manager(self, config):
        """Return a SessionManager bound to this worker's Redis."""
        return SessionManager(
            self.redis, max_session_length=config.session_expiry)

    def forwarded_message(self, msg, **kwargs):
        """Return a copy of `msg` with the given payload fields overridden."""
        copy = TransportUserMessage(**msg.payload)
        for k, v in kwargs.items():
            copy[k] = v
        return copy

    def target_endpoints(self, config):
        """
        Make sure the currently active endpoint is still valid.
        """
        return set([entry['endpoint'] for entry in config.entries])

    def get_endpoint_for_choice(self, msg, session):
        """
        Retrieves the candidate endpoint based on the user's numeric choice
        """
        # The endpoint list was serialized into the session when the menu
        # was first shown (see handle_state_start).
        endpoints = json.loads(session['endpoints'])
        index = self.get_menu_choice(msg, (1, len(endpoints)))
        if index is None:
            return None
        return endpoints[index - 1]

    def get_menu_choice(self, msg, valid_range):
        """
        Parse user input for selecting a numeric menu choice
        """
        # `valid_range` is an inclusive (low, high) tuple.  Returns None
        # for non-numeric or out-of-range input.
        try:
            value = int(clean(msg['content']))
        except ValueError:
            return None
        else:
            if value not in range(valid_range[0], valid_range[1] + 1):
                return None
            return value

    def make_first_reply(self, config, session, msg):
        """Reply with the endpoint menu (overridable by subclasses)."""
        return msg.reply(self.create_menu(config))

    def make_invalid_input_reply(self, config, session, msg):
        """Reply shown after an invalid menu choice."""
        return msg.reply('%s\n\n1. %s' % (
            config.invalid_input_message, config.try_again_message))

    def handle_state_start(self, config, session, msg):
        """
        When presenting the menu, we also store the list of endpoints
        in the session data. Later, in the select state, we load
        these endpoints and retrieve the candidate endpoint based
        on the user's menu choice.
        """
        reply_msg = self.make_first_reply(config, session, msg)
        endpoints = json.dumps(
            [entry['endpoint'] for entry in config.entries]
        )
        return StateResponse(
            self.STATE_SELECT, {'endpoints': endpoints}, outbound=[reply_msg])

    def handle_state_select(self, config, session, msg):
        """Resolve the user's menu choice into an application endpoint."""
        endpoint = self.get_endpoint_for_choice(msg, session)
        if endpoint is None:
            # Not a usable number: loop through the bad-input state.
            reply_msg = self.make_invalid_input_reply(config, session, msg)
            return StateResponse(self.STATE_BAD_INPUT, outbound=[reply_msg])
        if endpoint not in self.target_endpoints(config):
            # The chosen endpoint disappeared from the config mid-session.
            log.msg(("Router configuration change forced session "
                     "termination for user %s" % msg['from_addr']))
            error_reply_msg = self.make_error_reply(msg, config)
            return StateResponse(None, outbound=[error_reply_msg])
        # Forward a content-less session-start message so the target
        # application sees a brand new session.
        forwarded_msg = self.forwarded_message(
            msg, content=None,
            session_event=TransportUserMessage.SESSION_NEW)
        log.msg("Switched to endpoint '%s' for user %s" %
                (endpoint, msg['from_addr']))
        return StateResponse(
            self.STATE_SELECTED, {'active_endpoint': endpoint},
            inbound=[(forwarded_msg, endpoint)])

    def handle_state_selected(self, config, session, msg):
        """Relay messages to the previously chosen endpoint."""
        active_endpoint = session['active_endpoint']
        if active_endpoint not in self.target_endpoints(config):
            log.msg(("Router configuration change forced session "
                     "termination for user %s" % msg['from_addr']))
            error_reply_msg = self.make_error_reply(msg, config)
            return StateResponse(None, outbound=[error_reply_msg])
        else:
            return StateResponse(
                self.STATE_SELECTED, inbound=[(msg, active_endpoint)])

    def handle_state_bad_input(self, config, session, msg):
        """After an invalid choice: '1' retries the menu, else re-warn."""
        choice = self.get_menu_choice(msg, (1, 1))
        if choice is None:
            reply_msg = self.make_invalid_input_reply(config, session, msg)
            return StateResponse(self.STATE_BAD_INPUT, outbound=[reply_msg])
        else:
            return self.handle_state_start(config, session, msg)

    @inlineCallbacks
    def handle_session_close(self, config, session, msg, connector_name):
        """Forward the session close to the active endpoint, then clear."""
        user_id = msg['from_addr']
        if (session.get('state', None) == self.STATE_SELECTED and
                session['active_endpoint'] in self.target_endpoints(config)):
            target = self.find_target(config, msg, connector_name, session)
            # NOTE(review): find_target may return None, which would raise
            # TypeError on target[0] — confirm the routing table always
            # covers this connector/endpoint combination.
            yield self.publish_inbound(msg, target[0], target[1])
        session_manager = yield self.session_manager(config)
        yield session_manager.clear_session(user_id)

    def create_menu(self, config):
        """Render the menu title plus numbered endpoint labels."""
        labels = [entry['label'] for entry in config.entries]
        return (config.menu_title + "\n" + mkmenu(labels))

    def make_error_reply(self, msg, config):
        """Terminal error reply that also closes the session."""
        return msg.reply(config.error_message, continue_session=False)

    def find_target(self, config, msg, connector_name, session={}):
        """
        Look up the (connector, endpoint) pair to publish to, or None
        (after logging) when the routing table has no matching entry.

        NOTE(review): `session={}` is a mutable default; it is only read
        here so this is currently safe, but a None default would be safer.
        """
        endpoint_name = session.get(
            'active_endpoint', msg.get_routing_endpoint())
        endpoint_routing = config.routing_table.get(connector_name)
        if endpoint_routing is None:
            log.warning("No routing information for connector '%s'" % (
                connector_name,))
            return None
        target = endpoint_routing.get(endpoint_name)
        if target is None:
            log.warning("No routing information for endpoint '%s' on '%s'" % (
                endpoint_name, connector_name,))
            return None
        return target

    @inlineCallbacks
    def process_inbound(self, config, msg, connector_name):
        """
        Main inbound entry point: load or create the session, run the
        current state's handler, then publish the resulting messages.
        """
        log.msg("Processing inbound message: %s" % (msg,))
        user_id = msg['from_addr']
        session_manager = yield self.session_manager(config)
        session = yield session_manager.load_session(user_id)
        session_event = msg['session_event']
        if not session or session_event == TransportUserMessage.SESSION_NEW:
            log.msg("Creating session for user %s" % user_id)
            session = {}
            state = self.STATE_START
            yield session_manager.create_session(user_id, state=state)
        elif session_event == TransportUserMessage.SESSION_CLOSE:
            yield self.handle_session_close(
                config, session, msg, connector_name)
            return
        else:
            log.msg("Loading session for user %s: %s" % (user_id, session,))
            state = session['state']
        try:
            # We must assume the state handlers might be async, even if the
            # current implementations aren't. There is at least one test that
            # depends on asynchrony here to hook into the state transition.
            state_resp = yield self.handlers[state](config, session, msg)
            if state_resp.next_state is None:
                # Session terminated (right now, just in the case of a
                # administrator-initiated configuration change
                yield session_manager.clear_session(user_id)
            else:
                session['state'] = state_resp.next_state
                session.update(state_resp.session_update)
                if state != state_resp.next_state:
                    log.msg("State transition for user %s: %s => %s" %
                            (user_id, state, state_resp.next_state))
                yield session_manager.save_session(user_id, session)
            for msg, endpoint in state_resp.inbound:
                target = self.find_target(
                    config, msg, connector_name, session)
                # NOTE(review): target may be None if routing is missing;
                # target[0] would then raise and be caught below.
                yield self.publish_inbound(msg, target[0], target[1])
            for msg in state_resp.outbound:
                yield self.process_outbound(config, msg, connector_name)
        except:
            # Any failure tears down the session and sends a generic error.
            log.err()
            yield session_manager.clear_session(user_id)
            yield self.process_outbound(
                config, self.make_error_reply(msg, config), connector_name)

    @inlineCallbacks
    def process_outbound(self, config, msg, connector_name):
        """Publish an outbound message, caching its id for event routing."""
        log.msg("Processing outbound message: %s" % (msg,))
        user_id = msg['to_addr']
        session_event = msg['session_event']
        session_manager = yield self.session_manager(config)
        session = yield session_manager.load_session(user_id)
        if session and (session_event == TransportUserMessage.SESSION_CLOSE):
            yield session_manager.clear_session(user_id)
        # Remember which user this message went to so that later delivery
        # events can be routed back (see process_event).
        yield self.cache_outbound_user_id(msg['message_id'],
                                          msg['to_addr'])
        target = self.find_target(config, msg, connector_name)
        if target is None:
            return
        yield self.publish_outbound(msg, target[0], target[1])

    def mk_msg_key(self, message_id):
        """Redis key for the outbound message-id -> user-id cache."""
        return ':'.join(['cache', message_id])

    @inlineCallbacks
    def cache_outbound_user_id(self, message_id, user_id):
        """Cache the user id for `message_id` with the configured TTL."""
        key = self.mk_msg_key(message_id)
        yield self.redis.setex(
            key, self.get_static_config().message_expiry, user_id)

    def get_cached_user_id(self, message_id):
        """Return (a deferred for) the cached user id of a message id."""
        return self.redis.get(self.mk_msg_key(message_id))

    @inlineCallbacks
    def process_event(self, config, event, connector_name):
        """Route delivery/ack events to the session's active endpoint."""
        user_id = yield self.get_cached_user_id(event['user_message_id'])
        # NOTE(review): user_id may be None if the cache entry expired —
        # confirm load_session(None) behaves acceptably in that case.
        session_manager = yield self.session_manager(config)
        session = yield session_manager.load_session(user_id)
        if not session.get('active_endpoint'):
            target = None
        else:
            target = self.find_target(config, event, connector_name, session)
        if target is None:
            return
        yield self.publish_event(event, target[0], target[1])
class MessengerApplicationDispatcherConfig(ApplicationDispatcher.CONFIG_CLASS):
    """Config adding the Messenger template fields (subtitle, image URL)."""
    sub_title = ConfigText('The subtitle')
    image_url = ConfigUrl('The URL for an image')
class MessengerApplicationDispatcher(ApplicationDispatcher):
    """
    ApplicationDispatcher variant that decorates replies with Facebook
    Messenger 'generic' template metadata (title, image, tappable buttons).
    """
    CONFIG_CLASS = MessengerApplicationDispatcherConfig

    def make_first_reply(self, config, session, msg):
        """Attach a Messenger button template to the menu reply."""
        msg = super(MessengerApplicationDispatcher, self).make_first_reply(
            config, session, msg)
        # Magically render a rich Messenger menu when there are at most 3
        # entries — presumably because Messenger button templates cap at
        # three buttons; TODO confirm. (Note: the condition is <= 3.)
        if len(config.entries) <= 3:
            msg['helper_metadata']['messenger'] = {
                'template_type': 'generic',
                'title': config.menu_title,
                'subtitle': config.sub_title,
                'image_url': urlunparse(config.image_url),
                'buttons': [{
                    'title': entry['label'],
                    # The payload mimics the numeric choice the user would
                    # otherwise have typed in reply to the menu.
                    'payload': {
                        "content": str(index + 1),
                        "in_reply_to": msg['message_id'],
                    }
                } for (index, entry) in enumerate(config.entries)]
            }
        return msg

    def make_invalid_input_reply(self, config, session, msg):
        """Attach a single 'try again' Messenger button to the warning."""
        msg = super(
            MessengerApplicationDispatcher, self).make_invalid_input_reply(
                config, session, msg)
        msg['helper_metadata']['messenger'] = {
            'template_type': 'generic',
            'title': config.menu_title,
            'subtitle': config.invalid_input_message,
            'image_url': urlunparse(config.image_url),
            'buttons': [{
                'title': config.try_again_message,
                # '1' is the retry choice handled by handle_state_bad_input.
                'payload': {
                    "content": '1',
                    "in_reply_to": msg['message_id'],
                }
            }]
        }
        return msg
|
afajl/sy
|
sy/prompt.py
|
'''
:synopsis: Prompt users for information
.. moduleauthor: Paul Diaconescu <p@afajl.com>
'''
import re
try:
import readline
has_readline = True
except ImportError:
has_readline = False
pass
def _indent_out(question):
    '''Create a print helper indented to line up under *question*.

    The indent level is the number of leading spaces in *question*, so
    error messages printed through the returned function align with the
    prompt text.
    '''
    indent = 0
    for c in question:
        if c != ' ':
            break
        indent += 1
    def out(msg, to_s=False):
        # With to_s=True the indented string is returned instead of printed.
        s = ' '*indent + msg
        if to_s:
            return s
        else:
            print s
    return out
def _to_type(answer, type):
    '''Tries to convert the answer to the desired type.

    Returns a ``(converted, error)`` tuple where ``error`` is None on
    success.  ``type`` may be None (no conversion), one of the built-in
    types str, int, float or bool, or a custom converter function that
    itself returns a ``(converted, error)`` tuple.
    '''
    if type is None:
        # Dont convert
        return answer, None
    if type is int:
        try:
            return type(answer), None
        except ValueError:
            return None, 'Answer must be a integer'
    if type is float:
        try:
            return type(answer), None
        except ValueError:
            return None, 'Answer must be a float'
    if type is bool:
        if answer[0] in ('y', 'Y', 't', 'j'):
            return True, None
        elif answer[0] in ('n', 'N', 'f'):
            return False, None
        else:
            return None, 'Answer yes or no'
    if type is str:
        # Bug fix: str used to fall through to the custom-converter branch
        # below and return a bare string, which broke the caller's
        # ``converted, error = _to_type(...)`` tuple unpacking.
        return type(answer), None
    # Custom converter: it is documented (see ask()) to return its own
    # (converted, error) tuple, so pass its result straight through.
    return type(answer)
def _run_checks(answer, checks):
    '''Run ``(check, errormsg)`` pairs against *answer*.

    A check may be a regex string, a compiled pattern (anything with a
    ``match`` attribute) or a callable returning truthy on success.
    Returns the error message of the first failing check, or None.
    '''
    for check, errmsg in checks:
        if isinstance(check, str):
            if not re.match(check, answer):
                return errmsg
        if hasattr(check, 'match'):
            if not check.match(answer):
                return errmsg
        if hasattr(check, '__call__'):
            if not check(answer):
                return errmsg
    return None
def ask(question, default='', type=None, checks=()):
    ''' Ask user a question

    :arg question: Question to prompt for. Leading spaces will set the
                   indent level for the error responses.
    :arg default: The default answer as a string.
    :arg type: Python type the answer must have. Answers are converted
               to the requested type before checks are run. Support for
               str, int, float and bool are built in.

               If you supply your own function it must take a string as
               argument and return a tuple where the first value is the
               converted answer or None if it failed. If it fails the
               second value is the error message displayed to the user,
               example::

                   def int_list(answer):
                       try:
                           ints = [int(i) for i in answer.split(',')]
                           # Success!
                           return ints, None
                       except ValueError:
                           # Fail!
                           return None, 'You must supply a list of integers'

                   sy.prompt.ask('Give me a intlist: ', type=int_list)
                   Give me a intlist: 1, 2, 3
                   [1, 2, 3]

    :arg checks: List of checks in the form ``[(check, errormsg), (check, ...)]``.
                 The check can be a regular expression string, a compiled
                 pattern or a function. The function must take a string as
                 argument and return True if the check passes. If the check
                 fails the errormsg is printed to the user.
    '''
    assert isinstance(default, str), 'Default must be a string'
    # Get a print_error function that correctly indents
    # the error message
    print_error = _indent_out(question)
    while True:
        answer = raw_input(question).strip()
        if not answer:
            if default:
                answer = default
            else:
                # ask again
                continue
        converted = answer
        if type:
            # Conversion failures re-prompt instead of raising.
            converted, error = _to_type(answer, type)
            if error:
                print_error(error)
                continue
        if checks:
            # Checks run on the *converted* value, not the raw string.
            error = _run_checks(converted, checks)
            if error:
                print_error(error)
                continue
        return converted
def confirm(question, default=''):
    ''' Ask a yes or no question
    :arg default: True or False
    :returns: Boolean answer
    '''
    # Map a boolean default onto the answer string the user would type;
    # identity checks keep e.g. the integer 1 from being treated as True.
    if default is True:
        answer_default = 'y'
    elif default is False:
        answer_default = 'n'
    else:
        answer_default = default
    return ask(question, type=bool, default=answer_default)
def choose(question, choices, multichoice=False, default=''):
    ''' Let user select one or more items from a list

    Presents user with the question and the list of choices. Returns the
    index of the choice selected. If ``multichoice`` is true the user can
    pick more than one choice and a list of indexes are returned::

        choice = sy.prompt.choose('Pick one:', ['a', 'b', 'c'])
        # Pick one:
        #  1) a
        #  2) b
        #  3) c
        # Choice: 1
        print choice
        0

        choices = sy.prompt.choose('Pick one or more:', ['a', 'b', 'c'],
                                   multichoice=True)
        # Pick one or more:
        #  1) a
        #  2) b
        #  3) c
        # Choices: 1, 3
        print choices
        [0,2]

    :arg question: Question to print before list of choices
    :arg choices: List of choices. If the choice is not a string an attempt to
                  convert it to a string with :func:`str()` is made.
    :arg multichoice: If True the user can pick multiple choices, separated by
                      commas. The return value will be a list of indexes in the
                      choices list that the user picked.
    :arg default: Default choice as a string the user would have written, ex:
                  ``"1,2"``.
    '''
    # `out` prints at the same indent level as the question.
    out = _indent_out(question)
    print question
    # Choices are displayed 1-based but returned 0-based.
    for i, choice in enumerate(choices):
        out( '%d) %s' % (i+1, str(choice)) )
    print
    if multichoice:
        choice_q = 'Choices: '
    else:
        choice_q = 'Choice: '
    def to_index_list(answer):
        # Custom converter for ask(): returns (indexes, error), splitting
        # on commas and/or whitespace and shifting to 0-based indexes.
        try:
            ints = [int(i) - 1 for i in re.split(r'\s*,\s*|\s+', answer)]
            for i in ints:
                if i < 0 or i >= len(choices):
                    return None, '%d is not a valid option' % (i + 1)
            return ints, None
        except ValueError:
            return None, 'You must use numbers'
    while True:
        selected = ask(out(choice_q, to_s=True), type=to_index_list,
                       default=default)
        if selected:
            if not multichoice:
                # Single-choice mode rejects multiple selections.
                if len(selected) > 1:
                    out('Select one value')
                    continue
                return selected[0]
            else:
                return selected
|
jimporter/bfg9000
|
test/unit/arguments/test_windows.py
|
from .. import *
from bfg9000.arguments.windows import *
class TestWindowsArgParse(TestCase):
def test_empty(self):
parser = ArgumentParser()
self.assertEqual(parser.parse_known([]), ({}, []))
self.assertEqual(parser.parse_known(['extra']), ({}, ['extra']))
self.assertEqual(parser.parse_known(['/extra']), ({}, ['/extra']))
def test_short_bool(self):
parser = ArgumentParser()
parser.add('/a')
self.assertEqual(parser.parse_known([]), ({'a': None}, []))
self.assertEqual(parser.parse_known(['/a']), ({'a': True}, []))
self.assertEqual(parser.parse_known(['/a', '/a']), ({'a': True}, []))
parser = ArgumentParser()
parser.add('/a', '-a')
self.assertEqual(parser.parse_known([]), ({'a': None}, []))
self.assertEqual(parser.parse_known(['/a']), ({'a': True}, []))
self.assertEqual(parser.parse_known(['-a']), ({'a': True}, []))
def test_long_bool(self):
parser = ArgumentParser()
parser.add('/foo')
self.assertEqual(parser.parse_known([]), ({'foo': None}, []))
self.assertEqual(parser.parse_known(['/foo']), ({'foo': True}, []))
self.assertEqual(parser.parse_known(['/foo', '/foo']),
({'foo': True}, []))
parser = ArgumentParser()
parser.add('/foo', '-foo')
self.assertEqual(parser.parse_known([]), ({'foo': None}, []))
self.assertEqual(parser.parse_known(['/foo']), ({'foo': True}, []))
self.assertEqual(parser.parse_known(['-foo']), ({'foo': True}, []))
    def test_short_str(self):
        """A short string option takes its value attached or as next token;
        the last occurrence wins."""
        parser = ArgumentParser()
        parser.add('/a', type=str)
        self.assertEqual(parser.parse_known([]), ({'a': None}, []))
        self.assertEqual(parser.parse_known(['/afoo']), ({'a': 'foo'}, []))
        self.assertEqual(parser.parse_known(['/a', 'foo']), ({'a': 'foo'}, []))
        self.assertEqual(parser.parse_known(['/afoo', '/a', 'bar']),
                         ({'a': 'bar'}, []))
        parser = ArgumentParser()
        parser.add('/a', '-a', type=str)
        self.assertEqual(parser.parse_known([]), ({'a': None}, []))
        self.assertEqual(parser.parse_known(['/afoo']), ({'a': 'foo'}, []))
        self.assertEqual(parser.parse_known(['-afoo']), ({'a': 'foo'}, []))
        self.assertEqual(parser.parse_known(['/a', 'foo']), ({'a': 'foo'}, []))
        self.assertEqual(parser.parse_known(['-a', 'foo']), ({'a': 'foo'}, []))
        self.assertEqual(parser.parse_known(['/afoo', '-a', 'bar']),
                         ({'a': 'bar'}, []))

    def test_long_str(self):
        """A long string option uses the ':' separator; the last wins."""
        parser = ArgumentParser()
        parser.add('/foo', type=str)
        self.assertEqual(parser.parse_known([]), ({'foo': None}, []))
        self.assertEqual(parser.parse_known(['/foo:bar']),
                         ({'foo': 'bar'}, []))
        self.assertEqual(parser.parse_known(['/foo:bar', '/foo:baz']),
                         ({'foo': 'baz'}, []))
        parser = ArgumentParser()
        parser.add('/foo', '-foo', type=str)
        self.assertEqual(parser.parse_known([]), ({'foo': None}, []))
        self.assertEqual(parser.parse_known(['/foo:bar']),
                         ({'foo': 'bar'}, []))
        self.assertEqual(parser.parse_known(['-foo:bar']),
                         ({'foo': 'bar'}, []))
        self.assertEqual(parser.parse_known(['/foo:bar', '-foo:baz']),
                         ({'foo': 'baz'}, []))
    def test_short_list(self):
        """A short list option accumulates all occurrences, in order."""
        parser = ArgumentParser()
        parser.add('/a', type=list)
        self.assertEqual(parser.parse_known([]), ({'a': []}, []))
        self.assertEqual(parser.parse_known(['/afoo']), ({'a': ['foo']}, []))
        self.assertEqual(parser.parse_known(['/a', 'foo']),
                         ({'a': ['foo']}, []))
        self.assertEqual(parser.parse_known(['/afoo', '/a', 'bar']),
                         ({'a': ['foo', 'bar']}, []))
        parser = ArgumentParser()
        parser.add('/a', '-a', type=list)
        self.assertEqual(parser.parse_known([]), ({'a': []}, []))
        self.assertEqual(parser.parse_known(['/afoo']), ({'a': ['foo']}, []))
        self.assertEqual(parser.parse_known(['-afoo']), ({'a': ['foo']}, []))
        self.assertEqual(parser.parse_known(['/a', 'foo']),
                         ({'a': ['foo']}, []))
        self.assertEqual(parser.parse_known(['-a', 'foo']),
                         ({'a': ['foo']}, []))
        self.assertEqual(parser.parse_known(['/afoo', '-a', 'bar']),
                         ({'a': ['foo', 'bar']}, []))

    def test_long_list(self):
        """A long list option accumulates ':'-separated values, in order."""
        parser = ArgumentParser()
        parser.add('/foo', type=list)
        self.assertEqual(parser.parse_known([]), ({'foo': []}, []))
        self.assertEqual(parser.parse_known(['/foo:bar']),
                         ({'foo': ['bar']}, []))
        self.assertEqual(parser.parse_known(['/foo:bar', '/foo:baz']),
                         ({'foo': ['bar', 'baz']}, []))
        parser = ArgumentParser()
        parser.add('/foo', '-foo', type=list)
        self.assertEqual(parser.parse_known([]), ({'foo': []}, []))
        self.assertEqual(parser.parse_known(['/foo:bar']),
                         ({'foo': ['bar']}, []))
        self.assertEqual(parser.parse_known(['-foo:bar']),
                         ({'foo': ['bar']}, []))
        self.assertEqual(parser.parse_known(['/foo:bar', '-foo:baz']),
                         ({'foo': ['bar', 'baz']}, []))
    def test_short_dict(self):
        """A short dict option dispatches its suffix to registered sub-keys;
        unknown suffixes are returned as unparsed arguments."""
        parser = ArgumentParser()
        warn = parser.add('/W', type=dict, dest='warn')
        warn.add('1', '2', '3', '4', 'all', dest='level')
        warn.add('X', type=bool, dest='error')
        warn.add('X-', type=bool, dest='error', value=False)
        warn.add('v', type=str, dest='version')
        self.assertEqual(parser.parse_known([]), ({
            'warn': {'level': None, 'error': None, 'version': None}
        }, []))
        self.assertEqual(parser.parse_known(['/W2']), ({
            'warn': {'level': '2', 'error': None, 'version': None}
        }, []))
        # Later occurrences of the same sub-key override earlier ones.
        self.assertEqual(parser.parse_known(['/W2', '/W4']), ({
            'warn': {'level': '4', 'error': None, 'version': None}
        }, []))
        self.assertEqual(parser.parse_known(['/W2', '/WX']), ({
            'warn': {'level': '2', 'error': True, 'version': None}
        }, []))
        self.assertEqual(parser.parse_known(['/Wv17']), ({
            'warn': {'level': None, 'error': None, 'version': '17'}
        }, []))
        # '/Wfoo' matches no sub-key, so it falls through as unknown.
        self.assertEqual(parser.parse_known(['/Wfoo']), ({
            'warn': {'level': None, 'error': None, 'version': None}
        }, ['/Wfoo']))
        self.assertEqual(parser.parse_known(
            ['/WX', '/W2', '/WX-', '/Wall', '/Wv17', '/Wfoo']
        ), ({'warn': {'level': 'all', 'error': False, 'version': '17'}},
            ['/Wfoo']))

    def test_long_dict(self):
        """A long dict option does the same dispatch after the ':' separator."""
        parser = ArgumentParser()
        warn = parser.add('/Warn', type=dict, dest='warn')
        warn.add('1', '2', '3', '4', 'all', dest='level')
        warn.add('X', type=bool, dest='error')
        warn.add('X-', type=bool, dest='error', value=False)
        warn.add('v', type=str, dest='version')
        self.assertEqual(parser.parse_known([]), ({
            'warn': {'level': None, 'error': None, 'version': None}
        }, []))
        self.assertEqual(parser.parse_known(['/Warn:2']), ({
            'warn': {'level': '2', 'error': None, 'version': None}
        }, []))
        self.assertEqual(parser.parse_known(['/Warn:2', '/Warn:4']), ({
            'warn': {'level': '4', 'error': None, 'version': None}
        }, []))
        self.assertEqual(parser.parse_known(['/Warn:2', '/Warn:X']), ({
            'warn': {'level': '2', 'error': True, 'version': None}
        }, []))
        self.assertEqual(parser.parse_known(['/Warn:v17']), ({
            'warn': {'level': None, 'error': None, 'version': '17'}
        }, []))
        self.assertEqual(parser.parse_known(['/Warn:foo']), ({
            'warn': {'level': None, 'error': None, 'version': None}
        }, ['/Warn:foo']))
        self.assertEqual(parser.parse_known(
            ['/Warn:X', '/Warn:2', '/Warn:X-', '/Warn:all', '/Warn:v17',
             '/Warn:foo']
        ), ({'warn': {'level': 'all', 'error': False, 'version': '17'}},
            ['/Warn:foo']))
    def test_alias(self):
        """An alias forwards to its base option, optionally with a fixed value."""
        parser = ArgumentParser()
        nologo = parser.add('/nologo')
        warn = parser.add('/W', type=dict, dest='warn')
        warn.add('0', '1', '2', '3', '4', 'all', dest='level')
        # '/N' is a plain alias; '/w' is an alias that injects value '0'.
        parser.add('/N', type='alias', base=nologo)
        parser.add('/w', type='alias', base=warn, value='0')
        self.assertEqual(parser.parse_known([]),
                         ({'nologo': None, 'warn': {'level': None}}, []))
        self.assertEqual(parser.parse_known(['/N']),
                         ({'nologo': True, 'warn': {'level': None}}, []))
        self.assertEqual(parser.parse_known(['/w']),
                         ({'nologo': None, 'warn': {'level': '0'}}, []))

    def test_unnamed(self):
        """Positional (unnamed) arguments are collected under their dest."""
        parser = ArgumentParser()
        parser.add('/a')
        parser.add_unnamed('libs')
        self.assertEqual(parser.parse_known([]),
                         ({'a': None, 'libs': []}, []))
        self.assertEqual(parser.parse_known(['foo']),
                         ({'a': None, 'libs': ['foo']}, []))
        # Named options may be interleaved with positionals.
        self.assertEqual(parser.parse_known(['foo', '/a', 'bar']),
                         ({'a': True, 'libs': ['foo', 'bar']}, []))
    def test_case(self):
        """Matching is case-sensitive by default; with case_sensitive=False
        only multi-character (long) option names match case-insensitively."""
        parser = ArgumentParser()
        parser.add('/s')
        parser.add('/long')
        self.assertEqual(parser.parse_known(['/s', '/long']),
                         ({'s': True, 'long': True}, []))
        self.assertEqual(parser.parse_known(['/S', '/LONG']),
                         ({'s': None, 'long': None}, ['/S', '/LONG']))
        parser = ArgumentParser(case_sensitive=False)
        parser.add('/s')
        parser.add('/long')
        self.assertEqual(parser.parse_known(['/s', '/long']),
                         ({'s': True, 'long': True}, []))
        # Short options stay case-sensitive even in insensitive mode.
        self.assertEqual(parser.parse_known(['/S', '/LONG']),
                         ({'s': None, 'long': True}, ['/S']))
    def test_collision(self):
        """Registering a name that collides with an existing one raises."""
        parser = ArgumentParser()
        parser.add('/a', '/longa')
        with self.assertRaises(ValueError):
            parser.add('/a')
        with self.assertRaises(ValueError):
            # '/abc' is ambiguous with the short '/a' prefix form.
            parser.add('/abc')
        with self.assertRaises(ValueError):
            parser.add('/longa')

    def test_invalid_prefix_char(self):
        """Option names must start with a recognized prefix character."""
        parser = ArgumentParser()
        with self.assertRaises(ValueError):
            parser.add('warn')

    def test_unexpected_value(self):
        """Boolean options reject attached or ':'-separated values."""
        parser = ArgumentParser()
        parser.add('/a', '/longa')
        with self.assertRaises(ValueError):
            parser.parse_known(['/afoo'])
        with self.assertRaises(ValueError):
            parser.parse_known(['/longa:foo'])
    def test_expected_value(self):
        """Value-taking options raise when the value is missing."""
        parser = ArgumentParser()
        parser.add('/a', '/longa', type=str)
        parser.add('/list', type=list)
        warn = parser.add('/warn', type=dict, dest='warn')
        warn.add('1', '2', '3', '4', 'all', dest='level')
        with self.assertRaises(ValueError):
            parser.parse_known(['/a'])
        with self.assertRaises(ValueError):
            parser.parse_known(['/longa'])
        with self.assertRaises(ValueError):
            parser.parse_known(['/list'])
        with self.assertRaises(ValueError):
            parser.parse_known(['/warn'])

    def test_invalid_dict_child(self):
        """Dict sub-keys may not themselves look like long option names."""
        parser = ArgumentParser()
        warn = parser.add('/W', type=dict, dest='warn')
        with self.assertRaises(ValueError):
            warn.add('version', type=str)

    def test_unexpected_dict_value(self):
        """A strict dict option raises on suffixes with no matching sub-key."""
        parser = ArgumentParser()
        warn = parser.add('/W', type=dict, dest='warn', strict=True)
        warn.add('1', '2', '3', '4', 'all', dest='level')
        with self.assertRaises(ValueError):
            parser.parse_known(['/WX'])

    def test_invalid_alias_base(self):
        """An alias with a fixed value needs a base that accepts values."""
        parser = ArgumentParser()
        warn = parser.add('/W')
        with self.assertRaises(TypeError):
            parser.add('/w', type='alias', base=warn, value='0')
|
apple/coremltools
|
coremltools/converters/onnx/_tests/test_graph.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from coremltools._deps import _HAS_ONNX, MSG_ONNX_NOT_FOUND
if _HAS_ONNX:
import onnx
from onnx import helper, numpy_helper, TensorProto
from coremltools.converters.onnx._graph import Node, Graph
from ._test_utils import (
_onnx_create_single_node_model,
_onnx_create_model,
_conv_pool_output_size,
_random_array,
)
@unittest.skipUnless(_HAS_ONNX, MSG_ONNX_NOT_FOUND)
class NodeTest(unittest.TestCase):
    """Tests for Node.from_onnx conversion of a single ONNX node."""

    def test_create_node(self):  # type: () -> None
        """An Elu node converts with its input, output and alpha attribute."""
        model = _onnx_create_single_node_model(
            "Elu", [(1, 3, 224, 224)], [(1, 3, 224, 224)], alpha=0.5
        )
        graph = model.graph
        node = graph.node[0]
        node_ = Node.from_onnx(node)
        # assertEqual (rather than assertTrue(x == y)) reports both values
        # in the failure message.
        self.assertEqual(len(node_.inputs), 1)
        self.assertEqual(len(node_.outputs), 1)
        self.assertEqual(len(node_.attrs), 1)
        self.assertEqual(node_.attrs["alpha"], 0.5)
@unittest.skipUnless(_HAS_ONNX, MSG_ONNX_NOT_FOUND)
class GraphTest(unittest.TestCase):
    """Tests for Graph.from_onnx conversion of a small Conv+Relu model."""

    def test_create_graph(self):  # type: () -> None
        """Build a Conv->Relu ONNX model and verify the converted graph's
        inputs, outputs and parent/child wiring."""
        kernel_shape = (3, 2)
        strides = (2, 3)
        pads = (4, 2, 4, 2)
        dilations = (1, 2)
        group = 1
        weight = numpy_helper.from_array(_random_array((16, 3, 3, 2)), name="weight")
        input_shape = (1, 3, 224, 224)
        output_size = _conv_pool_output_size(
            input_shape, dilations, kernel_shape, pads, strides
        )
        output_shape = (1, int(weight.dims[0]), output_size[0], output_size[1])
        inputs = [("input0", input_shape)]
        outputs = [("output0", output_shape, TensorProto.FLOAT)]
        conv = helper.make_node(
            "Conv",
            inputs=[inputs[0][0], "weight"],
            outputs=["conv_output"],
            dilations=dilations,
            group=group,
            kernel_shape=kernel_shape,
            pads=pads,
            strides=strides,
        )
        relu = helper.make_node(
            "Relu", inputs=[conv.output[0]], outputs=[outputs[0][0]]
        )
        model = _onnx_create_model([conv, relu], inputs, outputs, [weight])
        graph_ = Graph.from_onnx(model.graph, onnx_ir_version=5)
        # assertEqual (rather than assertTrue(x == y)) reports both values
        # in the failure message.
        self.assertEqual(len(graph_.inputs), 1)
        self.assertEqual(graph_.inputs[0][2], input_shape)
        self.assertEqual(len(graph_.outputs), 1)
        self.assertEqual(graph_.outputs[0][2], output_shape)
        self.assertEqual(len(graph_.nodes), 2)
        self.assertEqual(len(graph_.nodes[0].parents), 0)
        self.assertEqual(len(graph_.nodes[1].parents), 1)
        self.assertEqual(len(graph_.nodes[0].children), 1)
        self.assertEqual(len(graph_.nodes[1].children), 0)
|
pdfliberation/python-hocrgeo
|
hocrgeo/parsers/hocr.py
|
from __future__ import unicode_literals
from io import TextIOBase
try:
TextIOBase = file
except NameError:
pass # Forward compatibility with Py3k
from bs4 import BeautifulSoup
import re
from hocrgeo.models import HOCRDocument
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
logger.setLevel(logging.INFO)
# NOTE(review): a StreamHandler is attached unconditionally at import time,
# in addition to the NullHandler above. Libraries normally leave handler
# configuration to the application -- confirm this duplication is intended.
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
class HOCRParser:
    """
    Parse hOCR documents.

    Takes either a file-like object or a filename.
    """
    def __init__(self, fs=None):
        '''
        Initializes a HOCRParser.

        :param fs: Optional file-like object to read or hOCR as a string.
        '''
        self._rawdata = None
        # Pre-compiled patterns for properties inside the hOCR "title" attribute.
        self._bboxreg = re.compile(r'bbox (?P<x0>\d+) (?P<y0>\d+) (?P<x1>\d+) (?P<y1>\d+)')
        self._imagereg = re.compile(r'image (\'|\")(.*)\1')
        self._pagenoreg = re.compile(r'ppageno (\d+)')
        self._doc = None
        self._parseddata = None
        if fs:
            self._rawdata = self._get_data_string(fs)
    def _get_data_string(self, fs):
        '''Return the text content of *fs*, a file-like object or a string.'''
        if isinstance(fs, TextIOBase):
            return fs.read()
        else:
            try:
                # Python 2: accept unicode directly; decode byte strings as UTF-8.
                if isinstance(fs, unicode):
                    return fs
                else:
                    clean_fs = unicode(fs, encoding='utf-8')
                    if isinstance(clean_fs, unicode):
                        return clean_fs
            except NameError:
                # Python 3: `unicode` does not exist and str is already text.
                if isinstance(fs, str):
                    return fs
        raise TypeError('Input is not a readable string or file object')
    def load(self, fsfile):
        '''Load a file from a filepath or a file-like instance'''
        fp = None
        if isinstance(fsfile, str):
            try:
                fp = open(fsfile, 'rb')
            except IOError as e:
                raise e
        # BUG FIX: the original tested `isinstance(fs, TextIOBase)` where
        # `fs` is undefined in this scope (the parameter is `fsfile`),
        # raising NameError for every file-like argument.
        elif isinstance(fsfile, TextIOBase):
            fp = fsfile
        else:
            raise TypeError('argument must be a file object or a valid filepath')
        self._rawdata = self._get_data_string(fp)
    def loads(self, fs):
        '''Load hOCR data directly from a string.'''
        if isinstance(fs, str):
            self._rawdata = self._get_data_string(fs)
        else:
            raise TypeError('argument must be a string or unicode instance')
    @property
    def document(self):
        '''Parsed HOCR document'''
        return self._doc
    def parse(self):
        '''Parse hOCR document into a python object.'''
        def _extract_objects_from_element(root, el_name, el_class):
            # Find all `el_name` elements of class `el_class` under `root`
            # and return (nodes, extracted-feature dicts) in parallel lists.
            nodes = root.find_all(el_name, el_class)
            objects = []
            for n in nodes:
                obj = _extract_features(n)
                objects.append(obj)
            return (nodes, objects)
        def _extract_bbox(fs_str):
            '''Regular expression matching on a fs_str that should contain hOCR bbox coordinates.'''
            match = self._bboxreg.search(fs_str)
            if match:
                match_tup = match.groups()
                match_list = []
                for value in match_tup:
                    match_list.append(int(value))
                return tuple(match_list)
            return None
        def _extract_features(element):
            '''Extract basic hOCR features from a given element.'''
            features = {}
            features['id'] = element.get('id')
            title_el = element.get('title', '')
            image_match = self._imagereg.search(title_el)
            if image_match:
                features['image'] = image_match.group(2)
            pageno_match = self._pagenoreg.search(title_el)
            if pageno_match:
                features['pageno'] = int(pageno_match.group(1))
            features['bbox'] = _extract_bbox(title_el)
            return features
        if not self._rawdata:
            raise Exception('No fsfile specified. You must specify an fs file when instantiating or as an argument to the parse method')
        soup = BeautifulSoup(self._rawdata, "lxml")
        self._parseddata = {}
        # Extract ocr system metadata
        ocr_system = soup.find('meta', attrs={'name': 'ocr-system'})
        self._parseddata['system'] = ocr_system.get('content', None) if ocr_system else None
        # Extract capabilities
        ocr_capabilities = soup.find('meta', attrs={'name': 'ocr-capabilities'})
        self._parseddata['capabilities'] = ocr_capabilities.get('content', ' ').split(' ') if ocr_capabilities else None
        # Walk the hOCR hierarchy: page -> carea -> paragraph -> line -> word,
        # attaching each level's children onto its feature dict.
        page_nodes, page_objects = _extract_objects_from_element(soup, 'div', 'ocr_page')
        page_tup = list(zip(page_nodes, page_objects))
        logger.info('Found {0} page(s)'.format(len(page_tup)))
        for page_node, page_obj in page_tup:
            carea_nodes, carea_objects = _extract_objects_from_element(page_node, 'div', 'ocr_carea')
            careas_tup = list(zip(carea_nodes, carea_objects))
            for c_node, c_obj in careas_tup:
                para_nodes, para_objects = _extract_objects_from_element(c_node, 'p', 'ocr_par')
                paras_tup = list(zip(para_nodes, para_objects))
                for para_node, para_obj in paras_tup:
                    line_nodes, line_objects = _extract_objects_from_element(para_node, 'span', 'ocr_line')
                    lines_tup = list(zip(line_nodes, line_objects))
                    for l_node, l_obj in lines_tup:
                        word_nodes, word_objects = _extract_objects_from_element(l_node, 'span', 'ocrx_word')
                        words_tup = list(zip(word_nodes, word_objects))
                        for w_node, w_obj in words_tup:
                            word_str = w_node.get_text(strip=True)
                            if word_str:
                                w_obj['text'] = w_node.get_text()
                        l_obj['words'] = word_objects
                    para_obj['lines'] = line_objects
                c_obj['paragraphs'] = para_objects
            page_obj['careas'] = carea_objects
        self._parseddata['pages'] = page_objects
        self._doc = HOCRDocument(self._parseddata)
|
ConPaaS-team/conpaas
|
conpaas-services/src/conpaas/core/misc.py
|
import socket
import fcntl
import struct
import zipfile
import tarfile
import readline
from subprocess import Popen, PIPE
from conpaas.core.https.server import FileUploadField
def file_get_contents(filepath):
    """Return the entire contents of the file at *filepath* as a string."""
    # The context manager guarantees the handle is closed even if read() raises;
    # the original left the file open on error.
    with open(filepath, 'r') as f:
        return f.read()
def file_write_contents(filepath, filecontent):
    """Write *filecontent* to *filepath*, truncating any existing file."""
    # The context manager guarantees the handle is flushed and closed even if
    # write() raises; the original left the file open on error.
    with open(filepath, 'w') as f:
        f.write(filecontent)
def verify_port(port):
    '''Validate a TCP/UDP port number.

    Raise TypeError if *port* is not an int, ValueError if it is outside
    the valid 1-65535 range. Returns None on success.
    '''
    # Exact type check (not isinstance) so bool and int subclasses are
    # rejected, matching the original `type(port) != int` behaviour.
    if type(port) is not int:
        raise TypeError('port should be an integer')
    if not (1 <= port <= 65535):
        raise ValueError('port should be a valid port number')
def verify_ip_or_domain(ip):
    '''Raise TypeError if ip is not a string.
    Raise ValueError if ip is an invalid IP address or unresolvable hostname.
    '''
    # NOTE(review): `unicode` exists only on Python 2; on Python 3 this line
    # is safe for str inputs because `and` short-circuits on the first test,
    # but a non-str input would raise NameError -- confirm Python 2 is the
    # intended runtime.
    if (type(ip) != str and type(ip) != unicode):
        raise TypeError('IP is should be a string')
    try:
        # Accepts both dotted-quad addresses and resolvable domain names.
        socket.gethostbyname(ip)
    except Exception as e:
        raise ValueError('Invalid IP string "%s" -- %s' % (ip, e))
def verify_ip_port_list(l):
    '''Check l is a list of [IP, PORT]. Raise appropriate Error if invalid types
    or values were found
    '''
    if type(l) != list:
        raise TypeError('Expected a list of [IP, PORT]')
    for pair in l:
        ### FIXME HECTOR ...
        #if len(pair) != 2:
        # Despite the docstring, each entry is expected to be a mapping with
        # at least 'ip' and 'port' keys (extra keys are tolerated by the
        # relaxed `< 2` check below).
        if len(pair) < 2:
            raise TypeError('List should contain IP,PORT values')
        if 'ip' not in pair or 'port' not in pair:
            raise TypeError('List should contain IP,PORT values')
        verify_ip_or_domain(pair['ip'])
        verify_port(pair['port'])
def archive_get_type(name):
    """Return 'tar' or 'zip' according to the archive at path *name*,
    or None when the file is neither. Tar is probed first, as before."""
    for probe, kind in ((tarfile.is_tarfile, 'tar'),
                        (zipfile.is_zipfile, 'zip')):
        if probe(name):
            return kind
    return None
def archive_open(name):
    """Open the archive at path *name* as a TarFile or ZipFile.

    Returns None when the file is neither format. Tar is probed first,
    matching archive_get_type()."""
    if tarfile.is_tarfile(name):
        return tarfile.open(name)
    if zipfile.is_zipfile(name):
        return zipfile.ZipFile(name)
    return None
def archive_get_members(arch):
    """Return the list of member names of an open zip or tar archive.

    Raises TypeError for any other object. (The original version left
    `members` unbound in that case and crashed with UnboundLocalError.)
    """
    if isinstance(arch, zipfile.ZipFile):
        return arch.namelist()
    if isinstance(arch, tarfile.TarFile):
        return [i.name for i in arch.getmembers()]
    raise TypeError('expected a ZipFile or TarFile instance')
def archive_extract(arch, path):
    # Extract all members of an open zip/tar archive into directory `path`.
    # Any other object type is silently ignored.
    if isinstance(arch, zipfile.ZipFile):
        arch.extractall(path)
    elif isinstance(arch, tarfile.TarFile):
        arch.extractall(path=path)
def archive_close(arch):
    # Close an open zip/tar archive; any other object type is silently ignored.
    if isinstance(arch, zipfile.ZipFile)\
    or isinstance(arch, tarfile.TarFile):
        arch.close()
def get_ip_address(ifname):
    # Return the IPv4 address of network interface `ifname` (e.g. 'eth0')
    # via the SIOCGIFADDR ioctl. Linux-specific (uses fcntl.ioctl on a
    # datagram socket); interface names are truncated to 15 bytes, the
    # kernel's IFNAMSIZ limit.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    return socket.inet_ntoa(fcntl.ioctl(
        s.fileno(),
        0x8915,  # SIOCGIFADDR
        struct.pack('256s', ifname[:15])
    )[20:24])  # sockaddr_in.sin_addr lives at offset 20 of the ifreq struct
def run_cmd(cmd, directory='/'):
    """Run *cmd* in a shell from *directory* and return (stdout, stderr).

    The exit status is discarded; use run_cmd_code() when it is needed.
    """
    pipe = Popen(cmd, shell=True, cwd=directory, stdout=PIPE, stderr=PIPE)
    out, error = pipe.communicate()
    # communicate() already waits for the process to finish; the original's
    # extra pipe.wait() was redundant and its result was never used.
    return out, error
def run_cmd_code(cmd, directory='/'):
    """Run a shell command and capture its output and exit status.

    Parameters
    ----------
    cmd : string
        command to run in a shell
    directory : string, default to '/'
        directory where to run the command

    Returns
    -------
    (std_out, std_err, return_code) : tuple
        standard output (string), standard error output (string) and the
        process return code (int).
    """
    proc = Popen(cmd, shell=True, cwd=directory, stdout=PIPE, stderr=PIPE)
    std_out, std_err = proc.communicate()
    return std_out, std_err, proc.wait()
def rlinput(prompt, prefill=''):
    """Prompt the user for a line of input, pre-filled with *prefill*.

    Uses a readline startup hook to inject the pre-filled text, and always
    clears the hook afterwards.
    """
    readline.set_startup_hook(lambda: readline.insert_text(prefill))
    try:
        try:
            # Python 2 spelling; raises NameError on Python 3.
            return raw_input(prompt)
        except NameError:
            return input(prompt)
    finally:
        readline.set_startup_hook()
def list_lines(lines):
    """Returns the list of trimmed, non-empty lines.

    @param lines Multi-line string
    """
    result = []
    for raw_line in lines.splitlines():
        trimmed = raw_line.strip()
        if trimmed:
            result.append(trimmed)
    return result
def is_constraint(constraint, filter_res, errmsg):
    """Build a validator closure.

    The returned callable applies filter_res(arg) when constraint(arg)
    holds, and otherwise raises Exception(errmsg(arg)).
    """
    def _check(value):
        if not constraint(value):
            raise Exception(errmsg(value))
        return filter_res(value)
    return _check
def represents_int(s):
    """Return True if *s* can be converted with int(), else False."""
    try:
        int(s)
        return True
    except (ValueError, TypeError):
        # TypeError covers non-string/non-number inputs such as None or [],
        # which the original version let propagate instead of reporting False.
        return False
def is_int(argument):
    # Convert `argument` to int, or raise with a descriptive message.
    return is_constraint(lambda arg: represents_int(arg),
                         lambda arg: int(arg),
                         lambda arg: "'%s' has type '%s', should be integer" % (arg, type(arg).__name__))(argument)
def represents_bool(s):
    # True when `s` spells one of the accepted yes/no values (case-insensitive).
    return str(s).lower() in ("yes", "y", "true", "t", "1",
                              "no", "n", "false", "f", "0")
def is_bool(argument):
    # Convert an accepted spelling to a bool (truthy spellings -> True,
    # the remaining accepted spellings -> False); raise for anything else.
    return is_constraint(lambda arg: represents_bool(arg),
                         lambda arg: str(arg).lower() in ("yes", "y", "true", "t", "1"),
                         lambda arg: "'%s' has type '%s', should be bool" % (arg, type(arg).__name__))(argument)
def is_more_than(minval):
    # Validator factory: argument must be strictly greater than `minval`.
    return is_constraint(lambda arg: arg > minval,
                         lambda arg: arg,
                         lambda arg: "%s is not more than %s" % (arg, minval))
def is_more_or_eq_than(minval):
    # Validator factory: argument must be >= `minval`.
    return is_constraint(lambda arg: arg >= minval,
                         lambda arg: arg,
                         lambda arg: "%s is not more or equal than %s" % (arg, minval))
def is_between(minval, maxval):
    # Validator factory: argument must fall within [minval, maxval].
    return is_constraint(lambda arg: arg >= minval and arg <= maxval,
                         lambda arg: arg,
                         lambda arg: "%s is not between %s and %s" % (arg, minval, maxval))
def is_pos_int(argument):
    # Strictly positive integer (> 0), converted from its string form.
    argint = is_int(argument)
    return is_more_than(0)(argint)
def is_pos_nul_int(argument):
    # Non-negative integer (>= 0), converted from its string form.
    argint = is_int(argument)
    return is_more_or_eq_than(0)(argint)
def is_in_list(exp_list):
    # Validator factory: argument must be a member of `exp_list`.
    return is_constraint(lambda arg: arg in exp_list,
                         lambda arg: arg,
                         lambda arg: "'%s' must be one of %s" % (arg, exp_list))
def is_not_in_list(exp_list):
    # Validator factory: argument must NOT be a member of `exp_list`.
    return is_constraint(lambda arg: arg not in exp_list,
                         lambda arg: arg,
                         lambda arg: "'%s' must not be one of %s" % (arg, exp_list))
def is_string(argument):
    # NOTE(review): `unicode` is Python 2 only -- the `or` short-circuits for
    # str inputs, but non-str inputs would raise NameError on Python 3.
    return is_constraint(lambda arg: isinstance(arg, str) or isinstance(arg, unicode),
                         lambda arg: arg,
                         lambda arg: "'%s' has type '%s', should be string" % (arg, type(arg).__name__))(argument)
def is_non_empty_list(argument):
    # Argument must be a list with at least one element.
    return is_constraint(lambda arg: isinstance(arg, list) and len(arg) > 0,
                         lambda arg: arg,
                         lambda arg: "'%s' has type '%s', should be non-empty list" % (arg, type(arg).__name__))(argument)
def is_list(argument):
    # Argument must be a list (possibly empty).
    return is_constraint(lambda arg: isinstance(arg, list),
                         lambda arg: arg,
                         lambda arg: "'%s' has type '%s', should be list" % (arg, type(arg).__name__))(argument)
def is_dict(argument):
    # Argument must be a dict.
    return is_constraint(lambda arg: isinstance(arg, dict),
                         lambda arg: arg,
                         lambda arg: "'%s' has type '%s', should be dict" % (arg, type(arg).__name__))(argument)
def is_uploaded_file(argument):
    # Argument must be a FileUploadField produced by the HTTPS server layer.
    return is_constraint(lambda arg: isinstance(arg, FileUploadField),
                         lambda arg: arg,
                         lambda arg: "'%s' has type '%s', should be uploaded file" % (arg, type(arg).__name__))(argument)
def is_dict2(mandatory_keys, optional_keys=None):
    """Validator factory for dicts with a fixed key schema.

    The returned validator checks that every key in `mandatory_keys` is
    present, tolerates keys from `optional_keys`, and rejects any other key.
    Returns the validated dict.
    """
    def _dict2(argument):
        argdict = is_dict(argument)
        # list() makes this work on Python 3, where dict.keys() returns a
        # view with no remove() method; on Python 2 it is an equivalent copy.
        keys = list(argument.keys())
        for mand_key in mandatory_keys:
            try:
                keys.remove(mand_key)
            except:
                raise Exception("Was expecting key '%s' in dict '%s'" \
                                % (mand_key, argdict))
        if optional_keys is None:
            _optional_keys = []
        else:
            _optional_keys = optional_keys
        for opt_key in _optional_keys:
            try:
                keys.remove(opt_key)
            except:
                continue
        # Anything left over was neither mandatory nor optional.
        if len(keys) > 0:
            raise Exception("Unexpected key in dict '%s': '%s'" % (argdict, keys))
        return argdict
    return _dict2
def is_list_dict(argument):
    # Argument must be a list whose elements are all dicts.
    mylist = is_list(argument)
    for arg in mylist:
        _dict = is_dict(arg)  # raises on non-dict elements; value unused
    return mylist
def is_list_dict2(mandatory_keys, optional_keys=None):
    # Validator factory: a list of dicts, each matching the is_dict2 schema.
    def _list_dict2(argument):
        mylist = is_list_dict(argument)
        for arg in mylist:
            _dict = is_dict2(mandatory_keys, optional_keys)(arg)
        return mylist
    return _list_dict2
def check_arguments(expected_params, args):
    """ Check, convert POST arguments provided as dict.

    Parameter
    ---------
    expected_params: list
        list of expected parameters where a parameter is a tuple
        (name, constraint) for a mandatory argument or
        (name, constraint, default_value) for an optional parameter,
        where constraint is a callable validator (e.g. is_string, is_int).
    args: dict
        args[name] = value

    Returns
    -------
    A list of all validated and converted parameters, in the same order as
    expected_params (or the single value itself when exactly one parameter
    was expected).  Raises an exception when an expected argument is
    missing, fails its constraint, or an unexpected argument remains.
    """
    parsed_args = []
    for param in expected_params:
        if len(param) < 2:
            raise Exception("Unexpected number of arguments describing a parameter: %s" % param)
        name, constraint = param[0], param[1]
        if name in args:
            # pop() both consumes and tracks the argument, so leftovers can
            # be reported as unexpected afterwards.
            raw_value = args.pop(name)
            try:
                parsed_args.append(constraint(raw_value))
            except Exception as ex:
                raise Exception("Parameter '%s': %s." % (name, ex))
        elif len(param) >= 3:
            # Optional parameter: fall back to the declared default.
            # TODO: decide whether the default value should satisfy the constraint
            parsed_args.append(param[2])
        else:
            raise Exception("Missing the mandatory parameter '%s'." % name)
    if args:
        raise Exception("Unexpected parameters: %s." % args)
    return parsed_args[0] if len(parsed_args) == 1 else parsed_args
|
paulsmith/geodjango
|
django/contrib/gis/gdal/prototypes/ds.py
|
"""
This module houses the ctypes function prototypes for OGR DataSource
related data structures. OGR_Dr_*, OGR_DS_*, OGR_L_*, OGR_F_*,
OGR_Fld_* routines are relevant here.
"""
from ctypes import c_char_p, c_int, c_long, c_void_p, POINTER
from django.contrib.gis.gdal.envelope import OGREnvelope
from django.contrib.gis.gdal.libgdal import lgdal
from django.contrib.gis.gdal.prototypes.generation import \
const_string_output, double_output, geom_output, int_output, \
srs_output, void_output, voidptr_output
c_int_p = POINTER(c_int) # shortcut type
### Driver Routines ###
register_all = void_output(lgdal.OGRRegisterAll, [], errcheck=False)
cleanup_all = void_output(lgdal.OGRCleanupAll, [], errcheck=False)
get_driver = voidptr_output(lgdal.OGRGetDriver, [c_int])
get_driver_by_name = voidptr_output(lgdal.OGRGetDriverByName, [c_char_p])
get_driver_count = int_output(lgdal.OGRGetDriverCount, [])
get_driver_name = const_string_output(lgdal.OGR_Dr_GetName, [c_void_p])
### DataSource ###
open_ds = voidptr_output(lgdal.OGROpen, [c_char_p, c_int, POINTER(c_void_p)])
destroy_ds = void_output(lgdal.OGR_DS_Destroy, [c_void_p], errcheck=False)
release_ds = void_output(lgdal.OGRReleaseDataSource, [c_void_p])
get_ds_name = const_string_output(lgdal.OGR_DS_GetName, [c_void_p])
get_layer = voidptr_output(lgdal.OGR_DS_GetLayer, [c_void_p, c_int])
get_layer_by_name = voidptr_output(lgdal.OGR_DS_GetLayerByName, [c_void_p, c_char_p])
get_layer_count = int_output(lgdal.OGR_DS_GetLayerCount, [c_void_p])
### Layer Routines ###
get_extent = void_output(lgdal.OGR_L_GetExtent, [c_void_p, POINTER(OGREnvelope), c_int])
get_feature = voidptr_output(lgdal.OGR_L_GetFeature, [c_void_p, c_long])
get_feature_count = int_output(lgdal.OGR_L_GetFeatureCount, [c_void_p, c_int])
get_layer_defn = voidptr_output(lgdal.OGR_L_GetLayerDefn, [c_void_p])
get_layer_srs = srs_output(lgdal.OGR_L_GetSpatialRef, [c_void_p])
get_next_feature = voidptr_output(lgdal.OGR_L_GetNextFeature, [c_void_p])
reset_reading = void_output(lgdal.OGR_L_ResetReading, [c_void_p], errcheck=False)
### Feature Definition Routines ###
get_fd_geom_type = int_output(lgdal.OGR_FD_GetGeomType, [c_void_p])
get_fd_name = const_string_output(lgdal.OGR_FD_GetName, [c_void_p])
# NOTE(review): get_feat_name binds the same C entry point (OGR_FD_GetName)
# as get_fd_name above -- presumably kept as an alias for callers; confirm.
get_feat_name = const_string_output(lgdal.OGR_FD_GetName, [c_void_p])
get_field_count = int_output(lgdal.OGR_FD_GetFieldCount, [c_void_p])
get_field_defn = voidptr_output(lgdal.OGR_FD_GetFieldDefn, [c_void_p, c_int])
### Feature Routines ###
clone_feature = voidptr_output(lgdal.OGR_F_Clone, [c_void_p])
destroy_feature = void_output(lgdal.OGR_F_Destroy, [c_void_p], errcheck=False)
feature_equal = int_output(lgdal.OGR_F_Equal, [c_void_p, c_void_p])
get_feat_geom_ref = geom_output(lgdal.OGR_F_GetGeometryRef, [c_void_p])
get_feat_field_count = int_output(lgdal.OGR_F_GetFieldCount, [c_void_p])
get_feat_field_defn = voidptr_output(lgdal.OGR_F_GetFieldDefnRef, [c_void_p, c_int])
get_fid = int_output(lgdal.OGR_F_GetFID, [c_void_p])
get_field_as_datetime = int_output(lgdal.OGR_F_GetFieldAsDateTime, [c_void_p, c_int, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p])
get_field_as_double = double_output(lgdal.OGR_F_GetFieldAsDouble, [c_void_p, c_int])
get_field_as_integer = int_output(lgdal.OGR_F_GetFieldAsInteger, [c_void_p, c_int])
get_field_as_string = const_string_output(lgdal.OGR_F_GetFieldAsString, [c_void_p, c_int])
get_field_index = int_output(lgdal.OGR_F_GetFieldIndex, [c_void_p, c_char_p])
### Field Routines ###
get_field_name = const_string_output(lgdal.OGR_Fld_GetNameRef, [c_void_p])
get_field_precision = int_output(lgdal.OGR_Fld_GetPrecision, [c_void_p])
get_field_type = int_output(lgdal.OGR_Fld_GetType, [c_void_p])
get_field_type_name = const_string_output(lgdal.OGR_GetFieldTypeName, [c_int])
get_field_width = int_output(lgdal.OGR_Fld_GetWidth, [c_void_p])
|
quantmind/pulsar-queue
|
pq/backends/redis.py
|
from .. import mq
class MQ(mq.MQ):
    """Redis Message Broker
    """
    def __init__(self, backend, store):
        super().__init__(backend, store)
        # Dedicated redis client created from the configured store.
        self._client = store.client()
    async def get_message(self, *queues):
        '''Asynchronously retrieve a :class:`Task` from queues

        Blocks (via redis BRPOP) across all prefixed queues until a message
        arrives or the pool timeout (at least one second) expires.

        :return: a :class:`.Task` or ``None``.
        '''
        assert queues
        args = [self.prefixed(q) for q in queues]
        args.append(max(1, int(self.cfg.task_pool_timeout)))
        qt = await self._client.execute('brpop', *args)
        if qt:
            # brpop returns (queue_name, payload); only the payload is needed.
            _, message = qt
            return self.decode(message)
    async def flush_queues(self, *queues):
        '''Clear a list of task queues

        All DEL commands are batched in one pipeline round-trip.
        '''
        pipe = self._client.pipeline()
        for queue in queues:
            pipe.execute('del', self.prefixed(queue))
        await pipe.commit()
    async def queue_message(self, queue, message):
        '''Asynchronously queue a task

        LPUSH pairs with the BRPOP in get_message for FIFO delivery.
        '''
        await self._client.lpush(self.prefixed(queue), message)
    async def size(self, *queues):
        # Return the lengths of the given queues, batched in one pipeline.
        pipe = self._client.pipeline()
        for queue in queues:
            pipe.execute('llen', self.prefixed(queue))
        sizes = await pipe.commit()
        return sizes
    async def incr(self, name):
        # Atomically increment the named counter; returns the new value.
        concurrent = await self._client.incr(self.prefixed(name))
        return concurrent
    async def decr(self, name):
        # Atomically decrement the named counter; returns the new value.
        concurrent = await self._client.decr(self.prefixed(name))
        return concurrent
|
robotican/ric
|
ric_board/scripts/RiCConfigurator/GUI/Schemes/UsbRoles.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'UsbRoles.ui'
#
# Created: Wed Jun 10 09:17:48 2015
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # PyQt API v2 has no QString; plain str passes through unchanged.
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Older PyQt builds without UnicodeUTF8: translate() takes no encoding.
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_UsbRoles(object):
    """Generated PyQt4 UI class for the USB-roles password dialog.

    Regenerated from UsbRoles.ui -- do not edit by hand (see file header).
    """
    def setupUi(self, UsbRoles):
        """Build the dialog widgets: password field, label and OK/Cancel box."""
        UsbRoles.setObjectName(_fromUtf8("UsbRoles"))
        UsbRoles.resize(210, 92)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/icon.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        UsbRoles.setWindowIcon(icon)
        UsbRoles.setStyleSheet(_fromUtf8("QDialog {\n"
        "    background-color: qlineargradient(spread:pad, x1:1, y1:0.682, x2:0.966825, y2:0, stop:0 rgba(224, 224, 224, 255), stop:1 rgba(171, 171, 171, 255));\n"
        "}"))
        self.buttonBox = QtGui.QDialogButtonBox(UsbRoles)
        self.buttonBox.setGeometry(QtCore.QRect(20, 60, 181, 32))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        self.password = QtGui.QLineEdit(UsbRoles)
        self.password.setGeometry(QtCore.QRect(80, 10, 121, 21))
        self.password.setEchoMode(QtGui.QLineEdit.Password)
        self.password.setObjectName(_fromUtf8("password"))
        self.label = QtGui.QLabel(UsbRoles)
        self.label.setGeometry(QtCore.QRect(10, 0, 81, 41))
        self.label.setObjectName(_fromUtf8("label"))
        self.retranslateUi(UsbRoles)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), UsbRoles.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), UsbRoles.reject)
        QtCore.QMetaObject.connectSlotsByName(UsbRoles)
    def retranslateUi(self, UsbRoles):
        """Apply the translated window title and widget texts."""
        UsbRoles.setWindowTitle(_translate("UsbRoles", "USB Roles", None))
        self.password.setPlaceholderText(_translate("UsbRoles", "Enter password", None))
        self.label.setText(_translate("UsbRoles", "Password:", None))
import resource_rc
|
heavenshell/py-robo-talk
|
tests/test_talk_handler.py
|
# -*- coding: utf-8 -*-
"""
robo.tests.test_talk_handler
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for robo.handlers.talk.
:copyright: (c) 2015 Shinya Ohyanagi, All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import os
import logging
import requests
import simplejson as json
from mock import patch
from unittest import TestCase
from robo.robot import Robot
from robo.handlers.talk import Client, Talk
def dummy_response(m):
    """Attach a canned HTTP-200 docomo dialogue response to mock *m*."""
    payload = {
        'context': 'D0yHgwljc_mhTPIGs--toQ',
        'utt': '\u30ac\u30c3', 'da': '0', 'yomi': '\u30ac\u30c3',
        'mode': 'dialog'
    }
    canned = requests.Response()
    canned.status_code = 200
    canned._content = json.dumps(payload)
    m.return_value = canned
class NullAdapter(object):
    """Test adapter that records every message instead of delivering it."""

    def __init__(self, signal):
        self.signal = signal
        self.responses = []

    def say(self, message, **kwargs):
        """Record *message* and echo it back; extra kwargs are ignored."""
        self.responses.append(message)
        return message
class TestClient(TestCase):
    @classmethod
    def setUpClass(cls):
        # The docomo Client reads its API key from the environment.
        os.environ['DOCOMO_API_KEY'] = 'foo'
        cls.client = Client()
    @patch('doco.requests.post')
    def test_generate_url(self, m):
        """ Client().talk() should response docomo dialogue response. """
        # Replace the outgoing HTTP POST with the canned dialogue response.
        dummy_response(m)
        ret = self.client.talk('nullpo')
        self.assertEqual(ret, '\u30ac\u30c3')
class TestTalkHandler(TestCase):
    @classmethod
    def setUpClass(cls):
        # Quiet the robot's logger so test output stays clean.
        logger = logging.getLogger('robo')
        logger.level = logging.ERROR
        cls.robot = Robot('test', logger)
        cls.robot.register_default_handlers()

        # Talk() reads the docomo API key from the environment.
        os.environ['DOCOMO_API_KEY'] = 'foo'
        talk = Talk()
        talk.signal = cls.robot.handler_signal
        method = cls.robot.parse_handler_methods(talk)
        cls.robot.handlers.extend(method)

        # Capture robot output with the in-memory NullAdapter.
        adapter = NullAdapter(cls.robot.handler_signal)
        cls.robot.adapters['null'] = adapter

    @patch('doco.requests.post')
    def test_should_talk(self, m):
        """ Talk().get() should response docomo dialogue response. """
        dummy_response(m)
        self.robot.handler_signal.send('test aaaa')
        self.assertEqual(self.robot.adapters['null'].responses[0],
                         '\u30ac\u30c3')
        # Reset captured responses so later tests stay independent.
        self.robot.adapters['null'].responses = []
|
davidjcox/artlaasya
|
artlaasya/signals.py
|
'''artlaasya signals'''
from django.dispatch import receiver
from django.db.models.signals import pre_save, post_save, pre_delete
try:
from django.utils.text import slugify
except ImportError:
try:
from django.template.defaultfilters import slugify
except ImportError:
print("Unable to import `slugify`.")
except:
print("Unable to import `slugify`.")
from decimal import Decimal
from artlaasya.utils import is_django_version_greater_than, delete_uploaded_file
from artlaasya.models import (Artist,
ArtistRatchet,
Genre,
Artwork,
ArtworkRatchet,
Event,
EventRatchet)
DJANGO_SAVE_UPDATEABLE = is_django_version_greater_than(1, 4)
@receiver(pre_save, sender=Artist)
def slugify__artist(sender, instance, slugify=slugify, **kwargs):
    """
    Manages the uniquely numbered suffix for `name`.

    Artist [`first_name` + `last_name` + suffix] --> `slug`.
    """
    # Re-slug when either name component changed, or when no slug exists yet
    # (a brand-new instance).
    name_fields_changed = ('first_name' in instance.changed_fields or
                           'last_name' in instance.changed_fields)
    if (name_fields_changed or not instance.slug):
        _name = instance.__str__().lower()
        # The ratchet row hands out a monotonically increasing per-name
        # suffix so artists sharing a name still get unique slugs.
        _ratchet, _created = ArtistRatchet.ratchets.get_or_create(name=_name)
        _incremented_suffix = _ratchet.suffix + 1
        _ratchet.suffix = _incremented_suffix
        _ratchet.save()
        # Zero-pad to three digits, e.g. "007".
        _suffix = str.zfill(str(_incremented_suffix), 3)
        instance.slug = slugify('-'.join([_name, _suffix]))
@receiver(post_save, sender=Artist)
def deactivate_artworks_of_inactive_artist(sender, instance, created, **kwargs):
    """
    Ensures that all artworks of an artist are deactivated when artist is
    deactivated.
    """
    # Only act when the active flag itself was just switched off.
    if 'is_active' not in instance.changed_fields:
        return
    if instance.is_active:
        return
    # Cascade the deactivation to every artwork that is still active.
    for artwork in instance.artworks_authored.all():
        if not artwork.is_active:
            continue
        artwork.is_active = False
        if DJANGO_SAVE_UPDATEABLE:
            artwork.save(update_fields=['is_active'])
        else:
            artwork.save()
@receiver(pre_save, sender=Artist, dispatch_uid="d__a_b")
def delete__artist_biography(sender, instance, **kwargs):
    """
    If file already exists, but new file uploaded, delete existing file.
    """
    biography_field_changed = ('biography' in instance.changed_fields)
    if biography_field_changed:
        # get_field_diff returns (old, new); index 0 is the file being
        # replaced, which would otherwise be orphaned on disk.
        previous_file = instance.get_field_diff('biography')[0]
        if previous_file:
            delete_uploaded_file(previous_file.path)
@receiver(pre_delete, sender=Artist, dispatch_uid="d__a")
def delete__artist(sender, instance, **kwargs):
    """
    Deletes `biography` uploaded file when Artist is deleted.
    """
    # Guard: artists without an uploaded biography have nothing to remove.
    if not instance.biography:
        return
    delete_uploaded_file(instance.biography.path)
@receiver(pre_save, sender=Genre)
def slugify__genre(sender, instance, slugify=slugify, **kwargs):
    """
    Manages the slugifying of `name`.

    Genre [`name`] --> `slug`.
    """
    # (Re)build the slug only when the name changed or no slug exists yet.
    if 'name' in instance.changed_fields or not instance.slug:
        instance.slug = slugify(instance.__str__().lower())
@receiver(pre_save, sender=Artwork)
def name_slugify__artwork(sender, instance, slugify=slugify, **kwargs):
    """
    Manages the uniquely numbered suffix for `title`.

    Artwork [`title` + suffix] --> `name' --> `slug`.
    UploadedImage provides `name` and `slug`.
    """
    title_field_changed = ('title' in instance.changed_fields)
    if (title_field_changed or not instance.name):
        _title=instance.title.lower()
        # Per-title ratchet row hands out an ever-increasing suffix so equal
        # titles still yield unique names/slugs.
        _ratchet, _created = ArtworkRatchet.ratchets.get_or_create(title=_title)
        _incremented_suffix = _ratchet.suffix + 1
        _ratchet.suffix = _incremented_suffix
        _ratchet.save()
        # Zero-pad to three digits, e.g. "007".
        _suffix = str.zfill(str(_incremented_suffix), 3)
        # NOTE(review): name keeps the original-case title while the ratchet
        # key is lower-cased -- looks intentional, but confirm.
        instance.name = '-'.join([instance.title, _suffix])
        instance.slug = slugify(instance.name)
@receiver(pre_save, sender=Artwork)
def calculate_artwork_dimensions(sender, instance, **kwargs):
    """
    Calculates artwork measurements in other measurement system.

    Imperial input (inches, 'I') is mirrored to metric (centimeters, 'C'),
    and vice-versa, so both unit systems are always populated.
    """
    # Conversion factors as exact decimal literals.  Fixed: constructing
    # Decimal from a float literal (``Decimal(2.54)``) bakes binary
    # floating-point error into the factor; string construction keeps it
    # exact, which is the point of using Decimal here.
    INCHES_TO_CM = Decimal('2.54')
    CM_TO_INCHES = Decimal('0.394')
    dimension_fields_changed = ('image_height' in instance.changed_fields or
                                'image_width' in instance.changed_fields or
                                'measurement_units' in instance.changed_fields)
    # Also recompute when neither dimension is populated yet (new instance).
    if (dimension_fields_changed or
            not instance.image_height and not instance.image_width):
        if instance.measurement_units == 'I':
            instance.height_imperial = instance.image_height
            instance.width_imperial = instance.image_width
            instance.imperial_units = 'I'
            instance.height_metric = round((INCHES_TO_CM * instance.image_height), 2)
            instance.width_metric = round((INCHES_TO_CM * instance.image_width), 2)
            instance.metric_units = 'C'
        elif instance.measurement_units == 'C':
            instance.height_metric = instance.image_height
            instance.width_metric = instance.image_width
            instance.metric_units = 'C'
            instance.height_imperial = round((CM_TO_INCHES * instance.image_height), 2)
            instance.width_imperial = round((CM_TO_INCHES * instance.image_width), 2)
            instance.imperial_units = 'I'
@receiver(post_save, sender=Artwork)
def ensure_artwork_uniquely_representative(sender, instance, created, **kwargs):
    """
    Ensures that only one artwork is representative for any one artist.
    """
    if instance.is_representative:
        # Every other artwork by the same artist...
        _artworks = Artwork.artworks.filter(artist__slug=instance.artist.slug
                                            ).exclude(slug=instance.slug)
        for _artwork in _artworks:
            if _artwork.is_representative:
                # ...loses representative status in favor of this instance.
                _artwork.is_representative = False
                if DJANGO_SAVE_UPDATEABLE:
                    _artwork.save(update_fields=['is_representative'])
                else:
                    _artwork.save()
@receiver(pre_save, sender=Event)
def slugify__event(sender, instance, slugify=slugify, **kwargs):
    """
    Manages the uniquely numbered suffix for `title`.

    Event [`title` + suffix] --> `slug`.
    """
    title_field_changed = ('title' in instance.changed_fields)
    # Fixed: previously tested ``not instance.title``, which skipped slug
    # generation for a new Event that already has a title but no slug yet.
    # Test ``slug`` instead, consistent with slugify__artist above.
    if (title_field_changed or not instance.slug):
        _title = instance.title.lower()
        # Per-title ratchet row hands out an ever-increasing suffix so equal
        # titles still yield unique slugs.
        _ratchet, _created = EventRatchet.ratchets.get_or_create(title=_title)
        _incremented_suffix = _ratchet.suffix + 1
        _ratchet.suffix = _incremented_suffix
        _ratchet.save()
        # Zero-pad to three digits, e.g. "007".
        _suffix = str.zfill(str(_incremented_suffix), 3)
        instance.slug = slugify('-'.join([_title, _suffix]))
@receiver(pre_save, sender=Event, dispatch_uid="d__e_i")
def delete__event_image(sender, instance, **kwargs):
    """
    If image already exists, but new image uploaded, deletes existing image file.
    """
    if 'image' not in instance.changed_fields:
        return
    # get_field_diff returns (old, new); the previous file, if any, is now
    # orphaned and must be removed from disk.
    previous_image = instance.get_field_diff('image')[0]
    if previous_image:
        delete_uploaded_file(previous_image.path)
@receiver(pre_delete, sender=Event, dispatch_uid="d__e")
def delete__event(sender, instance, **kwargs):
    """
    Deletes `image` uploaded file when Event is deleted.
    """
    # Fixed: guard against an Event with no uploaded image -- accessing
    # ``.path`` on an empty FileField raises.  Mirrors the guard used in
    # delete__artist above.
    if instance.image:
        delete_uploaded_file(instance.image.path)
#EOF - artlaasya signals
|
hotsyk/uapython2
|
uapython2/users/migrations/0003_auto_20150809_0918.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds optional demographic fields (age, country) to the custom user
    # model.  Auto-generated by Django's makemigrations; do not hand-edit
    # except to squash.

    dependencies = [
        ('users', '0002_user_activity'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='age',
            # Nullable so existing rows need no default value.
            field=models.PositiveIntegerField(null=True, blank=True),
        ),
        migrations.AddField(
            model_name='user',
            name='country',
            # blank=True with no null: empty string is the "unset" value.
            field=models.CharField(max_length=30, verbose_name='Country', blank=True),
        ),
    ]
|
larsoner/mne-python
|
examples/visualization/xhemi.py
|
# -*- coding: utf-8 -*-
"""
===========================
Cross-hemisphere comparison
===========================
This example illustrates how to visualize the difference between activity in
the left and the right hemisphere. The data from the right hemisphere is
mapped to the left hemisphere, and then the difference is plotted. For more
information see :func:`mne.compute_source_morph`.
"""
# Author: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD-3-Clause
# %%
import mne
# Load the sample source estimate shipped with the MNE sample dataset.
data_dir = mne.datasets.sample.data_path()
subjects_dir = data_dir / 'subjects'
stc_path = data_dir / 'MEG' / 'sample' / 'sample_audvis-meg-eeg'
stc = mne.read_source_estimate(stc_path, 'sample')

# First, morph the data to fsaverage_sym, for which we have left_right
# registrations:
stc = mne.compute_source_morph(stc, 'sample', 'fsaverage_sym', smooth=5,
                               warn=False,
                               subjects_dir=subjects_dir).apply(stc)

# Compute a morph-matrix mapping the right to the left hemisphere,
# and vice-versa.
morph = mne.compute_source_morph(stc, 'fsaverage_sym', 'fsaverage_sym',
                                 spacing=stc.vertices, warn=False,
                                 subjects_dir=subjects_dir, xhemi=True,
                                 verbose='error')  # creating morph map
stc_xhemi = morph.apply(stc)

# Now we can subtract them and plot the result:
diff = stc - stc_xhemi

diff.plot(hemi='lh', subjects_dir=subjects_dir, initial_time=0.07,
          size=(800, 600))
|
Purg/SMQTK
|
python/smqtk/compute_functions.py
|
"""
Collection of higher level functions to perform operational tasks.
Some day, this module could have a companion module containing the CLI logic
for these functions instead of scripts in ``<source>/bin/scripts``.
"""
import collections
import logging
import numpy
from smqtk.utils import (
bin_utils,
bit_utils,
parallel,
)
__author__ = "paul.tunison@kitware.com"
def compute_many_descriptors(file_elements, descr_generator, descr_factory,
                             descr_index, batch_size=None, overwrite=False,
                             procs=None, **kwds):
    """
    Compute descriptors for each data file path, yielding
    (filepath, DescriptorElement) tuple pairs in the order that they were
    input.

    *Note:* **This function currently only operated over images due to the
    specific data validity check/filter performed.*

    :param file_elements: Iterable of DataFileElement instances of files to
        work on.
    :type file_elements: collections.Iterable[smqtk.representation.data_element
        .file_element.DataFileElement]

    :param descr_generator: DescriptorGenerator implementation instance
        to use to generate descriptor vectors.
    :type descr_generator: smqtk.algorithms.DescriptorGenerator

    :param descr_factory: DescriptorElement factory to use when producing
        descriptor vectors.
    :type descr_factory: smqtk.representation.DescriptorElementFactory

    :param descr_index: DescriptorIndex instance to add generated descriptors
        to. When given a non-zero batch size, we add descriptors to the given
        index in batches of that size. When a batch size is not given, we add
        all generated descriptors to the index after they have been generated.
    :type descr_index: smqtk.representation.DescriptorIndex

    :param batch_size: Optional number of elements to asynchronously compute
        at a time. This is useful when it is desired for this function to yield
        results before all descriptors have been computed, yet still take
        advantage of any batch asynchronous computation optimizations a
        particular DescriptorGenerator implementation may have. If this is 0 or
        None (false-evaluating), this function blocks until all descriptors have
        been generated.
    :type batch_size: None | int | long

    :param overwrite: If descriptors from a particular generator already exist
        for particular data, re-compute the descriptor for that data and set
        into the generated DescriptorElement.
    :type overwrite: bool

    :param procs: Tell the DescriptorGenerator to use a specific number of
        threads/cores.
    :type procs: None | int

    :param kwds: Remaining keyword-arguments that are to be passed into the
        ``compute_descriptor_async`` function on the descriptor generator.
    :type kwds: dict

    :return: Generator that yields (filepath, DescriptorElement) for each file
        path given, in the order file paths were provided.
    :rtype: __generator[(str, smqtk.representation.DescriptorElement)]
    """
    # NOTE(review): this module targets Python 2 (``dict.itervalues`` below).
    log = logging.getLogger(__name__)

    # Capture of generated elements in order of generation
    #: :type: deque[smqtk.representation.data_element.file_element.DataFileElement]
    dfe_deque = collections.deque()

    # Counts for logging
    total = 0
    unique = 0

    def iter_capture_elements():
        # Pass-through generator: records each element so computed
        # descriptors can be paired back to their source elements later.
        for dfe in file_elements:
            dfe_deque.append(dfe)
            yield dfe

    if batch_size:
        log.debug("Computing in batches of size %d", batch_size)

        batch_i = 0

        for dfe in iter_capture_elements():
            # elements captured ``dfe_deque`` in iter_capture_elements
            if len(dfe_deque) == batch_size:
                batch_i += 1
                log.debug("Computing batch %d", batch_i)

                total += len(dfe_deque)
                m = descr_generator.compute_descriptor_async(
                    dfe_deque, descr_factory, overwrite, procs, **kwds
                )
                unique += len(m)
                log.debug("-- Processed %d so far (%d total data elements "
                          "input)", unique, total)

                log.debug("-- adding to index")
                descr_index.add_many_descriptors(m.itervalues())

                log.debug("-- yielding generated descriptor elements")
                for e in dfe_deque:
                    # noinspection PyProtectedMember
                    yield e._filepath, m[e]

                dfe_deque.clear()

        if len(dfe_deque):
            # Final, partial batch of whatever elements remain.
            log.debug("Computing final batch of size %d",
                      len(dfe_deque))

            total += len(dfe_deque)
            m = descr_generator.compute_descriptor_async(
                dfe_deque, descr_factory, overwrite, procs, **kwds
            )
            unique += len(m)
            log.debug("-- Processed %d so far (%d total data elements "
                      "input)", unique, total)

            log.debug("-- adding to index")
            descr_index.add_many_descriptors(m.itervalues())

            log.debug("-- yielding generated descriptor elements")
            for dfe in dfe_deque:
                # noinspection PyProtectedMember
                yield dfe._filepath, m[dfe]
    else:
        log.debug("Using single async call")

        # Just do everything in one call
        log.debug("Computing descriptors")
        m = descr_generator.compute_descriptor_async(
            iter_capture_elements(), descr_factory,
            overwrite, procs, **kwds
        )

        log.debug("Adding to index")
        descr_index.add_many_descriptors(m.itervalues())

        log.debug("yielding generated elements")
        for dfe in dfe_deque:
            # noinspection PyProtectedMember
            yield dfe._filepath, m[dfe]
def compute_hash_codes(uuids, index, functor, hash2uuids=None,
                       report_interval=1.0, use_mp=False):
    """
    Given an iterable of DescriptorElement UUIDs, asynchronously access them
    from the given ``index``, asynchronously compute hash codes via ``functor``
    and convert to an integer, yielding (DescriptorElement, hash-int) pairs.

    The dictionary input and returned is of the same format used by the
    ``LSHNearestNeighborIndex`` implementation (mapping pointed to by the
    ``hash2uuid_cache_filepath`` attribute).

    :param uuids: Sequence of UUIDs to process
    :type uuids: collections.Iterable[collections.Hashable]

    :param index: Descriptor index to pull from.
    :type index: smqtk.representation.descriptor_index.DescriptorIndex

    :param functor: LSH hash code functor instance
    :type functor: smqtk.algorithms.LshFunctor

    :param hash2uuids: Hash code to UUID set to update, which is also returned
        from this function. If not provided, we will start a new mapping, which
        is returned instead.
    :type hash2uuids: dict[int|long, set[collections.Hashable]]

    :param report_interval: Frequency in seconds at which we report speed and
        completion progress via logging. Reporting is disabled when logging
        is not in debug and this value is greater than 0.
    :type report_interval: float

    :param use_mp: If multiprocessing should be used for parallel
        computation vs. threading. Reminder: This will copy currently loaded
        objects onto worker processes (e.g. the given index), which could lead
        to dangerously high RAM consumption.
    :type use_mp: bool

    :return: The ``update_map`` provided or, if None was provided, a new
        mapping.
    :rtype: dict[int|long, set[collections.Hashable]]
    """
    if hash2uuids is None:
        hash2uuids = {}

    # TODO: parallel map fetch elements from index?
    #       -> separately from compute

    def get_hash(u):
        # Fetch the descriptor's vector and fold its bit-vector hash code
        # down to a single integer.
        v = index.get_descriptor(u).vector()
        return u, bit_utils.bit_vector_to_int_large(functor.get_hash(v))

    # Setup log and reporting function
    log = logging.getLogger(__name__)
    report_state = [0] * 7

    # noinspection PyGlobalUndefined
    if log.getEffectiveLevel() > logging.DEBUG or report_interval <= 0:
        # No-op reporter when debug logging is off or reporting is disabled.
        def report_progress(*_):
            return
        log.debug("Not logging progress")
    else:
        log.debug("Logging progress at %f second intervals", report_interval)
        report_progress = bin_utils.report_progress

    log.debug("Starting computation")
    for uuid, hash_int in parallel.parallel_map(get_hash, uuids,
                                                ordered=False,
                                                use_multiprocessing=use_mp):
        # Accumulate UUIDs under their shared hash code.
        if hash_int not in hash2uuids:
            hash2uuids[hash_int] = set()
        hash2uuids[hash_int].add(uuid)

        # Progress reporting
        report_progress(log.debug, report_state, report_interval)

    # Final report
    # NOTE(review): mutating report_state[1] presumably forces a final report
    # regardless of the interval timer -- confirm against
    # bin_utils.report_progress's state layout.
    report_state[1] -= 1
    report_progress(log.debug, report_state, 0.0)

    return hash2uuids
def mb_kmeans_build_apply(index, mbkm, initial_fit_size):
    """
    Build the MiniBatchKMeans centroids based on the descriptors in the given
    index, then predicting descriptor clusters with the final result model.

    If the given index is empty, no fitting or clustering occurs and an empty
    dictionary is returned.

    :param index: Index of descriptors
    :type index: smqtk.representation.DescriptorIndex

    :param mbkm: Scikit-Learn MiniBatchKMeans instead to train and then use for
        prediction
    :type mbkm: sklearn.cluster.MiniBatchKMeans

    :param initial_fit_size: Number of descriptors to run an initial fit with.
        This brings the advantage of choosing a best initialization point from
        multiple.
    :type initial_fit_size: int

    :return: Dictionary of the cluster label (integer) to the set of descriptor
        UUIDs belonging to that cluster.
    :rtype: dict[int, set[collections.Hashable]]
    """
    log = logging.getLogger(__name__)

    ifit_completed = False
    k_deque = collections.deque()
    d_fitted = 0

    log.info("Getting index keys (shuffled)")
    # Sort first so the seeded shuffle is deterministic regardless of the
    # index's native iteration order.
    index_keys = sorted(index.iterkeys())
    numpy.random.seed(mbkm.random_state)
    numpy.random.shuffle(index_keys)

    def parallel_iter_vectors(descriptors):
        """ Get the vectors for the descriptors given.
        Not caring about order returned.
        """
        return parallel.parallel_map(lambda d: d.vector(), descriptors,
                                     use_multiprocessing=False)

    def get_vectors(k_iter):
        """ Get numpy array of descriptor vectors (2D array returned) """
        return numpy.array(list(
            parallel_iter_vectors(index.get_many_descriptors(k_iter))
        ))

    log.info("Collecting iteratively fitting model")
    rps = [0] * 7
    for i, k in enumerate(index_keys):
        k_deque.append(k)
        bin_utils.report_progress(log.debug, rps, 1.)

        # First fill an ``initial_fit_size`` batch for a full fit(); after
        # that, run partial_fit() on batches of ``mbkm.batch_size``.
        if initial_fit_size and not ifit_completed:
            if len(k_deque) == initial_fit_size:
                log.info("Initial fit using %d descriptors", len(k_deque))
                log.info("- collecting vectors")
                vectors = get_vectors(k_deque)
                log.info("- fitting model")
                mbkm.fit(vectors)
                log.info("- cleaning")
                d_fitted += len(vectors)
                k_deque.clear()
                ifit_completed = True
        elif len(k_deque) == mbkm.batch_size:
            log.info("Partial fit with batch size %d", len(k_deque))
            log.info("- collecting vectors")
            vectors = get_vectors(k_deque)
            log.info("- fitting model")
            mbkm.partial_fit(vectors)
            log.info("- cleaning")
            d_fitted += len(k_deque)
            k_deque.clear()

    # Final fit with any remaining descriptors
    if k_deque:
        log.info("Final partial fit of size %d", len(k_deque))
        log.info('- collecting vectors')
        vectors = get_vectors(k_deque)
        log.info('- fitting model')
        mbkm.partial_fit(vectors)
        log.info('- cleaning')
        d_fitted += len(k_deque)
        k_deque.clear()

    log.info("Computing descriptor classes with final KMeans model")
    mbkm.verbose = False
    d_classes = collections.defaultdict(set)
    d_uv_iter = parallel.parallel_map(lambda d: (d.uuid(), d.vector()),
                                      index,
                                      use_multiprocessing=False,
                                      name="uv-collector")
    # TODO: Batch predict call inputs to something larger than one at a time.
    # NOTE: tuple-parameter lambda below is Python 2-only syntax.
    d_uc_iter = parallel.parallel_map(
        lambda (u, v): (u, mbkm.predict(v[numpy.newaxis, :])[0]),
        d_uv_iter,
        use_multiprocessing=False,
        name="uc-collector")
    rps = [0] * 7
    for uuid, c in d_uc_iter:
        d_classes[c].add(uuid)
        bin_utils.report_progress(log.debug, rps, 1.)
    # NOTE(review): mutating rps[1] presumably forces a final progress report
    # regardless of the interval timer -- confirm against bin_utils.
    rps[1] -= 1
    bin_utils.report_progress(log.debug, rps, 0)

    return d_classes
|
mattduan/proof
|
pk/generator/IDGenerator.py
|
"""
Interface to be implemented by id generators. It is possible
that some implementations might not require all the arguments,
for example MySQL will not require a keyInfo Object, while the
IDBroker implementation does not require a Connection as
it only rarely needs one and retrieves a connection from the
Connection pool service only when needed.
"""
__version__='$Revision: 3194 $'[11:-2]
import logging
import util.logger.Logger as Logger
import proof.ProofException as ProofException
class IDGenerator:
    """Interface to be implemented by id generators.

    Some implementations might not require all the arguments; for example
    MySQL will not require a key_info object, while the IDBroker
    implementation does not require a Connection as it only rarely needs one
    and retrieves a connection from the Connection pool service when needed.
    """

    __is__ = 'interface'

    def __init__(self, logger=None):
        # Wrap the supplied logger in the project's Logger facade and expose
        # its ``write`` method under the conventional ``self.log`` name.
        self.__logger = Logger.makeLogger(logger)
        self.log = self.__logger.write

    def getId(self, connection=None, key_info=None):
        """ Returns an id.

        @param connection A Connection.
        @param key_info an Object that contains additional info.
        """
        # Error messages fixed: class-name casing and "overrided" typo.
        raise ProofException.ProofNotImplementedException(
            "IDGenerator.getId: needs to be overridden.")

    def isPriorToInsert(self):
        """ A flag to determine the timing of the id generation.

        @return a <code>boolean</code> value
        """
        raise ProofException.ProofNotImplementedException(
            "IDGenerator.isPriorToInsert: needs to be overridden.")

    def isPostInsert(self):
        """ A flag to determine the timing of the id generation.

        @return Whether id is available post-<code>insert</code>.
        """
        raise ProofException.ProofNotImplementedException(
            "IDGenerator.isPostInsert: needs to be overridden.")

    def isConnectionRequired(self):
        """ A flag to determine whether a Connection is required to
        generate an id.

        @return a <code>boolean</code> value
        """
        raise ProofException.ProofNotImplementedException(
            "IDGenerator.isConnectionRequired: needs to be overridden.")
|
qwhelan/asv
|
asv/plugins/virtualenv.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals, print_function
from distutils.version import LooseVersion
import sys
import re
import os
import six
from .. import environment
from ..console import log
from .. import util
WIN = (os.name == "nt")
class Virtualenv(environment.Environment):
    """
    Manage an environment using virtualenv.
    """
    tool_name = "virtualenv"

    def __init__(self, conf, python, requirements, tagged_env_vars):
        """
        Parameters
        ----------
        conf : Config instance

        python : str
            Version of Python.  Must be of the form "MAJOR.MINOR", or
            "pypy"/"pypyX.Y" for PyPy.

        requirements : dict
            Dictionary mapping a PyPI package name to a version
            identifier string.

        tagged_env_vars : dict
            Environment variables to apply to the environment.

        Raises
        ------
        environment.EnvironmentUnavailable
            If no matching Python executable is found, or the
            ``virtualenv`` package is not importable.
        """
        executable = Virtualenv._find_python(python)
        if executable is None:
            raise environment.EnvironmentUnavailable(
                "No executable found for python {0}".format(python))

        self._executable = executable
        self._python = python
        self._requirements = requirements
        super(Virtualenv, self).__init__(conf,
                                         python,
                                         requirements,
                                         tagged_env_vars)

        # Fail early if the virtualenv package itself is missing; _setup()
        # relies on ``python -mvirtualenv``.
        try:
            import virtualenv  # noqa: F401 (availability check only)
        except ImportError:
            raise environment.EnvironmentUnavailable(
                "virtualenv package not installed")

    @staticmethod
    def _find_python(python):
        """Find Python executable for the given Python version.

        Returns the interpreter path, or None when nothing suitable is on
        the PATH and the currently running interpreter does not match.
        """
        is_pypy = python.startswith("pypy")

        # Parse python specifier: "pypy"/"pypyX.Y" run as-is; plain version
        # numbers become "pythonX.Y".
        if is_pypy:
            executable = python
            if python == 'pypy':
                python_version = '2'
            else:
                python_version = python[4:]
        else:
            python_version = python
            executable = "python{0}".format(python_version)

        # Find Python executable on path
        try:
            return util.which(executable)
        except IOError:
            pass

        # Maybe the current one is correct?
        current_is_pypy = hasattr(sys, 'pypy_version_info')
        current_versions = ['{0[0]}'.format(sys.version_info),
                            '{0[0]}.{0[1]}'.format(sys.version_info)]

        if is_pypy == current_is_pypy and python_version in current_versions:
            return sys.executable

        return None

    @property
    def name(self):
        """
        Get a name to uniquely identify this environment.
        """
        python = self._python
        if self._python.startswith('pypy'):
            # get_env_name adds py-prefix
            python = python[2:]
        return environment.get_env_name(self.tool_name,
                                        python,
                                        self._requirements,
                                        self._tagged_env_vars)

    @classmethod
    def matches(cls, python):
        """Return whether this tool can provide the requested *python*."""
        # Fixed: the first parameter of a classmethod receives the class --
        # renamed ``self`` to the conventional ``cls``.
        if not (re.match(r'^[0-9].*$', python) or re.match(r'^pypy[0-9.]*$', python)):
            # The python name should be a version number, or pypy+number
            return False

        try:
            import virtualenv
        except ImportError:
            return False
        else:
            if LooseVersion(virtualenv.__version__) == LooseVersion('1.11.0'):
                log.warning(
                    "asv is not compatible with virtualenv 1.11 due to a bug in "
                    "setuptools.")
            if LooseVersion(virtualenv.__version__) < LooseVersion('1.10'):
                # Fixed log-message typo: "much" -> "must".
                log.warning(
                    "If using virtualenv, it must be at least version 1.10")

        executable = Virtualenv._find_python(python)
        return executable is not None

    def _setup(self):
        """
        Setup the environment on disk using virtualenv.
        Then, all of the requirements are installed into
        it using `pip install`.
        """
        env = dict(os.environ)
        env.update(self.build_env_vars)

        log.info("Creating virtualenv for {0}".format(self.name))
        util.check_call([
            sys.executable,
            "-mvirtualenv",
            '--no-site-packages',
            "-p",
            self._executable,
            self._path], env=env)

        log.info("Installing requirements for {0}".format(self.name))
        self._install_requirements()

    def _install_requirements(self):
        """Bootstrap wheel/pip, then install the configured requirements."""
        if sys.version_info[:2] == (3, 2):
            # Older wheel/pip pins -- presumably the last releases compatible
            # with Python 3.2.
            pip_args = ['install', '-v', 'wheel<0.29.0', 'pip<8']
        else:
            pip_args = ['install', '-v', 'wheel', 'pip>=8']

        env = dict(os.environ)
        env.update(self.build_env_vars)

        self._run_pip(pip_args, env=env)

        if self._requirements:
            args = ['install', '-v', '--upgrade']
            for key, val in six.iteritems(self._requirements):
                pkg = key
                # "pip+<name>" keys force pip installation; strip the prefix.
                if key.startswith('pip+'):
                    pkg = key[4:]
                if val:
                    args.append("{0}=={1}".format(pkg, val))
                else:
                    args.append(pkg)
            self._run_pip(args, timeout=self._install_timeout, env=env)

    def _run_pip(self, args, **kwargs):
        # Run pip via python -m pip, so that it works on Windows when
        # upgrading pip itself, and avoids shebang length limit on Linux
        return self.run_executable('python', ['-mpip'] + list(args), **kwargs)

    def run(self, args, **kwargs):
        """Run the environment's python interpreter with *args*."""
        log.debug("Running '{0}' in {1}".format(' '.join(args), self.name))
        return self.run_executable('python', args, **kwargs)
|
Pathel/deuterium
|
src/utils/inlinefunc.py
|
"""
Inlinefunc
This is a simple inline text language for use to custom-format text
in Evennia. It is applied BEFORE ANSI/MUX parsing is applied.
To activate Inlinefunc, settings.INLINEFUNC_ENABLED must be set.
The format is straightforward:
{funcname([arg1,arg2,...]) text {/funcname
Example:
"This is {pad(50,c,-) a center-padded text{/pad of width 50."
->
"This is -------------- a center-padded text--------------- of width 50."
This can be inserted in any text, operated on by the parse_inlinefunc
function. funcname() (no space is allowed between the name and the
argument tuple) is picked from a selection of valid functions from
settings.INLINEFUNC_MODULES.
Commands can be nested, and will applied inside-out. For correct
parsing their end-tags must match the starting tags in reverse order.
Example:
"The time is {pad(30){time(){/time{/padright now."
->
"The time is Oct 25, 11:09 right now."
An inline function should have the following call signature:
def funcname(text, *args)
where the text is always the part between {funcname(args) and
{/funcname and the *args are taken from the appropriate part of the
call. It is important that the inline function properly clean the
incoming args, checking their type and replacing them with sane
defaults if needed. If impossible to resolve, the unmodified text
should be returned. The inlinefunc should never cause a traceback.
"""
import re
from django.conf import settings
from src.utils import utils
# inline functions
def pad(text, *args, **kwargs):
    """Pad to width. pad(text, width=78, align='c', fillchar=' ')

    Malformed arguments fall back to their defaults -- per the module
    contract, an inlinefunc should never cause a traceback.
    """
    width = 78
    align = 'c'
    fillchar = ' '
    for iarg, arg in enumerate(args):
        if iarg == 0:
            width = int(arg) if arg.isdigit() else width
        elif iarg == 1:
            align = arg if arg in ('c', 'l', 'r') else align
        elif iarg == 2:
            # Fixed: guard against an empty argument (e.g. "{pad(10,c,)"),
            # which previously raised IndexError on ``arg[0]``.
            fillchar = arg[0] if arg else fillchar
        else:
            break
    return utils.pad(text, width=width, align=align, fillchar=fillchar)
def crop(text, *args, **kwargs):
    """Crop to width. crop(text, width=78, suffix='[...]')"""
    width = 78
    suffix = "[...]"
    # Positional args: 0 -> width (digits only), 1 -> suffix; extras ignored.
    if len(args) > 0 and args[0].isdigit():
        width = int(args[0])
    if len(args) > 1:
        suffix = args[1]
    return utils.crop(text, width=width, suffix=suffix)
def wrap(text, *args, **kwargs):
    """Wrap/Fill text to width. fill(text, width=78, indent=0)"""
    width = 78
    indent = 0
    # Positional args: 0 -> width, 1 -> indent; both must be all-digits,
    # otherwise the defaults stand. Extra args are ignored.
    if args:
        if args[0].isdigit():
            width = int(args[0])
        if len(args) > 1 and args[1].isdigit():
            indent = int(args[1])
    return utils.wrap(text, width=width, indent=indent)
def time(text, *args, **kwargs):
    """Insert the current time, optionally with a custom strftime format."""
    # Local import under an alias: this function shadows the name ``time``.
    import time as _time
    fmt = str(args[0]) if args and args[0] else "%h %d, %H:%M"
    return _time.strftime(fmt)
def you(text, *args, **kwargs):
    """Insert the name of the puppet attached to the calling session."""
    session = kwargs.get("session")
    if session and session.puppet:
        return session.puppet.key
    # No session or no puppet attached: generic second-person fallback.
    return "You"
# load functions from module (including this one, if using default settings)
_INLINE_FUNCS = {}
for module in utils.make_iter(settings.INLINEFUNC_MODULES):
    _INLINE_FUNCS.update(utils.all_from_module(module))
_INLINE_FUNCS.pop("inline_func_parse", None)

# dynamically build regexes for found functions
_RE_FUNCFULL = r"\{%s\((.*?)\)(.*?){/%s"
_RE_FUNCFULL_SINGLE = r"\{%s\((.*?)\)"
_RE_FUNCSTART = r"\{((?:%s))"
_RE_FUNCEND = r"\{/((?:%s))"
_RE_FUNCSPLIT = r"(\{/*(?:%s)(?:\(.*?\))*)"
_RE_FUNCCLEAN = r"\{%s\(.*?\)|\{/%s"

# Fixed: regex flags were previously combined with ``&`` (bitwise AND),
# which evaluates to 0 and silently disabled BOTH DOTALL and MULTILINE.
# Flags must be OR'ed together.
_RE_FLAGS = re.DOTALL | re.MULTILINE

# Map funcname -> (callable, full-block regex, single-tag regex).
_INLINE_FUNCS = dict((key, (func, re.compile(_RE_FUNCFULL % (key, key), _RE_FLAGS),
                            re.compile(_RE_FUNCFULL_SINGLE % key, _RE_FLAGS)))
                     for key, func in _INLINE_FUNCS.items() if callable(func))
_FUNCSPLIT_REGEX = re.compile(_RE_FUNCSPLIT % r"|".join([key for key in _INLINE_FUNCS]), _RE_FLAGS)
_FUNCSTART_REGEX = re.compile(_RE_FUNCSTART % r"|".join([key for key in _INLINE_FUNCS]), _RE_FLAGS)
_FUNCEND_REGEX = re.compile(_RE_FUNCEND % r"|".join([key for key in _INLINE_FUNCS]), _RE_FLAGS)
_FUNCCLEAN_REGEX = re.compile("|".join([_RE_FUNCCLEAN % (key, key) for key in _INLINE_FUNCS]), _RE_FLAGS)
# inline parser functions
def _execute_inline_function(funcname, text, session):
    """
    Get the enclosed text between {funcname(...) and {/funcname
    and execute the inline function to replace the whole block
    with the result.

    Note that this lookup is "dumb" - we just grab the first end
    tag we find. So to work correctly this function must be called
    "inside out" on a nested function tree, so each call only works
    on a "flat" tag.
    """
    func = _INLINE_FUNCS[funcname][0]
    full_block_regex = _INLINE_FUNCS[funcname][1]

    def _replace(match):
        # Replace the whole matched block with the function's return value.
        arglist = [piece.strip() for piece in match.group(1).split(",")]
        enclosed = match.group(2)
        return func(enclosed, *arglist, **{"session": session})

    return full_block_regex.sub(_replace, text)
def _execute_inline_single_function(funcname, text, session):
    """
    Get the arguments of a single function call (no matching end tag)
    and execute it with an empty text input.
    """
    func = _INLINE_FUNCS[funcname][0]
    single_tag_regex = _INLINE_FUNCS[funcname][2]

    def _replace(match):
        # A lone tag encloses no text, so call with an empty string.
        arglist = [piece.strip() for piece in match.group(1).split(",")]
        return func("", *arglist, **{"session": session})

    return single_tag_regex.sub(_replace, text)
def parse_inlinefunc(text, strip=False, session=None):
    """
    Parse inline function-replacement.

    strip - remove all supported inlinefuncs from text
    session - session calling for the parsing
    """
    if strip:
        # strip all functions without executing them
        return _FUNCCLEAN_REGEX.sub("", text)

    # Pass 1: resolve matched {func(...) ... {/func pairs.  Parts are pushed
    # onto a stack; when an end tag appears, parts are popped and re-joined
    # until the matching start tag is found, so nested calls resolve
    # inside-out.
    stack = []
    for part in _FUNCSPLIT_REGEX.split(text):
        endtag = _FUNCEND_REGEX.match(part)
        if endtag:
            # an end tag
            endname = endtag.group(1)
            while stack:
                new_part = stack.pop()
                part = new_part + part  # add backwards -> fowards
                starttag = _FUNCSTART_REGEX.match(new_part)
                if starttag:
                    startname = starttag.group(1)
                    if startname == endname:
                        # Matching start tag found: execute the inline
                        # function on the accumulated flat span.
                        part = _execute_inline_function(startname, part, session)
                        break
        stack.append(part)

    # Pass 2: handle single functions without matching end tags; these are
    # treated as being called with an empty string as text argument.
    outstack = []
    for part in _FUNCSPLIT_REGEX.split("".join(stack)):
        starttag = _FUNCSTART_REGEX.match(part)
        if starttag:
            startname = starttag.group(1)
            part = _execute_inline_single_function(startname, part, session)
        outstack.append(part)
    return "".join(outstack)
def _test():
    """Manual smoke test: nested and adjacent {pad spans must all resolve."""
    # this should all be handled
    s = "This is a text with a{pad(78,c,-)text {pad(5)of{/pad {pad(30)nice{/pad size{/pad inside {pad(4,l)it{/pad."
    s2 = "This is a text with a----------------text of nice size---------------- inside it ."
    t = parse_inlinefunc(s)
    assert(t == s2)
    return t
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractInfiniteNovelTranslations.py
|
def extractInfiniteNovelTranslations(item):
    """
    # Infinite Novel Translations
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip items with no chapter/volume info, and preview posts.
    if not (chp or vol):
        return None
    if 'preview' in item['title'].lower():
        return None
    # (feed tag, canonical series name, release type)
    tagmap = [
        ('Ascendance of a Bookworm', 'Ascendance of a Bookworm', 'translated'),
        ('Yomigaeri no Maou', 'Yomigaeri no Maou', 'translated'),
        ('Kakei Senki wo Kakageyo!', 'Kakei Senki wo Kakageyo!', 'translated'),
        ('Kuro no Shoukan Samurai', 'Kuro no Shoukan Samurai', 'translated'),
        ('Nidoume no Jinsei wo Isekai de', 'Nidoume no Jinsei wo Isekai de', 'translated'),
        ('Hachi-nan', 'Hachinan tte, Sore wa Nai Deshou!', 'translated'),
        ('Summoned Slaughterer', 'Yobidasareta Satsuriku-sha', 'translated'),
        ('maou no utsuwa', 'Maou no Utsuwa', 'translated'),
        ('Maou no Ki', 'Maou no Ki', 'translated'),
        ('Imperial wars and my stratagems', 'Imperial Wars and my Stratagems', 'translated'),
        ('Kuro no Shoukanshi', 'Kuro no Shoukanshi', 'translated'),
        ('I work as Healer in Another World\'s Labyrinth City', 'I work as Healer in Another World\'s Labyrinth City', 'translated'),
        ('The Spearmaster and The Black Cat', 'The Spearmaster and The Black Cat', 'translated'),
        ('Hakai no Miko', 'Hakai no Miko', 'translated'),
    ]
    for tag, series, tl_type in tagmap:
        if tag not in item['tags']:
            continue
        return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
|
mick-d/nipype
|
nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..brains import insertMidACPCpoint
def test_insertMidACPCpoint_inputs():
    """Verify the input-trait metadata of insertMidACPCpoint."""
    expected = dict(
        args=dict(argstr='%s'),
        environ=dict(nohash=True, usedefault=True),
        ignore_exception=dict(nohash=True, usedefault=True),
        inputLandmarkFile=dict(argstr='--inputLandmarkFile %s'),
        outputLandmarkFile=dict(argstr='--outputLandmarkFile %s',
                                hash_files=False),
        terminal_output=dict(deprecated='1.0.0', nohash=True),
    )
    inputs = insertMidACPCpoint.input_spec()
    for trait_name, metadata in expected.items():
        for meta_key, meta_value in metadata.items():
            assert getattr(inputs.traits()[trait_name], meta_key) == meta_value
def test_insertMidACPCpoint_outputs():
    """Verify the output-trait metadata of insertMidACPCpoint."""
    expected = dict(outputLandmarkFile=dict())
    outputs = insertMidACPCpoint.output_spec()
    for trait_name, metadata in expected.items():
        for meta_key, meta_value in metadata.items():
            assert getattr(outputs.traits()[trait_name], meta_key) == meta_value
|
MAECProject/python-maec
|
maec/package/__init__.py
|
_namespace = 'http://maec.mitre.org/XMLSchema/maec-package-2'
from .action_equivalence import ActionEquivalenceList, ActionEquivalence # noqa
from .malware_subject_reference import MalwareSubjectReference # noqa
from .object_equivalence import ObjectEquivalence, ObjectEquivalenceList # noqa
from .analysis import (Analysis, AnalysisEnvironment, NetworkInfrastructure, # noqa
CapturedProtocolList, CapturedProtocol, # noqa
AnalysisSystemList, AnalysisSystem, InstalledPrograms, # noqa
HypervisorHostSystem, DynamicAnalysisMetadata, # noqa
ToolList, CommentList, Comment, Source) # noqa
from .grouping_relationship import (GroupingRelationshipList, # noqa
GroupingRelationship, ClusteringMetadata, # noqa
ClusteringAlgorithmParameters, # noqa
ClusterComposition, ClusterEdgeNodePair) # noqa
from .malware_subject import (MalwareSubjectList, MalwareSubject, # noqa
MalwareConfigurationDetails, # noqa
MalwareConfigurationObfuscationDetails, # noqa
MalwareConfigurationObfuscationAlgorithm, # noqa
MalwareConfigurationStorageDetails, # noqa
MalwareBinaryConfigurationStorageDetails, # noqa
MalwareConfigurationParameter, # noqa
MalwareDevelopmentEnvironment, # noqa
FindingsBundleList, MetaAnalysis, # noqa
MalwareSubjectRelationshipList, # noqa
MalwareSubjectRelationship, Analyses, # noqa
MinorVariants) # noqa
from .package import Package # noqa
|
mozilla/kitsune
|
kitsune/forums/events.py
|
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _lazy
from tidings.events import InstanceEvent, EventUnion
from kitsune.forums.models import Thread, Forum
from kitsune.sumo.email_utils import emails_with_users_and_watches
from kitsune.sumo.templatetags.jinja_helpers import add_utm
class NewPostEvent(InstanceEvent):
    """Fired when a thread receives a reply.

    Firing this also notifies watchers of the containing forum.
    """

    event_type = "thread reply"
    content_type = Thread

    def __init__(self, reply):
        super(NewPostEvent, self).__init__(reply.thread)
        # Keep the reply around so _mails can build the message bodies.
        self.reply = reply

    def fire(self, **kwargs):
        """Notify not only watchers of this thread but of the parent forum."""
        return EventUnion(self, NewThreadEvent(self.reply)).fire(**kwargs)

    def _mails(self, users_and_watches):
        reply = self.reply
        thread = reply.thread
        post_url = add_utm(reply.get_absolute_url(), "forums-post")
        context = {
            "post": reply.content,
            "post_html": reply.content_parsed,
            "author": reply.author,
            "host": Site.objects.get_current().domain,
            "thread": thread.title,
            "forum": thread.forum.name,
            "post_url": post_url,
        }
        return emails_with_users_and_watches(
            subject=_lazy("Re: {forum} - {thread}"),
            text_template="forums/email/new_post.ltxt",
            html_template="forums/email/new_post.html",
            context_vars=context,
            users_and_watches=users_and_watches,
        )
class NewThreadEvent(InstanceEvent):
    """Fired when a new thread is added to a forum."""

    event_type = "forum thread"
    content_type = Forum

    def __init__(self, post):
        super(NewThreadEvent, self).__init__(post.thread.forum)
        # Keep the post around so _mails can build the message bodies.
        self.post = post

    def _mails(self, users_and_watches):
        post = self.post
        thread = post.thread
        post_url = add_utm(thread.get_absolute_url(), "forums-thread")
        context = {
            "post": post.content,
            "post_html": post.content_parsed,
            "author": post.author,
            "host": Site.objects.get_current().domain,
            "thread": thread.title,
            "forum": thread.forum.name,
            "post_url": post_url,
        }
        return emails_with_users_and_watches(
            subject=_lazy("{forum} - {thread}"),
            text_template="forums/email/new_thread.ltxt",
            html_template="forums/email/new_thread.html",
            context_vars=context,
            users_and_watches=users_and_watches,
        )
|
ZeitOnline/zeit.retresco
|
src/zeit/retresco/testing.py
|
from __future__ import absolute_import
import gocept.httpserverlayer.custom
import json
import mock
import pkg_resources
import plone.testing
import zeit.cms.content.interfaces
import zeit.cms.testcontenttype.testcontenttype
import zeit.cms.testing
import zeit.content.article.testing
import zeit.content.image.testing
import zeit.content.link.testing
import zeit.content.volume.testing
import zeit.find.testing
import zeit.push.testing
import zeit.workflow.testing
HTTP_LAYER = zeit.cms.testing.HTTPLayer(
zeit.cms.testing.RecordingRequestHandler,
name='HTTPLayer', module=__name__)
product_config = """
<product-config zeit.retresco>
base-url http://localhost:[PORT]
elasticsearch-url http://tms-backend.staging.zeit.de:80/elasticsearch
elasticsearch-index zeit_pool
elasticsearch-connection-class zeit.retresco.search.Connection
topic-redirect-prefix http://www.zeit.de
index-principal zope.user
</product-config>
"""
class ElasticsearchMockLayer(plone.testing.Layer):
    """Patches Elasticsearch.search to return a canned JSON fixture."""

    def setUp(self):
        patcher = mock.patch('elasticsearch.client.Elasticsearch.search')
        self['elasticsearch_mocker'] = patcher
        self['elasticsearch'] = patcher.start()
        fixture = pkg_resources.resource_filename(
            'zeit.retresco.tests', 'elasticsearch_result.json')
        with open(fixture) as fh:
            self['elasticsearch'].return_value = json.load(fh)

    def tearDown(self):
        del self['elasticsearch']
        self['elasticsearch_mocker'].stop()
        del self['elasticsearch_mocker']

ELASTICSEARCH_MOCK_LAYER = ElasticsearchMockLayer()
class TMSMockLayer(plone.testing.Layer):
    """Test layer registering a mock ITMS utility so no real TMS is hit."""
    # NOTE(review): uses gocept.zcapatch and zeit.retresco.interfaces,
    # neither of which appears in this module's visible imports --
    # presumably available transitively; confirm before refactoring.
    def setUp(self):
        # Mock TMS returning no article keywords and a fixed service URL.
        self['tms_mock'] = mock.Mock()
        self['tms_mock'].url = 'http://tms.example.com'
        self['tms_mock'].get_article_keywords.return_value = []
        self['tms_zca'] = gocept.zcapatch.Patches()
        self['tms_zca'].patch_utility(
            self['tms_mock'], zeit.retresco.interfaces.ITMS)
    def tearDown(self):
        self['tms_zca'].reset()
        del self['tms_zca']
        del self['tms_mock']
    def testTearDown(self):
        # Forget recorded calls between individual tests.
        self['tms_mock'].reset_mock()
TMS_MOCK_LAYER = TMSMockLayer()
class ZCMLLayer(zeit.cms.testing.ZCMLLayer):
    """ZCML layer that additionally depends on the recording HTTP server."""
    defaultBases = zeit.cms.testing.ZCMLLayer.defaultBases + (HTTP_LAYER,)
    def setUp(self):
        # Point the product config at the stub HTTP server's actual port.
        self.product_config = self.product_config.replace(
            '[PORT]', str(self['http_port']))
        super(ZCMLLayer, self).setUp()
# Functional test layer combining this package's product config with the
# configs of all packages it integrates with.
ZCML_LAYER = ZCMLLayer(
    'ftesting.zcml', product_config=zeit.cms.testing.cms_product_config +
    product_config +
    zeit.find.testing.product_config +
    zeit.push.testing.product_config +
    zeit.workflow.testing.product_config +
    zeit.content.article.testing.product_config +
    zeit.content.link.testing.product_config +
    zeit.content.volume.testing.product_config +
    zeit.content.image.testing.product_config)
# Celery worker layer that additionally consumes the 'search' queue.
CELERY_LAYER = zeit.cms.testing.CeleryWorkerLayer(
    name='CeleryLayer', bases=(ZCML_LAYER,))
CELERY_LAYER.queues += ('search',)
# ZCML layer with the Elasticsearch search call mocked out.
MOCK_ZCML_LAYER = plone.testing.Layer(
    bases=(ZCML_LAYER, ELASTICSEARCH_MOCK_LAYER), name='MockZCMLLayer',
    module=__name__)
class FunctionalTestCase(zeit.cms.testing.FunctionalTestCase):
    """Default base class for zeit.retresco functional tests."""
    layer = ZCML_LAYER
class TagTestHelpers(object):
    """Helper to prefill DAV-Property used for keywords of a content object."""
    def set_tags(self, content, xml):
        """Prefill DAV-Property for keywords of `content` with `xml`.
        It inserts `xml` into a newly created DAV-property in the
        'tagging' namespace. `xml` is a string containing XML
        representing `Tag` objects, which requires `type` and `text`::
        <tag type="Person">Karen Duve</tag>
        <tag type="Location">Berlin</tag>
        """
        dav = zeit.connector.interfaces.IWebDAVProperties(content)
        # KEYWORD_PROPERTY is a (name, namespace) tuple; the tuple itself
        # is the DAV property key.
        name, ns = dav_key = zeit.retresco.tagger.KEYWORD_PROPERTY
        # NOTE(review): the `tag=name` argument below is never referenced
        # by the format template -- possibly a lost {tag} placeholder;
        # harmless at runtime, but verify against the real property format.
        dav[dav_key] = """<ns:rankedTags xmlns:ns="{ns}">
<rankedTags>{0}</rankedTags></ns:rankedTags>""".format(
            xml, ns=ns, tag=name)
def create_testcontent():
    """Return an ExampleContentType with fixed id, title and teaser."""
    obj = zeit.cms.testcontenttype.testcontenttype.ExampleContentType()
    obj.uniqueId = 'http://xml.zeit.de/testcontent'
    obj.teaserText = 'teaser'
    obj.title = 'title'
    zeit.cms.content.interfaces.IUUID(obj).id = 'myid'
    return obj
|
DrAlexx/pdfium
|
testing/tools/gold.py
|
# Copyright 2015 The PDFium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import shlex
import shutil
# This module collects and writes output in a format expected by the
# Gold baseline tool. Based on meta data provided explicitly and by
# adding a series of test results it can be used to produce
# a JSON file that is uploaded to Google Storage and ingested by Gold.
#
# The output will look similar to this:
#
# {
# "build_number" : "2",
# "gitHash" : "a4a338179013b029d6dd55e737b5bd648a9fb68c",
# "key" : {
# "arch" : "arm64",
# "compiler" : "Clang",
# },
# "results" : [
# {
# "key" : {
# "config" : "vk",
# "name" : "yuv_nv12_to_rgb_effect",
# "source_type" : "gm"
# },
# "md5" : "7db34da246868d50ab9ddd776ce6d779",
# "options" : {
# "ext" : "png",
# "gamma_correct" : "no"
# }
# },
# {
# "key" : {
# "config" : "vk",
# "name" : "yuv_to_rgb_effect",
# "source_type" : "gm"
# },
# "md5" : "0b955f387740c66eb23bf0e253c80d64",
# "options" : {
# "ext" : "png",
# "gamma_correct" : "no"
# }
# }
# ],
# }
#
class GoldResults(object):
  """Collects test results and writes them as a Gold-ingestible dm.json.

  Images are copied into the output directory under their md5 name and a
  single dm.json file describing all results is written next to them.
  """

  def __init__(self, source_type, outputDir, propertiesStr, keyStr,
               ignore_hashes_file):
    """
    source_type is the source_type (=corpus) field used for all results.
    outputDir is the directory where the resulting images are copied and
    the dm.json file is written. It is created if it does not exist.
    propertiesStr is a string with space separated key/value pairs that
    is used to set the top level fields in the output JSON file.
    keyStr is a string with space separated key/value pairs that
    is used to set the 'key' field in the output JSON file.
    ignore_hashes_file is a file that contains a list of image hashes
    (one per line) that should be ignored; may be falsy for "none".
    """
    self._source_type = source_type
    self._properties = self._parseKeyValuePairs(propertiesStr)
    self._properties["key"] = self._parseKeyValuePairs(keyStr)
    self._results = []
    self._outputDir = outputDir
    # Make sure the output directory exists.
    if not os.path.exists(outputDir):
      os.makedirs(outputDir)
    self._ignore_hashes = set()
    if ignore_hashes_file:
      with open(ignore_hashes_file, 'r') as ig_file:
        hashes = [x.strip() for x in ig_file.readlines() if x.strip()]
        self._ignore_hashes = set(hashes)

  def AddTestResult(self, testName, md5Hash, outputImagePath):
    """Records one result and copies its image into the output directory."""
    # If the hash is in the list of hashes to ignore then we don't
    # make a copy, but still add it to the results.
    imgExt = os.path.splitext(outputImagePath)[1].lstrip(".")
    if md5Hash not in self._ignore_hashes:
      # Copy the image to <output_dir>/<md5Hash>.<image_extension>
      if not imgExt:
        raise ValueError("File %s does not have an extension" % outputImagePath)
      newFilePath = os.path.join(self._outputDir, md5Hash + '.' + imgExt)
      shutil.copy2(outputImagePath, newFilePath)
    # Add an entry to the list of test results
    self._results.append({
      "key": {
        "name": testName,
        "source_type": self._source_type,
      },
      "md5": md5Hash,
      "options": {
        "ext": imgExt,
        "gamma_correct": "no"
      }
    })

  def _parseKeyValuePairs(self, kvStr):
    """Parses 'k1 v1 k2 v2 ...' (shlex quoting rules) into a dict.

    Raises ValueError if the token count is odd.
    """
    kvPairs = shlex.split(kvStr)
    if len(kvPairs) % 2:
      raise ValueError("Uneven number of key/value pairs. Got %s" % kvStr)
    return { kvPairs[i]:kvPairs[i+1] for i in range(0, len(kvPairs), 2) }

  def WriteResults(self):
    """Writes the collected results to <outputDir>/dm.json."""
    self._properties.update({
      "results": self._results
    })
    outputFileName = os.path.join(self._outputDir, "dm.json")
    # Open in text mode: json.dump emits str, which raises TypeError on a
    # binary-mode file under Python 3 (text mode also works on Python 2).
    with open(outputFileName, 'w') as outfile:
      json.dump(self._properties, outfile, indent=1)
      outfile.write("\n")
# Produce example output for manual testing.
# Produce example output for manual testing.
if __name__ == "__main__":
  # Create a test directory with three empty 'image' files.
  testDir = "./testdirectory"
  if not os.path.exists(testDir):
    os.makedirs(testDir)
  open(os.path.join(testDir, "image1.png"), 'wb').close()
  open(os.path.join(testDir, "image2.png"), 'wb').close()
  open(os.path.join(testDir, "image3.png"), 'wb').close()
  # Create an instance and add results.
  propStr = """build_number 2 "builder name" Builder-Name gitHash a4a338179013b029d6dd55e737b5bd648a9fb68c"""
  keyStr = "arch arm64 compiler Clang configuration Debug"
  hash_file = os.path.join(testDir, "ignore_hashes.txt")
  # Open in text mode: writing a str to a 'wb' handle raises TypeError on
  # Python 3 (text mode also works on Python 2).
  with open(hash_file, 'w') as f:
    f.write("\n".join(["hash-1", "hash-4"]) + "\n")
  gr = GoldResults("pdfium", testDir, propStr, keyStr, hash_file)
  gr.AddTestResult("test-1", "hash-1", os.path.join(testDir, "image1.png"))
  gr.AddTestResult("test-2", "hash-2", os.path.join(testDir, "image2.png"))
  gr.AddTestResult("test-3", "hash-3", os.path.join(testDir, "image3.png"))
  gr.WriteResults()
|
awacha/cct
|
cct/qtgui2/tools/capillarysizer/capillarysizer.py
|
import logging
from typing import Optional, Tuple
from PyQt5 import QtWidgets
from matplotlib.axes import Axes, np
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT, FigureCanvasQTAgg
from matplotlib.figure import Figure
from matplotlib.lines import Line2D
from .capillarysizer_ui import Ui_Form
from ...utils.window import WindowRequiresDevices
from ....core2.algorithms.peakfit import fitpeak, PeakType
from ....core2.dataclasses import Scan
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class CapillarySizer(QtWidgets.QWidget, WindowRequiresDevices, Ui_Form):
    """Tool window for determining a capillary's center and wall-to-wall
    thickness from a motor scan: the user fits a positive and a negative
    peak (typically on the derivative of the scan signal), and the center
    (mean of the two peak positions) and thickness (their distance) can be
    written back to the selected sample.
    """
    figure: Figure
    figtoolbar: NavigationToolbar2QT
    canvas: FigureCanvasQTAgg
    axes: Axes
    # Currently displayed scan; None until a scan index is selected.
    scan: Optional[Scan] = None
    # (value, error) of the fitted positive/negative peak positions.
    positive: Tuple[float, float] = (.0, .0)
    negative: Tuple[float, float] = (.0, .0)
    # Line artist of the raw scan curve.
    line: Line2D = None
    # Line artists of the fitted peak curves, if a fit has been made.
    positivepeakline: Optional[Line2D] = None
    negativepeakline: Optional[Line2D] = None
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.setupUi(self)
    def setupUi(self, Form):
        """Build the matplotlib canvas, wire up all widget signals and
        initialize the scan index range from the scan store."""
        super().setupUi(Form)
        self.figure = Figure(constrained_layout=True)
        self.canvas = FigureCanvasQTAgg(self.figure)
        self.figtoolbar = NavigationToolbar2QT(self.canvas, self)
        self.figureVerticalLayout.addWidget(self.canvas)
        self.figureVerticalLayout.addWidget(self.figtoolbar)
        self.axes = self.figure.add_subplot(self.figure.add_gridspec(1, 1)[:, :])
        self.canvas.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
        self.canvas.draw_idle()
        self.scanIndexSpinBox.valueChanged.connect(self.scanIndexChanged)
        self.signalNameComboBox.currentIndexChanged.connect(self.signalNameChanged)
        self.fitNegativeToolButton.clicked.connect(self.fitPeak)
        self.fitPositiveToolButton.clicked.connect(self.fitPeak)
        self.sampleNameComboBox.currentIndexChanged.connect(self.sampleChanged)
        self.sampleNameComboBox.setModel(self.instrument.samplestore.sortedmodel)
        self.negativeValDoubleSpinBox.valueChanged.connect(self.setValuesFromSpinboxes)
        self.positiveValDoubleSpinBox.valueChanged.connect(self.setValuesFromSpinboxes)
        self.negativeErrDoubleSpinBox.valueChanged.connect(self.setValuesFromSpinboxes)
        self.positiveErrDoubleSpinBox.valueChanged.connect(self.setValuesFromSpinboxes)
        self.updateCenterToolButton.clicked.connect(self.saveCenter)
        self.updateThicknessToolButton.clicked.connect(self.saveThickness)
        self.updateCenterToolButton.setIcon(
            QtWidgets.QApplication.instance().style().standardIcon(QtWidgets.QStyle.SP_ArrowRight))
        self.updateThicknessToolButton.setIcon(
            QtWidgets.QApplication.instance().style().standardIcon(QtWidgets.QStyle.SP_ArrowRight))
        self.instrument.scan.lastscanchanged.connect(self.onLastScanChanged)
        # No scans recorded yet: disable index selection until one appears.
        if self.instrument.scan.firstscan() is None:
            self.scanIndexSpinBox.setEnabled(False)
        else:
            self.scanIndexSpinBox.setRange(self.instrument.scan.firstscan(), self.instrument.scan.lastscan())
        self.derivativeToolButton.toggled.connect(self.signalNameChanged)
        if self.instrument.scan.lastscan() is not None:
            self.scanIndexSpinBox.setValue(self.instrument.scan.lastscan())
            self.signalNameComboBox.setCurrentIndex(0)
        self.reloadToolButton.clicked.connect(self.signalNameChanged)
        self.replot()
    def fitPeak(self):
        """Fit a Lorentzian to the data visible in the current axes limits
        and store the fitted peak position; triggered by either the
        positive- or negative-peak fit button (distinguished via sender())."""
        if self.line is None:
            return
        x = self.line.get_xdata()
        y = self.line.get_ydata()
        # Restrict the fit to the points inside the current zoom window.
        xmin, xmax, ymin, ymax = self.axes.axis()
        idx = np.logical_and(
            np.logical_and(x >= xmin, x <= xmax),
            np.logical_and(y >= ymin, y <= ymax)
        )
        # Negative peaks are fitted on the sign-flipped signal.
        if self.sender() == self.fitNegativeToolButton:
            y = -y
        try:
            # do not use y error bars: if y<0, y**0.5 is NaN, which will break the fitting routine
            pars, covar, peakfunc = fitpeak(x[idx], y[idx], None, None, PeakType.Lorentzian)
        except ValueError as ve:
            QtWidgets.QMessageBox.critical(self, 'Error while fitting',
                                           f'Cannot fit peak, please try another range. The error message was: {ve}')
            return
        logger.debug(f'Peak parameters: {pars}')
        logger.debug(f'Covariance matrix: {covar}')
        xfit = np.linspace(x[idx].min(), x[idx].max(), 100)
        yfit = peakfunc(xfit)
        if self.sender() == self.fitNegativeToolButton:
            # Draw (or update) the fitted curve, flipped back to negative.
            if self.negativepeakline is None:
                self.negativepeakline = self.axes.plot(xfit, - yfit, 'b-', lw=3)[0]
            else:
                self.negativepeakline.set_xdata(xfit)
                self.negativepeakline.set_ydata(-yfit)
            # pars[1] is the peak center; covar[1,1]**0.5 its std error.
            self.negative = (pars[1], covar[1, 1] ** 0.5)
            # Update spin boxes without re-triggering setValuesFromSpinboxes.
            self.negativeValDoubleSpinBox.blockSignals(True)
            self.negativeErrDoubleSpinBox.blockSignals(True)
            self.negativeValDoubleSpinBox.setValue(pars[1])
            self.negativeErrDoubleSpinBox.setValue(covar[1, 1] ** 0.5)
            self.negativeValDoubleSpinBox.blockSignals(False)
            self.negativeErrDoubleSpinBox.blockSignals(False)
        else:
            if self.positivepeakline is None:
                self.positivepeakline = self.axes.plot(xfit, yfit, 'r-', lw=3)[0]
            else:
                self.positivepeakline.set_xdata(xfit)
                self.positivepeakline.set_ydata(yfit)
            self.positive = (pars[1], covar[1, 1] ** 0.5)
            self.positiveValDoubleSpinBox.blockSignals(True)
            self.positiveErrDoubleSpinBox.blockSignals(True)
            self.positiveValDoubleSpinBox.setValue(pars[1])
            self.positiveErrDoubleSpinBox.setValue(covar[1, 1] ** 0.5)
            self.positiveValDoubleSpinBox.blockSignals(False)
            self.positiveErrDoubleSpinBox.blockSignals(False)
        self.canvas.draw_idle()
        self.recalculate()
    def onLastScanChanged(self):
        """Keep the scan index spin box range in sync with the scan store."""
        if self.instrument.scan.firstscan() is not None:
            self.scanIndexSpinBox.setMaximum(self.instrument.scan.lastscan())
            self.scanIndexSpinBox.setMinimum(self.instrument.scan.firstscan())
            self.scanIndexSpinBox.setEnabled(True)
        else:
            self.scanIndexSpinBox.setEnabled(False)
    def setValuesFromSpinboxes(self):
        """Take peak positions/errors from manually edited spin boxes."""
        self.positive = (self.positiveValDoubleSpinBox.value(), self.positiveErrDoubleSpinBox.value())
        self.negative = (self.negativeValDoubleSpinBox.value(), self.negativeErrDoubleSpinBox.value())
        self.recalculate()
    def recalculate(self):
        """Recompute center and thickness from the two peak positions and
        update the result labels (errors propagated in quadrature)."""
        positionval = 0.5 * (self.positive[0] + self.negative[0])
        positionerr = 0.5 * (self.positive[1] ** 2 + self.negative[1] ** 2) ** 0.5
        thicknessval = abs(self.positive[0] - self.negative[0])
        thicknesserr = (self.positive[1] ** 2 + self.negative[1] ** 2) ** 0.5
        self.newPositionLabel.setText(f'{positionval:.4f} \xb1 {positionerr:.4f}')
        self.newThicknessLabel.setText(f'{thicknessval:.4f} \xb1 {thicknesserr:.4f} mm')
    def sampleChanged(self):
        """Show the currently stored position/thickness of the selected sample."""
        if self.sampleNameComboBox.currentIndex() < 0:
            return
        sample = self.instrument.samplestore[self.sampleNameComboBox.currentText()]
        # Only show the stored position if the scanned motor is one of the
        # sample positioning motors.
        if self.instrument.samplestore.hasMotors() and (self.scan is not None):
            if self.instrument.samplestore.xmotorname() == self.scan.motorname:
                self.oldPositionLabel.setText(f'{sample.positionx[0]:.4f} \xb1 {sample.positionx[1]:.4f}')
            elif self.instrument.samplestore.ymotorname() == self.scan.motorname:
                self.oldPositionLabel.setText(f'{sample.positiony[0]:.4f} \xb1 {sample.positiony[1]:.4f}')
        # Sample thickness is stored in cm; display in mm.
        self.oldThicknessLabel.setText(f'{sample.thickness[0] * 10.0:.4f} \xb1 {sample.thickness[1] * 10.0:.4f} mm')
    def scanIndexChanged(self, value: int):
        """Load the selected scan and repopulate the signal combo box,
        keeping the previously selected signal when still available."""
        self.scan = self.instrument.scan[value]
        self.signalNameComboBox.blockSignals(True)
        oldsignal = self.signalNameComboBox.currentText()
        self.signalNameComboBox.clear()
        # First two columns are not plottable signals.
        self.signalNameComboBox.addItems(self.scan.columnnames[2:])
        self.signalNameComboBox.setCurrentIndex(self.signalNameComboBox.findText(oldsignal))
        self.signalNameComboBox.blockSignals(False)
        self.signalNameChanged()
    def signalNameChanged(self):
        """Redraw the plot when a valid signal is selected."""
        if self.signalNameComboBox.currentIndex() >= 0:
            self.replot()
        self.figtoolbar.update()
    def replot(self):
        """Redraw the scan curve (optionally its numerical derivative),
        discarding any previously fitted peak curves."""
        if self.scan is None:
            return
        if self.positivepeakline is not None:
            self.positivepeakline.remove()
            self.positivepeakline = None
        if self.negativepeakline is not None:
            self.negativepeakline.remove()
            self.negativepeakline = None
        x = self.scan[self.scan.motorname]
        y = self.scan[self.signalNameComboBox.currentText()]
        if self.derivativeToolButton.isChecked():
            # Forward-difference derivative, plotted at interval midpoints.
            y = (y[1:] - y[:-1]) / (x[1:] - x[:-1])
            x = 0.5 * (x[1:] + x[:-1])
        if self.line is None:
            self.line = self.axes.plot(x, y, 'k.-')[0]
        else:
            self.line.set_xdata(x)
            self.line.set_ydata(y)
        self.axes.relim()
        self.axes.autoscale(True)
        self.axes.set_xlabel(self.scan.motorname)
        self.axes.set_ylabel(
            'Derivative of ' + self.signalNameComboBox.currentText()
            if self.derivativeToolButton.isChecked() else self.signalNameComboBox.currentText())
        self.axes.set_title(self.scan.comment)
        self.axes.grid(True, which='both')
        self.canvas.draw_idle()
    def saveCenter(self):
        """Store the determined center as the sample's X or Y position,
        asking the user for the direction when it cannot be inferred from
        the scanned motor."""
        positionval = 0.5 * (self.positive[0] + self.negative[0])
        positionerr = 0.5 * (self.positive[1] ** 2 + self.negative[1] ** 2) ** 0.5
        if not self.instrument.samplestore.hasMotors():
            # ask the user which direction this is
            msgbox = QtWidgets.QMessageBox(self.window())
            msgbox.setIcon(QtWidgets.QMessageBox.Question)
            msgbox.setWindowTitle('Select direction')
            msgbox.setText('Please select X or Y direction to save the determined sample center to:')
            btnX=msgbox.addButton('X', QtWidgets.QMessageBox.YesRole)
            btnY=msgbox.addButton('Y', QtWidgets.QMessageBox.NoRole)
            msgbox.addButton(QtWidgets.QMessageBox.Cancel)
            result = msgbox.exec_()
            logger.debug(f'{result=}')
            if msgbox.clickedButton() == btnX:
                xcoordinate = True
            elif msgbox.clickedButton() == btnY:
                xcoordinate = False
            else:
                # Cancelled.
                xcoordinate = None
        elif self.instrument.samplestore.xmotorname() == self.scan.motorname:
            xcoordinate = True
        elif self.instrument.samplestore.ymotorname() == self.scan.motorname:
            xcoordinate = False
        else:
            # Scanned motor is neither sample positioning motor: no save.
            xcoordinate = None
        if xcoordinate is None:
            return
        else:
            try:
                self.instrument.samplestore.updateSample(self.sampleNameComboBox.currentText(),
                                                         'positionx' if xcoordinate else 'positiony',
                                                         (positionval, positionerr))
                logger.info(
                    f'Updated {"X" if xcoordinate else "Y"} '
                    f'position of sample {self.sampleNameComboBox.currentText()} to {positionval:.4f} \xb1 {positionerr:.4f}.')
            except ValueError:
                QtWidgets.QMessageBox.critical(
                    self, 'Parameter locked',
                    f'Cannot set position for sample {self.sampleNameComboBox.currentText()}: this parameter has been set read-only!')
        self.sampleChanged()
    def saveThickness(self):
        """Store the determined thickness (converted mm -> cm) on the
        currently selected sample."""
        thicknessval = abs(self.positive[0] - self.negative[0])
        thicknesserr = (self.positive[1] ** 2 + self.negative[1] ** 2) ** 0.5
        sample = self.instrument.samplestore[self.sampleNameComboBox.currentText()]
        try:
            # Thickness is measured in mm here but stored in cm.
            sample.thickness = (thicknessval / 10, thicknesserr / 10)
        except ValueError:
            QtWidgets.QMessageBox.critical(
                self, 'Parameter locked',
                f'Cannot set thickness for sample {sample.title}: this parameter has been set read-only!')
            return
        self.instrument.samplestore.updateSample(sample.title, 'thickness', sample.thickness)
        logger.info(
            f'Updated thickness of sample {sample.title} to {sample.thickness[0]:.4f} \xb1 {sample.thickness[1]:.4f} cm.')
        self.sampleChanged()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.