from setuptools import setup
setup(
name='stratosphere',
version='0.1.0',
description='AWS CloudFormation orchestration library',
author='Rob Zienert',
author_email='rob@robzienert.com',
license='MIT License',
packages=['stratosphere'],
setup_requires=['pep8', 'pyflakes', 'moto'],
install_requires=['toposort', 'requests', 'boto'],
scripts=[],
test_suite='tests',
tests_require=[],
use_2to3=True
)
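# Typical invocations (standard setuptools commands):
#   python setup.py test      # runs the "tests" suite declared above
#   python setup.py install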
# --- robzienert/stratosphere :: setup.py | Python | MIT | 452 bytes ---
# -*- coding: utf-8 -*-
""" RESTful Search Methods
@requires: U{B{I{gluon}} <http://web2py.com>}
@copyright: 2009-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import re
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
    except ImportError:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import *
from gluon.serializers import json as jsons
from gluon.storage import Storage
from s3crud import S3CRUD
from s3navigation import s3_search_tabs
from s3utils import s3_debug, S3DateTime, s3_get_foreign_key
from s3validators import *
from s3widgets import S3OrganisationHierarchyWidget, s3_grouped_checkboxes_widget
from s3resource import S3FieldSelector
__all__ = ["S3SearchWidget",
"S3SearchSimpleWidget",
"S3SearchMinMaxWidget",
"S3SearchOptionsWidget",
"S3SearchLocationWidget",
"S3SearchSkillsWidget",
"S3SearchOrgHierarchyWidget",
"S3Search",
"S3LocationSearch",
"S3OrganisationSearch",
"S3PersonSearch",
"S3HRSearch",
"S3PentitySearch",
]
MAX_RESULTS = 1000
MAX_SEARCH_RESULTS = 200
# =============================================================================
class S3SearchWidget(object):
"""
Search Widget for interactive search (base class)
"""
def __init__(self, field=None, name=None, **attr):
"""
Configures the search option
@param field: name(s) of the fields to search in
            @param name: name for the widget, used as its HTML "name" attribute
@keyword label: a label for the search widget
@keyword comment: a comment for the search widget
"""
self.other = None
self.field = field
if not self.field:
raise SyntaxError("No search field specified.")
self.attr = Storage(attr)
if name is not None:
self.attr["_name"] = name
self.master_query = None
self.search_field = None
# -------------------------------------------------------------------------
def widget(self, resource, vars):
"""
Returns the widget
@param resource: the resource to search in
@param vars: the URL GET variables as dict
"""
        # To be implemented in a subclass
        raise NotImplementedError
# -------------------------------------------------------------------------
@staticmethod
def query(resource, value):
"""
Returns a sub-query for this search option
@param resource: the resource to search in
@param value: the value returned from the widget
"""
raise NotImplementedError
# -------------------------------------------------------------------------
def build_master_query(self, resource):
"""
Get the master query for the specified field(s)
"""
db = current.db
table = resource.table
components = resource.components
accessible_query = resource.accessible_query
master_query = Storage()
search_field = Storage()
fields = self.field
if fields and not isinstance(fields, (list, tuple)):
fields = [fields]
# Find the tables, joins and fields to search in
for f in fields:
ktable = None
rtable = None
component = None
reference = None
multiple = False
if f.find(".") != -1: # Component
cname, f = f.split(".", 1)
if cname not in components:
continue
else:
component = components[cname]
ktable = component.table
ktablename = component.tablename
pkey = component.pkey
fkey = component.fkey
# Do not add queries for empty tables
if not db(ktable.id > 0).select(ktable.id,
limitby=(0, 1)).first():
continue
else: # this resource
ktable = table
ktablename = table._tablename
if f.find("$") != -1: # Referenced object
rkey, f = f.split("$", 1)
                    if rkey not in ktable.fields:
continue
rtable = ktable
rtablename = ktablename
ktablename, key, multiple = s3_get_foreign_key(ktable[rkey])
if not ktablename:
continue
else:
ktable = db[ktablename]
# Do not add queries for empty tables
if not db(ktable.id > 0).select(ktable.id,
limitby=(0, 1)).first():
continue
# Master queries
# @todo: update this for new QueryBuilder (S3ResourceFilter)
if ktable and ktablename not in master_query:
query = (accessible_query("read", ktable))
if "deleted" in ktable.fields:
query = (query & (ktable.deleted == "False"))
join = None
if reference:
if ktablename != rtablename:
q = (accessible_query("read", rtable))
if "deleted" in rtable.fields:
q = (q & (rtable.deleted == "False"))
else:
q = None
if multiple:
j = (rtable[rkey].contains(ktable.id))
else:
j = (rtable[rkey] == ktable.id)
if q is not None:
join = q & j
else:
join = j
j = None
if component:
if reference:
q = (accessible_query("read", table))
if "deleted" in table.fields:
q = (q & (table.deleted == "False"))
j = (q & (table[pkey] == rtable[fkey]))
else:
j = (table[pkey] == ktable[fkey])
if j is not None and join is not None:
join = (join & j)
elif j:
join = j
if join is not None:
query = (query & join)
master_query[ktable._tablename] = query
# Search fields
if ktable and f in ktable.fields:
if ktable._tablename not in search_field:
search_field[ktablename] = [ktable[f]]
else:
search_field[ktablename].append(ktable[f])
self.master_query = master_query
self.search_field = search_field
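    # Field selector forms handled by build_master_query() (illustrative,
    # hypothetical field/table names):
    #   "name"                  -> field in this resource's own table
    #   "contact.value"         -> field "value" in the component "contact"
    #   "organisation_id$name"  -> field "name" in the table referenced by
    #                              the foreign key "organisation_id"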
# =============================================================================
class S3SearchSimpleWidget(S3SearchWidget):
"""
Simple full-text search widget
"""
def widget(self,
resource,
vars=None,
name=None,
value=None,
autocomplete=None):
"""
Returns the widget
@param resource: the resource to search in
@param vars: the URL GET variables as dict
"""
attr = self.attr
# SearchAutocomplete must set name depending on the field
if name:
attr.update(_name=name)
if "_size" not in attr:
attr.update(_size="40")
if "_name" not in attr:
attr.update(_name="%s_search_simple" % resource.name)
if "_id" not in attr:
attr.update(_id="%s_search_simple" % resource.name)
if autocomplete:
attr.update(_autocomplete=autocomplete)
attr.update(_type="text")
self.name = attr._name
# Search Autocomplete - Display current value
attr["_value"] = value
return INPUT(**attr)
# -------------------------------------------------------------------------
def query(self, resource, value):
"""
Returns a sub-query for this search option
@param resource: the resource to search in
@param value: the value returned from the widget
"""
# Build the query
if value and isinstance(value, str):
values = value.split()
final_query = None
# Create a set of queries for each value
for value in values:
field_queries = None
# Create a set of queries that test the current
# value against each field
for field in self.field:
s = S3FieldSelector(field).lower()
field_query = s.like("%%%s%%" % value.lower())
# We want a match against any field
if field_queries:
field_queries = field_query | field_queries
else:
field_queries = field_query
# We want all values to be matched
if final_query:
final_query = field_queries & final_query
else:
final_query = field_queries
return final_query
else:
return None
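    # Query construction sketch (hypothetical fields): for the value
    # "water well" over field=["name", "comment"], query() returns the
    # equivalent of
    #   (name like "%water%" | comment like "%water%") &
    #   (name like "%well%" | comment like "%well%")
    # i.e. every search term must match at least one of the fields.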
# =============================================================================
class S3SearchMinMaxWidget(S3SearchWidget):
"""
Min/Max search widget for numeric fields
"""
def widget(self, resource, vars):
"""
Returns the widget
@param resource: the resource to search in
@param vars: the URL GET variables as dict
"""
T = current.T
settings = current.deployment_settings
self.names = []
attr = self.attr
self.method = attr.get("method", "range")
select_min = self.method in ("min", "range")
select_max = self.method in ("max", "range")
self.widmin = Storage()
self.widmax = Storage()
if not self.search_field:
self.build_master_query(resource)
search_field = self.search_field.values()
if not search_field:
return SPAN(T("no options available"),
_class="no-options-available")
search_field = search_field[0][0]
ftype = str(search_field.type)
input_min = input_max = None
if ftype == "integer":
requires = IS_EMPTY_OR(IS_INT_IN_RANGE())
elif ftype == "date":
attr.update(_class="date")
requires = IS_EMPTY_OR(IS_DATE(format=settings.get_L10n_date_format()))
elif ftype == "time":
attr.update(_class="time")
requires = IS_EMPTY_OR(IS_TIME())
elif ftype == "datetime":
attr.update(_class="datetime")
requires = IS_EMPTY_OR(IS_DATETIME(format=settings.get_L10n_datetime_format()))
else:
raise SyntaxError("Unsupported search field type")
attr.update(_type="text")
trl = TR(_class="sublabels")
tri = TR()
# dictionaries for storing details of the input elements
name = attr["_name"]
self.widmin = dict(name="%s_min" % name,
label=T("min"),
requires=requires,
attributes=attr)
self.widmax = dict(name="%s_max" % name,
label=T("max"),
requires=requires,
attributes=attr)
if select_min:
min_label = self.widget_label(self.widmin)
min_input = self.widget_input(self.widmin)
self.names.append(self.widmin["name"])
trl.append(min_label)
tri.append(min_input)
if select_max:
max_label = self.widget_label(self.widmax)
max_input = self.widget_input(self.widmax)
self.names.append(self.widmax["name"])
trl.append(max_label)
tri.append(max_input)
w = TABLE(trl, tri, _class="s3searchminmaxwidget")
return w
# -------------------------------------------------------------------------
@staticmethod
def widget_label(widget):
"""
@param widget: dict with the name, label, requires and
attributes for the input element
@return: LABEL
"""
return LABEL(widget["label"], _for="id-%s" % widget["name"])
# -------------------------------------------------------------------------
@staticmethod
def widget_input(widget):
"""
@param widget: dict with the name, label, requires and
attributes for the input element
@return: INPUT
"""
attr = widget["attributes"].copy()
attr.update(_name=widget["name"],
_id="id-%s" % widget["name"])
return INPUT(requires=widget["requires"], **attr)
# -------------------------------------------------------------------------
def validate(self, resource, value):
"""
Validate the input values of the widget
"""
T = current.T
errors = dict()
select_min = self.method in ("min", "range")
select_max = self.method in ("max", "range")
if select_min and select_max:
vmin = value.get(self.widmin["name"], None)
vmax = value.get(self.widmax["name"], None)
if vmax is not None and vmin is not None and vmin > vmax:
errors[self.widmax["name"]] = \
T("Maximum must be greater than minimum")
return errors or None
# -------------------------------------------------------------------------
def query(self, resource, value):
"""
Returns a sub-query for this search option
@param resource: the resource to search in
@param value: the value returned from the widget
"""
select_min = self.method in ("min", "range")
select_max = self.method in ("max", "range")
min_query = max_query = query = None
if select_min:
v = value.get(self.widmin["name"], None)
if v is not None and str(v):
min_query = S3FieldSelector(self.field) >= v
if select_max:
v = value.get(self.widmax["name"], None)
if v is not None and str(v):
max_query = S3FieldSelector(self.field) <= v
if min_query is not None:
query = min_query
if max_query is not None:
query = query & max_query
else:
query = max_query
return query
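    # Example (hypothetical names): method="range" over field "age" with
    # form values {"age_search_min": 18, "age_search_max": 65} yields
    #   (S3FieldSelector("age") >= 18) & (S3FieldSelector("age") <= 65)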
# =============================================================================
class S3SearchOptionsWidget(S3SearchWidget):
"""
Option select widget for option or boolean fields
Displays a search widget which allows the user to search for records
with fields matching a certain criteria.
Field must be an integer or reference to work on all versions of
gluon/sqlhtml.py
@param represent: If the field is a reference, represent can pass a
formatting string with mapping fields to the
referenced record.
@param cols: The number of columns which the options will be
displayed in
"""
def __init__(self, field=None, name=None, options=None, null=False, **attr):
"""
Configures the search option
@param field: name(s) of the fields to search in
@param name: used to build the HTML ID of the widget
@param options: either a value:label dictionary to populate the
search widget or a callable to create this
            @param null: False to exclude the null value from the options,
                         otherwise a lazyT for its label
            @keyword label: a label for the search widget
            @keyword location_level: if specified, the label is looked up
                                     from the current location hierarchy
@keyword comment: a comment for the search widget
"""
super(S3SearchOptionsWidget, self).__init__(field, name, **attr)
self.options = options
self.null = null
# -------------------------------------------------------------------------
def _get_reference_resource(self, resource):
"""
            If the field is entered as kfield$field, the search will be
            performed on the field in the referenced resource.
"""
field_name = self.field
kfield_name = None
if field_name.find("$") != -1:
kfield_name, field_name = field_name.split("$")
tablename = resource.table[kfield_name].type[10:]
prefix, resource_name = tablename.split("_", 1)
resource = current.manager.define_resource(prefix,
resource_name)
return resource, field_name, kfield_name
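    # Example (hypothetical): on an org_office resource, the field
    # "organisation_id$name" resolves to the org_organisation resource,
    # with field_name="name" and kfield_name="organisation_id".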
# -------------------------------------------------------------------------
def widget(self, resource, vars):
"""
Returns the widget
@param resource: the resource to search in
@param vars: the URL GET variables as dict
"""
T = current.T
#resource, field_name, kfield_name = self._get_reference_resource(resource)
field_name = self.field
attr = self.attr
self.name = attr.pop("_name",
"%s_search_select_%s" % (resource.name,
field_name))
if "location_level" in attr:
# This is searching a Location Hierarchy, so lookup the label now
level = attr["location_level"]
hierarchy = current.gis.get_location_hierarchy()
if level in hierarchy:
label = hierarchy[level]
else:
label = level
attr["label"] = label
# Populate the field value from the GET parameter
if vars and self.name in vars:
value = vars[self.name]
else:
value = None
fs = S3FieldSelector(field_name)
fl = fs.resolve(resource)
field = fl.field
# Check the field type
if field is not None:
field_type = str(field.type)
else:
field_type = "virtual"
if self.options is not None:
# Custom dict of options {value: label} or callable
if isinstance(self.options, dict):
options = self.options
elif callable(self.options):
options = self.options()
opt_values = options.keys()
else:
if field_type == "boolean":
opt_values = (True, False)
else:
opt_values = []
rows = resource.sqltable(fields=[field_name],
start=None,
limit=None,
as_rows=True)
if rows:
if field_type.startswith("list"):
for row in rows:
fk_list = row[field]
                            if fk_list is not None:
try:
fkeys = fk_list.split("|")
except:
fkeys = fk_list
for fkey in fkeys:
if fkey not in opt_values:
opt_values.append(fkey)
else:
opt_values = list(set([row[field]
for row in rows
if row[field] is not None]))
if len(opt_values) < 2:
msg = attr.get("_no_opts", T("No options available"))
return SPAN(msg, _class="no-options-available")
if self.options is None:
opt_list = []
# Always use the represent of the widget, if present
represent = attr.represent
# Fallback to the field's represent
if not represent or field_type[:9] != "reference":
represent = field.represent
if callable(represent):
# Execute, if callable
if "show_link" in represent.func_code.co_varnames:
opt_list = [(opt_value, represent(opt_value, show_link=False)) for opt_value
in opt_values]
else:
opt_list = [(opt_value, represent(opt_value)) for opt_value
in opt_values]
elif isinstance(represent, str) and field_type[:9] == "reference":
# Feed the format string
# Use the represent string to reduce db calls
# Find the fields which are needed to represent:
db = current.db
ktable = db[field_type[10:]]
fieldnames = ["id"]
fieldnames += re.findall("%\(([a-zA-Z0-9_]*)\)s", represent)
represent_fields = [ktable[fieldname] for fieldname in fieldnames]
query = (ktable.id.belongs(opt_values)) & (ktable.deleted == False)
represent_rows = db(query).select(*represent_fields).as_dict(key=represent_fields[0].name)
opt_list = []
for opt_value in opt_values:
opt_represent = represent % represent_rows[opt_value]
if opt_represent:
opt_list.append([opt_value, opt_represent])
else:
# Straight string representations of the values
opt_list = [(opt_value, "%s" % opt_value) for opt_value in opt_values if opt_value]
options = dict(opt_list)
# Dummy field
dummy_field = Storage(name=self.name,
type=field_type,
requires=IS_IN_SET(options,
multiple=True))
# on many-to-many fields the user can search for records containing
# all the options or any of the options.
if len(options) > 1 and field_type[:4] == "list":
self.filter_type = vars.get("%s_filter" % self.name, "any")
any_all = DIV(
T("Filter type "),
INPUT(_name="%s_filter" % self.name,
_id="%s_filter_all" % self.name,
_type="radio",
_value="all",
value=self.filter_type),
LABEL(T("All"),
_for="%s_filter_all" % self.name),
INPUT(_name="%s_filter" % self.name,
_id="%s_filter_any" % self.name,
_type="radio",
_value="any",
value=self.filter_type),
LABEL(T("Any"),
_for="%s_filter_any" % self.name),
_class="s3-checkboxes-widget-filter"
)
else:
any_all = ""
return TAG[""](any_all,
s3_grouped_checkboxes_widget(dummy_field,
value,
**attr))
# -------------------------------------------------------------------------
def query(self, resource, value):
"""
Returns a sub-query for this search option
@param resource: the resource to search in
@param value: the value returned from the widget
"""
field_name = self.field
if value:
if not isinstance(value, (list, tuple)):
value = [value]
fs = S3FieldSelector(field_name)
fl = fs.resolve(resource)
try:
#table_field = resource.table[field_name]
table_field = fl.field
except:
table_field = None
# What do we do if we need to search within a virtual field
# that is a list:* ?
if table_field and str(table_field.type).startswith("list"):
query = None
if self.filter_type == "any":
query = S3FieldSelector(field_name).anyof(value)
else:
query = S3FieldSelector(field_name).contains(value)
elif "None" in value:
# Needs special handling (doesn't show up in 'belongs')
query = S3FieldSelector(field_name) == None
opts = [v for v in value if v != "None"]
if opts:
query = query | S3FieldSelector(field_name).belongs(opts)
else:
query = S3FieldSelector(field_name).belongs(value)
return query
else:
return None
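    # Query sketch (hypothetical field "status"): value=["1", "2"] yields a
    # belongs() sub-query, value=["None", "1"] yields (status == None) |
    # belongs(["1"]), and for list:-type fields the "any"/"all" filter
    # selects between anyof() and contains().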
# =============================================================================
class S3SearchLocationWidget(S3SearchWidget):
"""
Interactive location search widget
- allows the user to select a BBOX & have only results from within
that BBOX returned
@ToDo: Have an option to use a Circular Radius
http://openlayers.org/dev/examples/regular-polygons.html
@ToDo: Have an option to use a full Polygon
Hard to process this as a resource filter
"""
def __init__(self,
field="location_id",
name=None, # Needs to be specified by caller
**attr):
"""
Initialise parent class & make any necessary modifications
"""
S3SearchWidget.__init__(self, field, name, **attr)
# -------------------------------------------------------------------------
def widget(self, resource, vars):
"""
Returns the widget
@param resource: the resource to search in
@param vars: the URL GET variables as dict
"""
format = current.auth.permission.format
if format == "plain":
return None
try:
from shapely.wkt import loads as wkt_loads
except ImportError:
s3_debug("WARNING: %s: Shapely GIS library not installed" % __name__)
return None
T = current.T
# Components
if "comment" not in self.attr:
self.attr.update(comment=T("Draw a square to limit the results to just those within the square."))
#self.attr.update(comment="%s|%s|%s" % (T("Draw a Polygon around the area to which you wish to restrict your search."),
# T("Click on the map to add the points that make up your polygon. Double-click to finish drawing."),
# T("To activate Freehand mode, hold down the shift key.")))
self.comment = self.attr.comment
# Hidden Field to store the Polygon value in
polygon_input = INPUT(_id="gis_search_polygon_input",
_name=self.attr._name,
_class="hide")
# Map Popup
# - not added as we reuse the one that comes with dataTables
# Button to open the Map
OPEN_MAP = T("Open Map")
map_button = A(OPEN_MAP,
_style="cursor:pointer; cursor:hand",
_id="gis_search_map-btn")
# Settings to be read by static/scripts/S3/s3.gis.js
js_location_search = """S3.gis.draw_polygon = true;"""
# The overall layout of the components
return TAG[""](
polygon_input,
map_button,
#map_popup,
SCRIPT(js_location_search)
)
# -------------------------------------------------------------------------
@staticmethod
def query(resource, value):
"""
Returns a sub-query for this search option
@param resource: the resource to search in
@param value: the value returned from the widget: WKT format
"""
if value:
# @ToDo:
# if current.deployment_settings.get_gis_spatialdb():
# # Use PostGIS-optimised routine
# query = (S3FieldSelector("location_id$the_geom").st_intersects(value))
# else:
from shapely.wkt import loads as wkt_loads
try:
shape = wkt_loads(value)
except:
s3_debug("WARNING: S3Search: Invalid WKT")
return None
bounds = shape.bounds
lon_min = bounds[0]
lat_min = bounds[1]
lon_max = bounds[2]
lat_max = bounds[3]
# Return all locations which have a part of themselves inside the BBOX
# This requires the locations to have their bounds set properly
# This can be done globally using:
# gis.update_location_tree()
query = (S3FieldSelector("location_id$lat_min") <= lat_max) & \
(S3FieldSelector("location_id$lat_max") >= lat_min) & \
(S3FieldSelector("location_id$lon_min") <= lon_max) & \
(S3FieldSelector("location_id$lon_max") >= lon_min)
return query
else:
return None
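    # Sketch (hypothetical WKT input): for the value
    #   "POLYGON((30 10, 40 40, 20 40, 30 10))"
    # shape.bounds is (20.0, 10.0, 40.0, 40.0), i.e.
    # (lon_min, lat_min, lon_max, lat_max), and the query keeps every
    # location whose stored bounds overlap that box.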
# =============================================================================
class S3SearchCredentialsWidget(S3SearchOptionsWidget):
"""
Options Widget to search for HRMs with specified Credentials
"""
def widget(self, resource, vars):
c = current.manager.define_resource("hrm", "credential")
return S3SearchOptionsWidget.widget(self, c, vars)
# -------------------------------------------------------------------------
@staticmethod
def query(resource, value):
if value:
s3db = current.s3db
htable = s3db.hrm_human_resource
ptable = s3db.pr_person
ctable = s3db.hrm_credential
query = (htable.person_id == ptable.id) & \
(htable.deleted != True) & \
(ctable.person_id == ptable.id) & \
(ctable.deleted != True) & \
(ctable.job_role_id.belongs(value))
return query
else:
return None
# =============================================================================
class S3SearchSkillsWidget(S3SearchOptionsWidget):
"""
Options Widget to search for HRMs with specified Skills
@ToDo: Provide a filter for confirmed/unconfirmed only
(latter useful to see who needs confirming)
@ToDo: Provide a filter for level of competency
- meanwhile at least sort by level of competency
"""
# -------------------------------------------------------------------------
def widget(self, resource, vars):
c = current.manager.define_resource("hrm", "competency")
return S3SearchOptionsWidget.widget(self, c, vars)
# -------------------------------------------------------------------------
@staticmethod
def query(resource, value):
if value:
s3db = current.s3db
htable = s3db.hrm_human_resource
ptable = s3db.pr_person
ctable = s3db.hrm_competency
query = (htable.person_id == ptable.id) & \
(htable.deleted != True) & \
(ctable.person_id == ptable.id) & \
(ctable.deleted != True) & \
(ctable.skill_id.belongs(value))
return query
else:
return None
# =============================================================================
class S3Search(S3CRUD):
"""
RESTful Search Method for S3Resources
"""
def __init__(self, simple=None, advanced=None, any=False, **args):
"""
Constructor
@param simple: the widgets for the simple search form as list
@param advanced: the widgets for the advanced search form as list
@param any: match "any of" (True) or "all of" (False) the options
in advanced search
"""
S3CRUD.__init__(self)
args = Storage(args)
if simple is None:
if "field" in args:
if "name" in args:
name = args.name
elif "_name" in args:
name = args._name
else:
name = "search_simple"
simple = S3SearchSimpleWidget(field=args.field,
name=name,
label=args.label,
comment=args.comment)
# Create a list of Simple search form widgets, by name,
# and throw an error if a duplicate is found
names = []
self.simple = []
if not isinstance(simple, (list, tuple)):
simple = [simple]
for widget in simple:
if widget is not None:
name = widget.attr._name
if name in names:
raise SyntaxError("Duplicate widget: %s") % name
# Widgets should be able to have default names
# elif not name:
# raise SyntaxError("Widget with no name")
else:
self.simple.append((name, widget))
names.append(name)
# Create a list of Advanced search form widgets, by name,
# and throw an error if a duplicate is found
names = []
self.advanced = []
append = self.advanced.append
if not isinstance(advanced, (list, tuple)):
advanced = [advanced]
for widget in advanced:
if widget is not None:
name = widget.attr._name
if name in names:
raise SyntaxError("Duplicate widget: %s" % name)
# Widgets should be able to have default names
# elif not name:
# raise SyntaxError("Widget with no name")
else:
append((name, widget))
names.append(name)
self.__any = any
if self.simple or self.advanced:
self.__interactive = True
else:
self.__interactive = False
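    # Configuration sketch (hypothetical resource & fields):
    #   search_method = S3Search(
    #       simple=(S3SearchSimpleWidget(name="office_search_text",
    #                                    label=T("Search"),
    #                                    field=["name", "comment"]),),
    #       advanced=(S3SearchSimpleWidget(name="office_search_advanced",
    #                                      label=T("Search"),
    #                                      field=["name", "comment"]),
    #                 S3SearchOptionsWidget(name="office_search_org",
    #                                       label=T("Organization"),
    #                                       field="organisation_id")))
    #   current.s3db.configure("org_office", search_method=search_method)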
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Entry point to apply search method to S3Requests
@param r: the S3Request
@param attr: request attributes
"""
format = r.representation
output = dict()
if r.component and self != self.resource.search:
output = self.resource.search(r, **attr)
# Autocomplete-Widget
elif "is_autocomplete" in attr:
output = self.search_autocomplete(r, **attr)
# Save search
elif "save" in r.vars :
r.interactive = False
output = self.save_search(r, **attr)
# Interactive or saved search
elif "load" in r.vars or \
r.interactive and self.__interactive:
output = self.search_interactive(r, **attr)
# SSPag response => CRUD native
elif format == "aadata" and self.__interactive:
output = self.select(r, **attr)
# JSON search
elif format == "json":
output = self.search_json(r, **attr)
# Autocomplete-JSON search
elif format == "acjson":
output = self.search_json_autocomplete(r, **attr)
# Search form for popup on Map Layers
elif format == "plain":
output = self.search_interactive(r, **attr)
# Not supported
else:
r.error(501, current.manager.ERROR.BAD_FORMAT)
return output
# -------------------------------------------------------------------------
@staticmethod
def _build_widget_query(resource, name, widget, form, query):
"""
            Build the sub-query for one search widget and combine it with
            the given master query

            @return: tuple (query, errors)
"""
errors = None
if hasattr(widget, "names"):
value = Storage([(name, form.vars[name])
for name in widget.names
if name in form.vars])
elif name in form.vars:
value = form.vars[name]
else:
value = None
if hasattr(widget, "validate"):
errors = widget.validate(resource, value)
if not errors:
q = widget.query(resource, value)
if q is not None:
if query is None:
query = q
else:
query = query & q
return (query, errors)
# -------------------------------------------------------------------------
def save_search_widget(self, r, search_vars, **attr):
"""
Add a widget to a Search form to allow saving this search to the
user's profile, to which they can subscribe
"""
T = current.T
db = current.db
request = self.request
user_id = current.session.auth.user.id
now = request.utcnow.microsecond
save_search_btn_id = "save_my_filter_btn_%s" % now
save_search_processing_id = "save_search_processing_%s" % now
save_search_a_id = "save_search_a_%s" % now
arg = "%s/save_search" % user_id
save_search_a = DIV(T("View and Subscribe to Saved Searches"),
A(T("Here"),
_href=URL(r=request, c="pr", f="person",
args=[arg]),
_target="_blank"
),
".",
_id=save_search_a_id,
_class="save_search_a"
)
search_vars["prefix"] = r.controller
search_vars["function"] = r.function
table = current.s3db.pr_save_search
rows = db(table.user_id == user_id).select(table.ALL)
if rows:
import cPickle
for row in rows:
pat = "_"
s_v = cPickle.loads(row.search_vars)
if ((search_vars["prefix"] == s_v["prefix"]) and \
(search_vars["function"] == s_v["function"])):
s_dict = s_v["criteria"]
if "criteria" in search_vars:
c_dict = search_vars["criteria"]
else:
break
diff = [ k for k in c_dict if k not in s_dict ]
if not len(diff):
flag = 1
for j in s_dict.iterkeys():
if not re.match(pat, j):
if c_dict[j] != s_dict[j]:
flag = 0
break
if flag == 1:
return DIV(save_search_a,
_style="font-size:12px;padding:5px 0px 5px 90px;",
_id="save_search"
)
save_search_btn = A("Save Search",
_class="save_search_btn",
_id=save_search_btn_id,
_href="#",
_title=T("Save this search"))
save_search_a["_style"] = "display:none;"
save_search_processing = IMG(_src="/%s/static/img/ajax-loader.gif" % request.application,
_id=save_search_processing_id,
_class="save_search_processing_id",
_style="display:none;"
)
s_var = {}
s_var["save"] = True
jurl = URL(r=request, c=r.controller, f=r.function,
args=["search"], vars=s_var)
save_search_script = \
'''$('#%s').live('click',function(){
$('#%s').show()
$('#%s').hide()
$.ajax({
url:'%s',
data:'%s',
success:function(data){
$('#%s').show()
$('#%s').hide()
},
type:'POST'
})
return false
})''' % (save_search_btn_id,
save_search_processing_id,
save_search_btn_id,
jurl,
json.dumps(search_vars),
save_search_a_id,
save_search_processing_id)
current.response.s3.jquery_ready.append(save_search_script)
widget = DIV(save_search_processing,
save_search_a,
save_search_btn,
_style="font-size:12px;padding:5px 0px 5px 90px;",
_id="save_search"
)
return widget
# -------------------------------------------------------------------------
def search_interactive(self, r, **attr):
"""
Interactive search
@param r: the S3Request instance
@param attr: request parameters
"""
# Get environment
T = current.T
session = current.session
request = self.request
response = current.response
s3 = response.s3
resource = self.resource
settings = current.deployment_settings
db = current.db
s3db = current.s3db
gis = current.gis
table = self.table
tablename = self.tablename
# Get representation
representation = r.representation
# Initialize output
output = dict()
# Get table-specific parameters
config = self._config
sortby = config("sortby", [[1, "asc"]])
orderby = config("orderby", None)
list_fields = config("list_fields")
insertable = config("insertable", True)
# Initialize the form
form = DIV(_class="search_form form-container")
# Figure out which set of form values to use
# POST > GET > session > unfiltered
if r.http == "POST":
# POST
form_values = r.post_vars
else:
url_options = Storage([(k, v) for k, v in r.get_vars.iteritems() if v])
if url_options:
# GET
form_values = url_options
else:
session_options = session.s3.search_options
if session_options and tablename in session_options:
# session
session_options = session_options[tablename]
else:
# unfiltered
session_options = Storage()
form_values = session_options
# Build the search forms
simple_form, advanced_form = self.build_forms(r, form_values)
# Check for Load Search
if "load" in r.get_vars:
search_id = r.get_vars.get("load", None)
if not search_id:
r.error(400, current.manager.ERROR.BAD_RECORD)
r.post_vars = r.vars
search_table = s3db.pr_save_search
_query = (search_table.id == search_id)
            record = db(_query).select(search_table.search_vars,
                                       limitby=(0, 1)).first()
if not record:
r.error(400, current.manager.ERROR.BAD_RECORD)
import cPickle
s_vars = cPickle.loads(record.search_vars)
r.post_vars = Storage(s_vars["criteria"])
r.http = "POST"
# Process the search forms
query, errors = self.process_forms(r,
simple_form,
advanced_form,
form_values)
search_url = None
if not errors:
if hasattr(query, "serialize_url"):
search_url = r.url(method = "",
vars = query.serialize_url(resource))
resource.add_filter(query)
search_vars = dict(simple=False,
advanced=True,
criteria=form_values)
else:
search_vars = dict()
if representation == "plain":
# Map popup filter
# Return just the advanced form, no results
form.append(advanced_form)
output["item"] = form
response.view = self._view(r, "plain.html")
return output
if s3.simple_search:
form.append(DIV(_id="search-mode", _mode="simple"))
else:
form.append(DIV(_id="search-mode", _mode="advanced"))
# Save Search Widget
if session.auth and settings.get_save_search_widget():
save_search = self.save_search_widget(r, search_vars, **attr)
else:
save_search = DIV()
# Complete the output form-DIV()
if simple_form is not None:
simple_form.append(save_search)
form.append(simple_form)
if advanced_form is not None:
advanced_form.append(save_search)
form.append(advanced_form)
output["form"] = form
# Build session filter (for SSPag)
if not s3.no_sspag:
limit = 1
ids = resource.get_id()
if ids:
if not isinstance(ids, list):
ids = str(ids)
else:
ids = ",".join([str(i) for i in ids])
session.s3.filter = {"%s.id" % resource.name: ids}
else:
limit = None
# List fields
linkto = self._linkto(r)
if not list_fields:
fields = resource.readable_fields()
list_fields = [f.name for f in fields]
else:
fields = [table[f] for f in list_fields if f in table.fields]
        if not fields:
            fields = []
        if not fields or fields[0].name != table.fields[0]:
            fields.insert(0, table[table.fields[0]])
if list_fields[0] != table.fields[0]:
list_fields.insert(0, table.fields[0])
if not orderby:
orderby = fields[0]
# Truncate long texts
if r.interactive or representation == "aadata":
for f in table:
if str(f.type) == "text" and not f.represent:
f.represent = self.truncate
# Get the result table
items = resource.sqltable(fields=list_fields,
limit=limit,
orderby=orderby,
distinct=True,
linkto=linkto,
download_url=self.download_url,
format=representation)
# Remove the dataTables search box to avoid confusion
s3.dataTable_NobFilter = True
if items:
if not s3.no_sspag:
# Pre-populate SSPag cache (avoids the 1st Ajax request)
totalrows = resource.count(distinct=True)
if totalrows:
if s3.dataTable_iDisplayLength:
limit = 2 * s3.dataTable_iDisplayLength
else:
limit = 50
sqltable = resource.sqltable(fields=list_fields,
start=0,
limit=limit,
orderby=orderby,
distinct=True,
linkto=linkto,
download_url=self.download_url,
as_page=True,
format=representation)
aadata = dict(aaData=sqltable or [])
aadata.update(iTotalRecords=totalrows,
iTotalDisplayRecords=totalrows)
response.aadata = jsons(aadata)
s3.start = 0
s3.limit = limit
        else:
items = self.crud_string(tablename, "msg_no_match")
output["items"] = items
output["sortby"] = sortby
if isinstance(items, DIV):
filter = session.s3.filter
app = request.application
# Permalink
if search_url:
link = A(T("Link to this result"),
_href=search_url,
_class="permalink")
sep = " | "
else:
link = sep = ""
list_formats = DIV(link, sep,
"%s: " % T("Export to"),
A(IMG(_src="/%s/static/img/pdficon_small.gif" % app),
_title=T("Export in PDF format"),
_href=r.url(method="", representation="pdf",
vars=filter)),
A(IMG(_src="/%s/static/img/icon-xls.png" % app),
_title=T("Export in XLS format"),
_href=r.url(method="", representation="xls",
vars=filter)),
A(IMG(_src="/%s/static/img/RSS_16.png" % app),
_title=T("Export in RSS format"),
_href=r.url(method="", representation="rss",
vars=filter)),
_id="list_formats")
tabs = []
if "location_id" in table or \
"site_id" in table:
# Add a map for search results
# (this same map is also used by the Map Search Widget, if-present)
tabs.append((T("Map"), "map"))
app = request.application
list_formats.append(A(IMG(_src="/%s/static/img/kml_icon.png" % app),
_title=T("Export in KML format"),
_href=r.url(method="",
representation="kml",
vars=filter)),
)
# Build URL to load the features onto the map
if query:
vars = query.serialize_url(resource=resource)
else:
vars = None
url = URL(extension="geojson",
args=None,
vars=vars)
feature_resources = [{
"name" : T("Search Results"),
"id" : "search_results",
"url" : url,
"active" : False, # Gets activated when the Map is opened up
"marker" : gis.get_marker(request.controller, request.function)
}]
map_popup = gis.show_map(
feature_resources=feature_resources,
# Added by search widget onClick in s3.dataTables.js
#add_polygon = True,
#add_polygon_active = True,
catalogue_layers=True,
legend=True,
toolbar=True,
collapsed=True,
#search = True,
window=True,
window_hide=True
)
s3.dataTableMap = map_popup
if settings.has_module("msg") and \
("pe_id" in table or "person_id" in table):
# Provide the ability to Message person entities in search results
tabs.append((T("Message"), "compose"))
if tabs:
tabs.insert(0, ((T("List"), None)))
else:
list_formats = ""
tabs = []
# Search Tabs
search_tabs = s3_search_tabs(r, tabs)
output["search_tabs"] = search_tabs
# List Formats
output["list_formats"] = list_formats
# Title and subtitle
output["title"] = self.crud_string(tablename, "title_search")
output["subtitle"] = self.crud_string(tablename, "msg_match")
# View
response.view = self._view(r, "search.html")
# RHeader gets added later in S3Method()
return output
# -------------------------------------------------------------------------
def process_forms(self, r, simple_form, advanced_form, form_values):
"""
Validate the form values against the forms. If valid, generate
and return a query object. Otherwise return an empty query and
the errors.
            If valid, save the values into the user's session.
"""
s3 = current.session.s3
query = None
errors = None
        # Create a container in the session to save search options
if "search_options" not in s3:
s3.search_options = Storage()
# Process the simple search form:
simple = simple_form is not None
if simple_form is not None:
if simple_form.accepts(form_values,
formname="search_simple"):
for name, widget in self.simple:
query, errors = self._build_widget_query(self.resource,
name,
widget,
simple_form,
query)
if errors:
simple_form.errors.update(errors)
errors = simple_form.errors
# Save the form values into the session
s3.search_options[self.tablename] = \
Storage([(k, v) for k, v in form_values.iteritems() if v])
elif simple_form.errors:
errors = simple_form.errors
                return (query, errors)
# Process the advanced search form:
if advanced_form is not None:
if advanced_form.accepts(form_values,
formname="search_advanced"):
simple = False
for name, widget in self.advanced:
query, errors = self._build_widget_query(self.resource,
name,
widget,
advanced_form,
query)
if errors:
advanced_form.errors.update(errors)
errors = advanced_form.errors
# Save the form values into the session
s3.search_options[self.tablename] = \
Storage([(k, v) for k, v in form_values.iteritems() if v])
elif advanced_form.errors:
simple = False
current.response.s3.simple_search = simple
return (query, errors)
# -------------------------------------------------------------------------
def build_forms(self, r, form_values=None):
"""
Builds a form customised to the module/resource. Includes a link
to the create form for this resource.
"""
simple = self.simple
advanced = self.advanced
T = current.T
tablename = self.tablename
representation = r.representation
simple_form = None
advanced_form = None
# Add-link (common to all forms)
ADD = self.crud_string(tablename, "label_create_button")
href_add = r.url(method="create", representation=representation)
insertable = self._config("insertable", True)
authorised = self.permit("create", tablename)
if authorised and insertable and representation != "plain":
add_link = self.crud_button(ADD, _href=href_add,
_id="add-btn", _class="action-lnk")
else:
add_link = ""
# Simple search form
if simple:
# Switch-link
if advanced:
switch_link = A(T("Advanced Search"), _href="#",
_class="action-lnk advanced-lnk")
else:
switch_link = ""
simple_form = self._build_form(simple,
form_values=form_values,
add=add_link,
switch=switch_link,
_class="simple-form")
# Advanced search form
if advanced:
if simple and not r.representation == "plain":
switch_link = A(T("Simple Search"), _href="#",
_class="action-lnk simple-lnk")
_class = "%s hide"
else:
switch_link = ""
_class = "%s"
advanced_form = self._build_form(advanced,
form_values=form_values,
add=add_link,
switch=switch_link,
_class=_class % "advanced-form")
return (simple_form, advanced_form)
# -------------------------------------------------------------------------
def _build_form(self, widgets, form_values=None, add="", switch="", **attr):
"""
            Build a search form from the given (name, widget) list
"""
T = current.T
request = self.request
resource = self.resource
trows = []
for name, widget in widgets:
_widget = widget.widget(resource, form_values)
if _widget is None:
# Skip this widget as we have nothing but the label
continue
label = widget.field
if isinstance(label, (list, tuple)) and len(label):
label = label[0]
comment = ""
if hasattr(widget, "attr"):
label = widget.attr.get("label", label)
comment = widget.attr.get("comment", comment)
tr = TR(TD("%s: " % label, _class="w2p_fl"), _widget)
if comment:
tr.append(DIV(DIV(_class="tooltip",
_title="%s|%s" % (label, comment))))
trows.append(tr)
trows.append(TR("", TD(INPUT(_type="submit", _value=T("Search")),
switch, add)))
form = FORM(TABLE(trows), **attr)
return form
# -------------------------------------------------------------------------
def search_json(self, r, **attr):
"""
JSON search method for S3AutocompleteWidget
@param r: the S3Request
@param attr: request attributes
"""
output = None
_vars = self.request.vars
# JQueryUI Autocomplete uses "term" instead of "value"
# (old JQuery Autocomplete uses "q" instead of "value")
value = _vars.value or _vars.term or _vars.q or None
# We want to do case-insensitive searches
# (default anyway on MySQL/SQLite, but not PostgreSQL)
        if value:
            value = value.lower().strip()
if _vars.field and _vars.filter and value:
s3db = current.s3db
resource = self.resource
table = self.table
limit = int(_vars.limit or 0)
fieldname = str.lower(_vars.field)
field = table[fieldname]
# Default fields to return
fields = [table.id, field]
if self.tablename == "org_site":
# Simpler to provide an exception case than write a whole new class
table = s3db.org_site
fields.append(table.instance_type)
filter = _vars.filter
if filter == "~":
# Normal single-field Autocomplete
query = (field.lower().like(value + "%"))
elif filter == "=":
if field.type.split(" ")[0] in \
["reference", "id", "float", "integer"]:
# Numeric, e.g. Organizations' offices_by_org
query = (field == value)
else:
# Text
query = (field.lower() == value)
elif filter == "<":
query = (field < value)
elif filter == ">":
query = (field > value)
else:
output = current.xml.json_message(
False,
400,
"Unsupported filter! Supported filters: ~, =, <, >")
raise HTTP(400, body=output)
# Exclude records which are already linked:
# ?link=<linktablename>.<leftkey>.<id>.<rkey>.<fkey>
# e.g. ?link=project_organisation.organisation_id.5.project_id.id
if "link" in _vars:
try:
link, lkey, _id, rkey, fkey = _vars.link.split(".")
linktable = s3db[link]
fq = (linktable[rkey] == table[fkey]) & \
(linktable[lkey] == _id)
linked = current.db(fq).select(table._id)
exclude = (~(table._id.belongs([r[table._id.name]
for r in linked])))
except Exception, e:
pass # ignore
else:
query &= exclude
# Select only or exclude template records:
# to only select templates:
# ?template=<fieldname>.<value>,
# e.g. ?template=template.true
# to exclude templates:
# ?template=~<fieldname>.<value>
# e.g. ?template=~template.true
if "template" in _vars:
try:
flag, val = _vars.template.split(".", 1)
if flag[0] == "~":
exclude = True
flag = flag[1:]
else:
exclude = False
ffield = table[flag]
except:
pass # ignore
else:
if str(ffield.type) == "boolean":
if val.lower() == "true":
val = True
else:
val = False
if exclude:
templates = (ffield != val)
else:
templates = (ffield == val)
resource.add_filter(templates)
resource.add_filter(query)
if filter == "~":
if (not limit or limit > MAX_SEARCH_RESULTS) and \
resource.count() > MAX_SEARCH_RESULTS:
output = jsons([dict(id="",
name="Search results are over %d. Please input more characters." \
% MAX_SEARCH_RESULTS)])
if output is None:
output = resource.exporter.json(resource,
start=0,
limit=limit,
fields=fields,
orderby=field)
current.response.headers["Content-Type"] = "application/json"
else:
output = current.xml.json_message(
False,
400,
"Missing options! Require: field, filter & value")
raise HTTP(400, body=output)
return output
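    # Request/response sketch (hypothetical resource): a request like
    #   GET .../org_office/search.json?field=name&filter=~&value=kab&limit=10
    # returns offices whose name starts with "kab" as JSON; when more than
    # MAX_SEARCH_RESULTS records match, a single hint row asking for more
    # characters is returned instead.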
# -------------------------------------------------------------------------
@staticmethod
    def _check_search_autocomplete_search_simple_widget(widget):
        """
            Ensure that the widget is an S3SearchSimpleWidget
"""
if not isinstance(widget, S3SearchSimpleWidget):
raise SyntaxError("First simple widget for Search AutoComplete must be S3SearchSimpleWidget")
# -------------------------------------------------------------------------
def search_autocomplete(self, r, **attr):
"""
Interactive search
@param r: the S3Request instance
@param attr: request parameters
"""
# Get environment
T = current.T
resource = self.resource
vars = self.request.get_vars
resource.clear_query()
# Fieldname of the value for the autocomplete (default to id)
get_fieldname = attr.get("get_fieldname")
fieldname = attr.get("fieldname")
value = attr.get("value")
# Get representation
representation = r.representation
# Initialize output
feature_queries = []
bounds = None
output = dict()
simple = False
# Get table-specific parameters
sortby = self._config("sortby", [[1, "asc"]])
orderby = self._config("orderby", None)
list_fields = self._config("list_fields")
insertable = self._config("insertable", True)
# Initialize the form
form_attr = dict(_class="search_form form-container",
_prefix=resource.prefix,
_resourcename=resource.name,
_fieldname=fieldname,
)
if get_fieldname:
form_attr["_get_fieldname"] = get_fieldname
# Otherwise default get_fieldname is "id"
form = DIV(**form_attr)
# Append the simple search form
if self.simple:
simple = True
if self.advanced:
switch_link = A(T("Advanced Search"), _href="#",
_class="action-lnk advanced-lnk %s",
_fieldname=fieldname)
else:
switch_link = ""
# Only display the S3SearchSimpleWidget (should be first)
name, widget = self.simple[0]
            self._check_search_autocomplete_search_simple_widget(widget)
name = "%s_search_simple_simple" % fieldname
autocomplete_widget = widget.widget(resource,
vars,
name=name,
value=value,
autocomplete="off")
simple_form = DIV(TABLE(autocomplete_widget,
switch_link
),
_class="simple-form")
form.append(simple_form)
# Append the advanced search form
if self.advanced:
trows = []
first_widget = True
for name, widget in self.advanced:
_widget = widget.widget(resource, vars)
if _widget is None:
# Skip this widget as we have nothing but the label
continue
label = widget.field
if first_widget:
                    self._check_search_autocomplete_search_simple_widget(widget)
name = "%s_search_simple_advanced" % fieldname
autocomplete_widget = widget.widget(resource,
vars,
name=name,
value=value,
autocomplete="off")
first_widget = False
else:
if isinstance(label, (list, tuple)) and len(label):
label = label[0]
if hasattr(widget, "attr"):
label = widget.attr.get("label", label)
tr = TR(TD("%s: " % label, _class="w2p_fl"), _widget)
trows.append(tr)
if self.simple:
switch_link = A(T("Simple Search"), _href="#",
_class="action-lnk simple-lnk",
_fieldname=fieldname)
else:
switch_link = ""
if simple:
_class = "hide"
else:
_class = None
advanced_form = DIV(autocomplete_widget,
TABLE(trows),
TABLE(TR(switch_link)),
_class="%s advanced-form" % _class,
#_resourcename = resource.name
)
form.append(advanced_form)
output.update(form=form)
return output
# -------------------------------------------------------------------------
def search_json_autocomplete(self, r, **attr):
"""
            JSON search method for the Search-Autocomplete forms
"""
query = None
errors = True
request = self.request
resource = self.resource
response = current.response
response.headers["Content-Type"] = "application/json"
# Process the simple search form:
if self.simple and request.vars.simple_form:
for name, widget in self.simple:
# Pass request instead of form - it contains the vars
query, errors = self._build_widget_query(resource,
name,
widget,
request,
query)
if errors:
break
# Process the advanced search form:
elif self.advanced:
for name, widget in self.advanced:
# Pass request instead of form - it contains the vars
query, errors = self._build_widget_query(resource,
name,
widget,
request,
query)
if errors:
break
else:
errors = True
resource.add_filter(query)
try:
get_fieldname = request.vars.get("get_fieldname", "id")
field = resource.table[get_fieldname]
except:
errors = True
# How can this be done more elegantly?
resource_represent = { "human_resource":
lambda id: \
response.s3.hrm_human_resource_represent(id,
show_link=True)
}
if get_fieldname == "id":
represent = resource_represent[resource.name]
else:
represent = field.represent
attributes = dict(orderby=field,
limitby=resource.limitby(start=0, limit=11),
distinct=True)
# Get the rows
rows = resource.select(field, **attributes)
if not errors:
output = [{ "id" : row[get_fieldname],
"represent" : str(represent(row[get_fieldname]))
} for row in rows ]
        else:
            output = []
        return jsons(output)
# -------------------------------------------------------------------------
@staticmethod
def save_search(r, **attr):
"""
Save a Search Filter in the user's profile
- db.pr_save_search
"""
search_vars = json.load(r.body)
s_vars = {}
for i in search_vars.iterkeys():
if str(i) == "criteria" :
s_dict = {}
c_dict = search_vars[i]
for j in c_dict.iterkeys():
key = str(j)
s_dict[key] = str(c_dict[j])
s_vars[str(i)] = s_dict
else:
key = str(i)
s_vars[key] = str(search_vars[i])
import cPickle
search_str = cPickle.dumps(s_vars)
table = current.s3db.pr_save_search
query = (table.user_id == current.auth.user_id) & \
(table.search_vars == search_str)
if len(current.db(query).select(table.id)) == 0:
new_search = {}
new_search["search_vars"] = search_str
_id = table.insert(**new_search)
msg = "success"
return msg
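    # Payload sketch (hypothetical values): the request body is JSON such as
    #   {"prefix": "org", "function": "office",
    #    "criteria": {"office_search_text": "kabul"}}
    # which gets string-coerced, pickled and stored in
    # pr_save_search.search_vars.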
# =============================================================================
class S3LocationSearch(S3Search):
"""
Search method with specifics for Location records (hierarchy search)
"""
def search_json(self, r, **attr):
"""
JSON search method for S3LocationAutocompleteWidget
@param r: the S3Request
@param attr: request attributes
"""
output = None
response = current.response
resource = self.resource
table = self.table
# Query comes in pre-filtered to accessible & deletion_status
# Respect response.s3.filter
resource.add_filter(response.s3.filter)
_vars = self.request.vars
limit = int(_vars.limit or 0)
# JQueryUI Autocomplete uses "term"
# old JQuery Autocomplete uses "q"
# what uses "value"?
value = _vars.term or _vars.value or _vars.q or None
# We want to do case-insensitive searches
# (default anyway on MySQL/SQLite, but not PostgreSQL)
if value:
value = value.lower().strip()
query = None
fields = []
field = table.id
if _vars.field and _vars.filter and value:
fieldname = str.lower(_vars.field)
field = table[fieldname]
# Default fields to return
fields = [table.id,
table.name,
table.level,
table.parent,
table.path,
table.uuid,
table.lat,
table.lon,
table.addr_street,
table.addr_postcode]
# Optional fields
if "level" in _vars and _vars.level:
if _vars.level == "null":
level = None
elif "|" in _vars.level:
level = _vars.level.split("|")
else:
level = str.upper(_vars.level)
else:
level = None
if "parent" in _vars and _vars.parent:
if _vars.parent == "null":
parent = None
else:
parent = int(_vars.parent)
else:
parent = None
if "children" in _vars and _vars.children:
if _vars.children == "null":
children = None
else:
children = int(_vars.children)
else:
children = None
if "field2" in _vars and _vars.field2:
fieldname = str.lower(_vars.field2)
field2 = table[fieldname]
else:
field2 = None
if "exclude_field" in _vars:
exclude_field = str.lower(_vars.exclude_field)
if "exclude_value" in _vars:
exclude_value = str.lower(_vars.exclude_value)
else:
exclude_value = None
else:
exclude_field = None
exclude_value = None
filter = _vars.filter
if filter == "~":
if children:
# New LocationSelector
children = current.gis.get_children(children, level=level)
children = children.find(lambda row: \
row.name and value in str.lower(row.name))
output = children.json()
response.headers["Content-Type"] = "application/json"
return output
if exclude_field and exclude_value:
# Old LocationSelector
# Filter out poor-quality data, such as from Ushahidi
query = (field.lower().like(value + "%")) & \
((table[exclude_field].lower() != exclude_value) | \
(table[exclude_field] == None))
elif field2:
# New LocationSelector
query = ((field.lower().like(value + "%")) | \
(field2.lower().like(value + "%")))
else:
# Normal single-field
query = (field.lower().like(value + "%"))
if level:
resource.add_filter(query)
# New LocationSelector or Autocomplete
if isinstance(level, list):
query = (table.level.belongs(level))
elif str.upper(level) == "NULLNONE":
level = None
query = (table.level == level)
else:
query = (table.level == level)
if parent:
# New LocationSelector
resource.add_filter(query)
query = (table.parent == parent)
elif filter == "=":
if field.type.split(" ")[0] in \
["reference", "id", "float", "integer"]:
# Numeric, e.g. Organizations' offices_by_org
query = (field == value)
else:
# Text
if value == "nullnone":
# i.e. old Location Selector
query = (field == None)
else:
query = (field.lower() == value)
if parent:
# i.e. gis_location hierarchical search
resource.add_filter(query)
query = (table.parent == parent)
fields = [table.id,
table.name,
table.level,
table.uuid,
table.parent,
table.lat,
table.lon,
table.addr_street,
table.addr_postcode]
else:
output = current.xml.json_message(
False,
400,
"Unsupported filter! Supported filters: ~, ="
)
raise HTTP(400, body=output)
if not fields:
append = fields.append
for field in table.fields:
append(table[field])
resource.add_filter(query)
if filter == "~":
if (not limit or limit > MAX_SEARCH_RESULTS) and resource.count() > MAX_SEARCH_RESULTS:
output = jsons([dict(id="",
name="Search results are over %d. Please input more characters." \
% MAX_SEARCH_RESULTS)])
elif not parent:
if (not limit or limit > MAX_RESULTS) and resource.count() > MAX_RESULTS:
output = jsons([])
if output is None:
output = resource.exporter.json(resource,
start=0,
limit=limit,
fields=fields,
orderby=field)
response.headers["Content-Type"] = "application/json"
return output
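    # Request sketch (hypothetical values):
    #   GET .../gis_location/search.json?field=name&filter=~&value=ka&level=L1&parent=4
    # returns the L1 locations under parent 4 whose name starts with "ka".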
# =============================================================================
class S3OrganisationSearch(S3Search):
"""
Search method with specifics for Organisation records
- searches name & acronym for both this organisation & the parent of
branches
"""
def search_json(self, r, **attr):
"""
JSON search method for S3OrganisationAutocompleteWidget
@param r: the S3Request
@param attr: request attributes
"""
response = current.response
resource = self.resource
table = self.table
# Query comes in pre-filtered to accessible & deletion_status
# Respect response.s3.filter
resource.add_filter(response.s3.filter)
_vars = self.request.vars # should be request.get_vars?
# JQueryUI Autocomplete uses "term"
# old JQuery Autocomplete uses "q"
# what uses "value"?
value = _vars.term or _vars.value or _vars.q or None
# We want to do case-insensitive searches
# (default anyway on MySQL/SQLite, but not PostgreSQL)
        if value:
            value = value.lower().strip()
filter = _vars.filter
if filter and value:
if filter == "~":
query = (S3FieldSelector("parent.name").lower().like(value + "%")) | \
(S3FieldSelector("parent.acronym").lower().like(value + "%")) | \
(S3FieldSelector("organisation.name").lower().like(value + "%")) | \
(S3FieldSelector("organisation.acronym").lower().like(value + "%"))
else:
output = current.xml.json_message(
False,
400,
"Unsupported filter! Supported filters: ~"
)
raise HTTP(400, body=output)
resource.add_filter(query)
limit = int(_vars.limit or 0)
if (not limit or limit > MAX_SEARCH_RESULTS) and resource.count() > MAX_SEARCH_RESULTS:
output = jsons([dict(id="",
name="Search results are over %d. Please input more characters." \
% MAX_SEARCH_RESULTS)])
else:
btable = current.s3db.org_organisation_branch
field = table.name
field2 = table.acronym
field3 = btable.organisation_id
# Fields to return
fields = [table.id, field, field2, field3]
attributes = dict(orderby=field)
limitby = resource.limitby(start=0, limit=limit)
if limitby is not None:
attributes["limitby"] = limitby
rows = resource.select(*fields, **attributes)
output = []
append = output.append
db = current.db
for row in rows:
name = row[table].name
parent = None
if "org_organisation_branch" in row:
query = (table.id == row[btable].organisation_id)
parent = db(query).select(table.name,
limitby = (0, 1)).first()
if parent:
name = "%s > %s" % (parent.name,
name)
if not parent:
acronym = row[table].acronym
if acronym:
name = "%s (%s)" % (name,
acronym)
record = dict(
id = row[table].id,
name = name,
)
append(record)
output = jsons(output)
response.headers["Content-Type"] = "application/json"
return output
# =============================================================================
class S3PersonSearch(S3Search):
"""
Search method for Persons
"""
def search_json(self, r, **attr):
"""
JSON search method for S3PersonAutocompleteWidget
- full name search
"""
response = current.response
resource = self.resource
# Query comes in pre-filtered to accessible & deletion_status
# Respect response.s3.filter
resource.add_filter(response.s3.filter)
_vars = self.request.vars # should be request.get_vars?
# JQueryUI Autocomplete uses "term"
# old JQuery Autocomplete uses "q"
# what uses "value"?
value = _vars.term or _vars.value or _vars.q or None
if not value:
output = current.xml.json_message(
False,
400,
"No value provided!"
)
raise HTTP(400, body=output)
# We want to do case-insensitive searches
# (default anyway on MySQL/SQLite, but not PostgreSQL)
value = value.lower()
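# Multiple words: match the first word against first_name and the
# second against either middle_name or last_name (as in S3HRSearch)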
if " " in value:
value1, value2 = value.split(" ", 1)
value2 = value2.strip()
query = (S3FieldSelector("first_name").lower().like(value1 + "%")) & \
((S3FieldSelector("middle_name").lower().like(value2 + "%")) | \
(S3FieldSelector("last_name").lower().like(value2 + "%")))
else:
value = value.strip()
query = ((S3FieldSelector("first_name").lower().like(value + "%")) | \
(S3FieldSelector("middle_name").lower().like(value + "%")) | \
(S3FieldSelector("last_name").lower().like(value + "%")))
resource.add_filter(query)
limit = int(_vars.limit or 0)
if (not limit or limit > MAX_SEARCH_RESULTS) and resource.count() > MAX_SEARCH_RESULTS:
output = jsons([dict(id="",
name="Search results are over %d. Please input more characters." \
% MAX_SEARCH_RESULTS)])
else:
fields = ["id",
"first_name",
"middle_name",
"last_name",
]
rows = resource.sqltable(fields=fields,
start=0,
limit=limit,
orderby="pr_person.first_name",
as_rows=True)
if rows:
items = [{
"id" : row.id,
"first" : row.first_name,
"middle" : row.middle_name or "",
"last" : row.last_name or "",
} for row in rows ]
else:
items = []
output = json.dumps(items)
response.headers["Content-Type"] = "application/json"
return output
# =============================================================================
class S3HRSearch(S3Search):
"""
Search method for Human Resources
"""
def search_json(self, r, **attr):
"""
JSON search method for S3HumanResourceAutocompleteWidget
- full name search
- include Organisation & Job Role in the output
"""
resource = self.resource
response = current.response
# Query comes in pre-filtered to accessible & deletion_status
# Respect response.s3.filter
resource.add_filter(response.s3.filter)
_vars = self.request.vars # should be request.get_vars?
# JQueryUI Autocomplete uses "term"
# old JQuery Autocomplete uses "q"
# what uses "value"?
value = _vars.term or _vars.value or _vars.q or None
if not value:
output = current.xml.json_message(
False,
400,
"No value provided!"
)
raise HTTP(400, body=output)
# We want to do case-insensitive searches
# (default anyway on MySQL/SQLite, but not PostgreSQL)
value = value.lower()
if " " in value:
# Multiple words
# - check for match of first word against first_name
# - & second word against either middle_name or last_name
value1, value2 = value.split(" ", 1)
value2 = value2.strip()
query = ((S3FieldSelector("person_id$first_name").lower().like(value1 + "%")) & \
((S3FieldSelector("person_id$middle_name").lower().like(value2 + "%")) | \
(S3FieldSelector("person_id$last_name").lower().like(value2 + "%"))))
else:
# Single word - check for match against any of the 3 names
value = value.strip()
query = ((S3FieldSelector("person_id$first_name").lower().like(value + "%")) | \
(S3FieldSelector("person_id$middle_name").lower().like(value + "%")) | \
(S3FieldSelector("person_id$last_name").lower().like(value + "%")))
resource.add_filter(query)
limit = int(_vars.limit or 0)
if (not limit or limit > MAX_SEARCH_RESULTS) and resource.count() > MAX_SEARCH_RESULTS:
output = jsons([dict(id="",
name="Search results are over %d. Please input more characters." \
% MAX_SEARCH_RESULTS)])
else:
fields = ["id",
"person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
"job_title_id$name",
]
show_orgs = current.deployment_settings.get_hrm_show_organisation()
if show_orgs:
fields.append("organisation_id$name")
rows = resource.sqltable(fields=fields,
start=0,
limit=limit,
orderby="pr_person.first_name",
as_rows=True)
if rows:
items = [{
"id" : row["hrm_human_resource"].id,
"first" : row["pr_person"].first_name,
"middle" : row["pr_person"].middle_name or "",
"last" : row["pr_person"].last_name or "",
"org" : row["org_organisation"].name if show_orgs else "",
"job" : row["hrm_job_title"].name or "",
} for row in rows ]
else:
items = []
output = json.dumps(items)
response.headers["Content-Type"] = "application/json"
return output
# =============================================================================
class S3PentitySearch(S3Search):
"""
Search method with specifics for Pentity records (full name search)
"""
def search_json(self, r, **attr):
"""
Legacy JSON search method (for autocomplete widgets)
@param r: the S3Request
@param attr: request attributes
"""
response = current.response
resource = self.resource
table = self.table
s3db = current.s3db
# Query comes in pre-filtered to accessible & deletion_status
# Respect response.s3.filter
resource.add_filter(response.s3.filter)
_vars = self.request.vars # should be request.get_vars?
# JQueryUI Autocomplete uses "term"
# old JQuery Autocomplete uses "q"
# what uses "value"?
value = _vars.term or _vars.value or _vars.q or None
if not value:
    # Guard against a missing search term (mirrors S3PersonSearch)
    output = current.xml.json_message(
        False,
        400,
        "No value provided!"
    )
    raise HTTP(400, body=output)
# We want to do case-insensitive searches
# (default anyway on MySQL/SQLite, but not PostgreSQL)
value = value.lower()
filter = _vars.filter
limit = int(_vars.limit or 0)
# Persons
if filter and value:
ptable = s3db.pr_person
field = ptable.first_name
field2 = ptable.middle_name
field3 = ptable.last_name
if filter == "~":
# pr_person Autocomplete
if " " in value:
value1, value2 = value.split(" ", 1)
value2 = value2.strip()
# Match the first word against first name & the second word
# against either middle or last name
query = (field.lower().like(value1 + "%")) & \
        ((field2.lower().like(value2 + "%")) | \
         (field3.lower().like(value2 + "%")))
else:
value = value.strip()
query = ((field.lower().like(value + "%")) | \
(field2.lower().like(value + "%")) | \
(field3.lower().like(value + "%")))
resource.add_filter(query)
else:
output = current.xml.json_message(
False,
400,
"Unsupported filter! Supported filters: ~"
)
raise HTTP(400, body=output)
resource.add_filter(ptable.pe_id == table.pe_id)
output = resource.exporter.json(resource, start=0, limit=limit,
fields=[table.pe_id], orderby=field)
items = json.loads(output)
# Add Groups
if filter and value:
gtable = s3db.pr_group
field = gtable.name
query = field.lower().like("%" + value + "%")
resource.clear_query()
resource.add_filter(query)
resource.add_filter(gtable.pe_id == table.pe_id)
output = resource.exporter.json(resource,
start=0,
limit=limit,
fields=[table.pe_id],
orderby=field)
items += json.loads(output)
# Add Organisations
if filter and value:
otable = s3db.org_organisation
field = otable.name
query = field.lower().like("%" + value + "%")
resource.clear_query()
resource.add_filter(query)
resource.add_filter(otable.pe_id == table.pe_id)
output = resource.exporter.json(resource,
start=0,
limit=limit,
fields=[table.pe_id],
orderby=field)
items += json.loads(output)
items = [ { "id" : item[u'pe_id'],
"name" : s3db.pr_pentity_represent(item[u'pe_id'],
show_label=False) }
for item in items ]
output = json.dumps(items)
response.headers["Content-Type"] = "application/json"
return output
# =============================================================================
class S3SearchOrgHierarchyWidget(S3SearchOptionsWidget):
def widget(self, resource, vars):
field_name = self.field
# check the field type
try:
    field = resource.table[field_name]
except (KeyError, AttributeError):
    # Virtual field: no real Field object to hand to the widget
    field = None
    field_type = "virtual"
else:
    field_type = str(field.type)
return S3OrganisationHierarchyWidget()(field, {}, **self.attr)
# END =========================================================================
|
ashwyn/eden-message_parser
|
modules/s3/s3search.py
|
Python
|
mit
| 99,139
|
"""Tests for the Safe Mode integration."""
|
Danielhiversen/home-assistant
|
tests/components/safe_mode/__init__.py
|
Python
|
apache-2.0
| 43
|
#!/usr/bin/python
import sys
sys.path.append('..')
from bcert_pb2 import *
import binascii
# fill out a minimal bitcoin cert
cert = BitcoinCert()
# first the data part (the part is later signed by the "higher level cert" or "the blockchain")
cert.data.version = '0.1'
cert.data.subjectname = 'Foo Project'
email = cert.data.contacts.add()
email.type = email.EMAIL
email.value = 'A@fooproject.com'
email = cert.data.contacts.add()
email.type = email.EMAIL
email.value = 'B@fooproject.com'
url = cert.data.contacts.add()
url.type = url.URL
url.value = 'http://www.fooproject.com'
paykey = cert.data.paymentkeys.add()
paykey.usage = paykey.PAYMENT
paykey.algorithm.type = paykey.algorithm.P2CMULTI # is default anyway
paykey.algorithm.version = '0.1'
paykey.value.append("2")
paykey.value.append("0285b2eb2c0f2e4a12646dbcf38d08c29ef557b5616048575b133a2084a56bb84a".decode('hex'))
paykey.value.append("03ba3137ddbee4e164390b7b67e0975d12969ef23ac1fd7b1f7e880319d072b323".decode('hex'))
# this is standard in bitcoin ripemd(sha256())
from bitcoin import hash_160
# add signature to cert
#sig = cert.signatures.add()
#sig.algorithm.type = sig.algorithm.BCPKI
#sig.algorithm.version = "0.3"
#sig.value = "foo1" # for signatures of type BCPKI the alias IS the value,
# other types place the signature of BitcoinCertDataToHash(certData) here,
# for BCPKI this hash appears in the blockchain instead
# see how the cert looks
print cert
# serialize it
def CertToAscii(cert):
ser = cert.SerializeToString()
crc = binascii.crc32(ser) & 0xffffff # keep only the last 24 bits (should use CRC-24 like OpenPGP)
# OpenPGP uses initializations for its crc-24, see http://tools.ietf.org/html/rfc2440
asc = binascii.b2a_base64(cert.SerializeToString())[:-1] # without trailing newline
asc += '=' # checksum is separated by =
asc += binascii.b2a_base64(('%06x'%crc).decode('hex'))
return asc
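# Illustrative shape of the armored string built above (not real output):
#   "<base64 cert>" + "=" + "<base64 crc24>\n"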
def CertToAsciiMsg(cert):
ver = cert.version
asc = CertToAscii(cert)
res = '-----BEGIN BTCPKI CERTIFICATE-----\n'
res += 'Version: '+cert.version+'\n\n'
res += '\n'.join(asc[i:i+72] for i in xrange(0, len(asc), 72))
res += '\n' # terminate the last base64 line before the END marker
res += '-----END BTCPKI CERTIFICATE-----\n'
return res
# TODO: AsciiToCert
from e import derivepubkey
#print "deriving filename from: "+normalized
#fname = id+'.bcrt'
fname = 'fooproject.bcrt'
f=open(fname,'wb')
f.write(cert.SerializeToString())
f.close()
print "binary cert written to: "+fname
#fname = id+'.acrt'
#f=open(fname,'wb')
#f.write(CertToAscii(cert))
#f.close()
#print "ascii cert written to: "+fname
#fname = 'my.data'
#f=open(fname,'wb')
#f.write(cert.data.SerializeToString())
#f.close()
#print "binary data part written to: "+fname
# see the hash
print "hash of data part is: "+hash_160(cert.data.SerializeToString()).encode('hex')
print "hex binary cert: "+cert.SerializeToString().encode('hex')
#print CertToAscii(cert)
#print CertToAsciiMsg(cert)
# OLD
#from subprocess import Popen,PIPE,check_call,call
#p = Popen(['./bitcoind','-testnet','registeralias','foo3','0.5',hash],stdout=PIPE)
#result = p.stdout.read()
#print result
|
bcpki/bitcoin
|
src/bcert/examples/mk_fooproject.py
|
Python
|
mit
| 3,118
|
#!/usr/bin/env python
"""Utils package for Avro Phonetic.
-------------------------------------------------------------------------------
Copyright (C) 2013 Kaustav Das Modak <kaustav.dasmodak@yahoo.co.in.
This file is part of pyAvroPhonetic.
pyAvroPhonetic is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pyAvroPhonetic is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pyAvroPhonetic. If not, see <http://www.gnu.org/licenses/>.
"""
def utf(text):
"""Shortcut funnction for encoding given text with utf-8"""
try:
#output = unicode(text, encoding='utf-8')
output = text
except UnicodeDecodeError:
output = text
except TypeError:
output = text
return output
|
improlabs/Banglish-Sentiment-Analysis
|
python3/pyavrophonetic/utils/__init__.py
|
Python
|
gpl-3.0
| 1,182
|
import logging
import utils
import options
_Warning = logging.Warning
_Info = logging.Info
#//===========================================================================//
_site_setup = []
_user_setup = {}
_tools_setup = {}
_tools_post_setup = {}
def ResetSetup( site_setup = _site_setup,
user_setup = _user_setup,
tools_setup = _tools_setup,
tools_post_setup = _tools_post_setup ):
if __debug__:
_Info( "ResetSetup" )
del site_setup[:]
user_setup.clear()
tools_setup.clear()
tools_post_setup.clear()
#//===========================================================================//
def AddSiteSetup( setup_function, _site_setup = _site_setup, toList = utils.toList ):
_site_setup.append( setup_function )
def siteSetup( setup_function ):
AddSiteSetup( setup_function )
return setup_function
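# Hypothetical usage (illustrative only):
# @siteSetup
# def mySiteSetup( options, os_env ):
#     os_env['PATH'] = '/opt/tools/bin'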
def SiteSetup( options, os_env ):
global _site_setup
for f in _site_setup:
if __debug__:
_Info( "Site setup: " + f.__name__ )
f( options = options, os_env = os_env )
UserSetup( options, os_env )
#//===========================================================================//
def AddUserSetup( setup_id, setup_function, user_setup = _user_setup ):
user_setup.setdefault( setup_id, [] ).append( setup_function )
def UserSetup( options, os_env, user_setup = _user_setup ):
for s in options.setup.Value():
if __debug__:
_Info( "User setup: " + s )
for f in user_setup.get( s, [] ):
f( options = options, os_env = os_env )
#//===========================================================================//
def AddToolSetup( tool_name, setup_function, tools_setup = _tools_setup, toList = utils.toList ):
tools_setup.setdefault( tool_name, [] ).append( setup_function )
def toolSetup( tool_name ):
def addToolSetup( setup_function ):
AddToolSetup( tool_name, setup_function )
return setup_function
return addToolSetup
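# Hypothetical usage (illustrative only):
# @toolSetup('gcc')
# def myGccSetup( env, options, os_env ):
#     return True # a true value stops further setup functions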
#//===========================================================================//
def _tool_setup( tool_name, env, tools_setup = _tools_setup ):
options = env.get( 'AQL_OPTIONS' )
if options is None:
if __debug__:
_Warning( "Tool setup: No AQL_OPTIONS in env: " + id(env) )
return
options.SetEnv( env )
os_env = env['ENV']
setup_functions = tools_setup.get( tool_name, [] )
if __debug__:
if not setup_functions:
#~ _Info( "Setup tool: No setup for tool: " + tool_name )
return
for f in setup_functions:
if __debug__:
_Info( "Tool setup: " + tool_name + ' (' + f.__name__ + ')' )
if f( env = env, options = options, os_env = os_env ):
break
#//===========================================================================//
def AddToolPostSetup( tool_name, setup_function, tools_post_setup = _tools_post_setup ):
tools_post_setup.setdefault( tool_name, [] ).append( setup_function )
def toolPostSetup( tool_name ):
def addToolPostSetup( setup_function ):
AddToolPostSetup( tool_name, setup_function )
return setup_function
return addToolPostSetup
#//===========================================================================//
def _tool_post_setup( tool_name, env, tools_post_setup = _tools_post_setup ):
options = env.get( 'AQL_OPTIONS' )
if options is None:
return
options.SetEnv( env )
os_env = env['ENV']
setup_functions = tools_post_setup.get( tool_name, [] )
if __debug__:
if not setup_functions:
#~ _Info( "Tool post setup: No setup for tool: " + tool_name )
return
for f in setup_functions:
if __debug__:
_Info( "Tool post setup: " + tool_name + ' (' + f.__name__ + ')' )
f( env = env, options = options, os_env = os_env )
#//===========================================================================//
def _tool_exists( self, env ):
if self._aql_is_exist is None:
_tool_setup( self.name, env )
self._aql_is_exist = self._aql_exists( env )
return self._aql_is_exist
#//===========================================================================//
def _tool_generate( self, env ):
if self._aql_is_exist is None:
if not _tool_exists( self, env ):
_Warning( "Tool: '%s' has not been found, but it has been added." % (self.name) )
self._aql_generate( env )
_tool_post_setup( self.name, env )
#//===========================================================================//
def _init_tool( self, name, toolpath = [], **kw ):
_SCons_Tool_Tool_init( self, name, toolpath, **kw )
self._aql_is_exist = None
self._aql_generate = self.generate
self._aql_exists = self.exists
self.exists = lambda env, self = self: _tool_exists( self, env )
self.generate = lambda env, self = self: _tool_generate( self, env )
#//===========================================================================//
import SCons.Tool
_SCons_Tool_Tool_init = SCons.Tool.Tool.__init__
SCons.Tool.Tool.__init__ = _init_tool
|
menify/sandbox
|
trunk/setup.py
|
Python
|
mit
| 5,383
|
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'geopastebin',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
PROJECT_ROOT = os.path.dirname(__file__)
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
MEDIA_URL = '/media/'
OLWIDGET_MEDIA_URL = '/static/olwidget/'
ADMIN_MEDIA_PREFIX = '/media/'
SECRET_KEY = 'notverysecret'
ROOT_URLCONF = 'example_project.urls'
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.admindocs',
# These apps required for geopastebin.
'django.contrib.gis',
'geopastebin',
'olwidget',
)
# Include any settings in local_settings.py
try:
from local_settings import *
except ImportError:
pass
|
pragmaticbadger/geopastebin
|
example_project/settings.py
|
Python
|
bsd-3-clause
| 1,183
|
df14['D'].day()
# D
# ---
# 18
# 19
# 20
|
mathemage/h2o-3
|
h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_display_day_of_month.py
|
Python
|
apache-2.0
| 46
|
# Copyright (C) 2012 Equinor ASA, Norway.
#
# The file 'enkf_state.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from cwrap import BaseCClass
from res import ResPrototype
from res.enkf.enums import EnkfInitModeEnum, EnkfVarType
class EnKFState(BaseCClass):
TYPE_NAME = "enkf_state"
_free = ResPrototype("void enkf_state_free( enkf_state )")
_get_ens_config = ResPrototype(
"ens_config_ref enkf_state_get_ensemble_config( enkf_state )"
)
_initialize = ResPrototype(
"void enkf_state_initialize( enkf_state , enkf_fs , stringlist , enkf_init_mode_enum)"
)
_forward_model_OK = ResPrototype(
"bool enkf_state_complete_forward_modelOK(res_config, run_arg)", bind=False
)
_forward_model_EXIT = ResPrototype(
"bool enkf_state_complete_forward_model_EXIT_handler__(run_arg)", bind=False
)
def __init__(self):
raise NotImplementedError("Class can not be instantiated directly!")
def free(self):
self._free()
def ensembleConfig(self):
"""@rtype: EnsembleConfig"""
return self._get_ens_config()
def initialize(
self, fs, param_list=None, init_mode=EnkfInitModeEnum.INIT_CONDITIONAL
):
if param_list is None:
ens_config = self.ensembleConfig()
param_list = ens_config.getKeylistFromVarType(EnkfVarType.PARAMETER)
self._initialize(fs, param_list, init_mode)
@classmethod
def forward_model_exit_callback(cls, args):
return cls._forward_model_EXIT(args[0])
@classmethod
def forward_model_ok_callback(cls, args):
return cls._forward_model_OK(args[1], args[0])
|
joakim-hove/ert
|
res/enkf/enkf_state.py
|
Python
|
gpl-3.0
| 2,195
|
# -*- coding: utf-8 -*-
"""
sphinxcontrib.chapeldomain
~~~~~~~~~~~~~~~~~~~~~~~~~~
The Chapel language domain.
:copyright: Copyright 2015 by Chapel Team
:license: Apache v2.0, see LICENSE for details.
Chapel website: http://chapel.cray.com/
Chapel spec: http://chapel.cray.com/language.html
"""
import re
from typing import Dict, List, Tuple
from docutils import nodes
from docutils.nodes import Node
from docutils.parsers.rst.states import Inliner
from docutils.parsers.rst import directives
from six import iteritems
from sphinx import addnodes
from sphinx.environment import BuildEnvironment
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, Index, ObjType
from sphinx.locale import _
from sphinx.roles import XRefRole
from docutils.parsers.rst import Directive
from sphinx.util.docfields import Field, GroupedField, TypedField
from sphinx.util.nodes import make_refnode
from sphinxcontrib.chapeldomain.chapel import ChapelLexer
VERSION = '0.0.21'
# regex for parsing proc, iter, class, record, etc.
chpl_sig_pattern = re.compile(
r"""^ ((?:\w+\s+)* # optional: prefixes
(?:proc|iter|class|record)\s+ # must end with keyword
(?:type\s+|param\s+)? # optional: type or param method
)?
([\w$.]*\.)? # class name(s)
([\w\+\-/\*$\<\=\>\!]+) \s* # function or method name
(?:\((.*?)\))? # optional: arguments
(\s+(?:const\s)? \w+| # or return intent
\s* : \s* [^:]+| # or return type
\s+(?:const\s)? \w+\s* : \s* [^:]+ # or return intent and type
)?
$""", re.VERBOSE)
# regex for parsing attribute and data directives.
chpl_attr_sig_pattern = re.compile(
r"""^ ((?:\w+\s+)*)? # optional: prefixes
([\w$.]*\.)? # class name(s)
([\w$]+) # const, var, param, etc name
(\s* [:={] \s* .+)? # optional: type, default value
$""", re.VERBOSE)
# This would be the ideal way to create a chapelerific desc_returns similar to
# addnodes.desc_returns. However, due to some update issue, the
# nodes._add_node_class_names() call does not seem to make chapel_desc_returns
# to the sphinx html write. So, we'll just use addnodes.desc_returns
# directly. :-\ (thomasvandoren, 2015-02-19)
# class chapel_desc_returns(addnodes.desc_type):
# """Node for a "returns" annotation."""
# nodes._add_node_class_names([chapel_desc_returns.__name__])
class ChapelTypedField(TypedField):
"""Override TypedField in order to change output format."""
def make_field(self, types: Dict[str, List[Node]], domain: str,
items: Tuple, env: BuildEnvironment = None,
inliner: Inliner = None,
location: Node = None) -> nodes.field:
"""Copy+Paste of TypedField.make_field() from Sphinx version 4.3.2. The first
and second nodes.Text() instances are changed in this implementation to
be ' : ' and '' respectively (instead of ' (' and ')').
TODO: Ask sphinx devs if there is a better way to support
this that is less copy+pasty. (thomasvandoren, 2015-03-17)
"""
def handle_item(fieldarg: str, content: str) -> nodes.paragraph:
par = nodes.paragraph()
par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
addnodes.literal_strong, env=env))
if fieldarg in types:
par += nodes.Text(' : ')
# NOTE: using .pop() here to prevent a single type node to be
# inserted twice into the doctree, which leads to
# inconsistencies later when references are resolved
fieldtype = types.pop(fieldarg)
if len(fieldtype) == 1 and isinstance(fieldtype[0],
nodes.Text):
typename = fieldtype[0].astext()
par.extend(self.make_xrefs(self.typerolename, domain,
typename,
addnodes.literal_emphasis,
env=env, inliner=inliner,
location=location))
else:
par += fieldtype
par += nodes.Text('')
par += nodes.Text(' -- ')
par += content
return par
fieldname = nodes.field_name('', self.label)
if len(items) == 1 and self.can_collapse:
fieldarg, content = items[0]
bodynode: Node = handle_item(fieldarg, content)
else:
bodynode = self.list_type()
for fieldarg, content in items:
bodynode += nodes.list_item('', handle_item(fieldarg, content))
fieldbody = nodes.field_body('', bodynode)
return nodes.field('', fieldname, fieldbody)
class ChapelObject(ObjectDescription):
"""Base class for Chapel directives. It has methods for parsing signatures of
any form, and generating target and index text.
"""
option_spec = {
'noindex': directives.flag,
'module': directives.unchanged,
'annotation': directives.unchanged,
}
doc_field_types = [
ChapelTypedField('parameter', label=_('Arguments'),
names=('param', 'parameter', 'arg', 'argument'),
typerolename='chplref',
typenames=('paramtype', 'type'),
can_collapse=True),
Field('returnvalue', label=_('Returns'), has_arg=False,
names=('returns', 'return')),
Field('yieldvalue', label=_('Yields'), has_arg=False,
names=('yields', 'yield')),
Field('returntype', label=_('Return type'), has_arg=False,
names=('rtype',)),
Field('yieldtype', label=_('Yield type'), has_arg=False,
names=('ytype',)),
GroupedField('errorhandling', label=_('Throws'),
names=('throw', 'throws'), can_collapse=True),
]
@staticmethod
def _pseudo_parse_arglist(signode, arglist):
"""Parse list of comma separated arguments.
Arguments can have optional types.
"""
paramlist = addnodes.desc_parameterlist()
stack = [paramlist]
try:
for argument in arglist.split(','):
argument = argument.strip()
ends_open = 0
ends_close = 0
while argument.startswith('['):
stack.append(addnodes.desc_optional())
stack[-2] += stack[-1]
argument = argument[1:].strip()
while argument.startswith(']'):
stack.pop()
argument = argument[1:].strip()
while argument.endswith(']') and not argument.endswith('[]'):
ends_close += 1
argument = argument[:-1].strip()
while argument.endswith('['):
ends_open += 1
argument = argument[:-1].strip()
if argument:
stack[-1] += addnodes.desc_parameter(argument, argument)
while ends_open:
stack.append(addnodes.desc_optional())
stack[-2] += stack[-1]
ends_open -= 1
while ends_close:
stack.pop()
ends_close -= 1
if len(stack) != 1:
raise IndexError
except IndexError:
# If there are too few or too many elements on the stack, just give
# up and treat the whole argument list as one argument, discarding
# the already partially populated paramlist node.
signode += addnodes.desc_parameterlist()
signode[-1] += addnodes.desc_parameter(arglist, arglist)
else:
signode += paramlist
def _get_attr_like_prefix(self, sig):
"""Return prefix text for attribute or data directive."""
sig_match = chpl_attr_sig_pattern.match(sig)
if sig_match is None:
return ChapelObject.get_signature_prefix(self, sig)
prefixes, _, _, _ = sig_match.groups()
if prefixes:
return prefixes.strip() + ' '
elif self.objtype == 'type':
return 'type' + ' '
else:
return ChapelObject.get_signature_prefix(self, sig)
def _get_proc_like_prefix(self, sig):
"""Return prefix text for function or method directive
(and similar).
"""
sig_match = chpl_sig_pattern.match(sig)
if sig_match is None:
return ChapelObject.get_signature_prefix(self, sig)
prefixes, _, _, _, _ = sig_match.groups()
if prefixes:
return prefixes.strip() + ' '
elif self.objtype.startswith('iter'):
return 'iter' + ' '
elif self.objtype in ('method', 'function'):
return 'proc' + ' '
else:
return ChapelObject.get_signature_prefix(self, sig)
def _is_attr_like(self):
"""Returns True when objtype is attribute or data."""
return self.objtype in ('attribute', 'data', 'type', 'enum')
def _is_proc_like(self):
"""Returns True when objtype is *function or *method."""
return (self.objtype in
('function', 'iterfunction', 'method', 'itermethod'))
def _get_sig_prefix(self, sig):
"""Return signature prefix text. For attribute, data, and proc/iter directives
this might be part of the signature. E.g. `type myNewType` will return
a prefix of 'type' and `inline proc foo()` will return 'inline proc'.
"""
if self._is_proc_like():
return self._get_proc_like_prefix(sig)
elif self._is_attr_like():
return self._get_attr_like_prefix(sig)
else:
return ChapelObject.get_signature_prefix(self, sig)
def get_signature_prefix(self, sig):
"""May return a prefix to put before the object name in
the signature.
"""
return ''
def needs_arglist(self):
"""May return True if an empty argument list is to be generated even if the
document contains none.
"""
return False
def handle_signature(self, sig, signode):
"""Parse the signature *sig* into individual nodes and append them to the
*signode*. If ValueError is raised, parsing is aborted and the whole
*sig* string is put into a single desc_name node.
The return value is the value that identifies the object. IOW, it is
the identifier that will be used to reference this object, datum,
attribute, proc, etc. It is a tuple of "fullname" (including module and
class(es)) and the classes. See also :py:meth:`add_target_and_index`.
"""
if self._is_attr_like():
sig_match = chpl_attr_sig_pattern.match(sig)
if sig_match is None:
raise ValueError('Signature does not parse: {0}'.format(sig))
func_prefix, name_prefix, name, retann = sig_match.groups()
arglist = None
else:
sig_match = chpl_sig_pattern.match(sig)
if sig_match is None:
raise ValueError('Signature does not parse: {0}'.format(sig))
func_prefix, name_prefix, name, arglist, retann = \
sig_match.groups()
modname = self.options.get(
'module', self.env.temp_data.get('chpl:module'))
classname = self.env.temp_data.get('chpl:class')
if classname:
if name_prefix and name_prefix.startswith(classname):
fullname = name_prefix + name
# class name is given again in the signature
name_prefix = name_prefix[len(classname):].lstrip('.')
elif name_prefix:
# class name is given in the signature, but different
# (shouldn't happen)
fullname = classname + '.' + name_prefix + name
else:
# class name is not given in the signature
fullname = classname + '.' + name
else:
if name_prefix:
classname = name_prefix.rstrip('.')
fullname = name_prefix + name
else:
classname = ''
fullname = name
signode['module'] = modname
signode['class'] = classname
signode['fullname'] = fullname
sig_prefix = self.get_signature_prefix(sig)
if sig_prefix:
signode += addnodes.desc_annotation(sig_prefix, sig_prefix)
# if func_prefix:
# signode += addnodes.desc_addname(func_prefix, func_prefix)
if name_prefix:
signode += addnodes.desc_addname(name_prefix, name_prefix)
anno = self.options.get('annotation')
signode += addnodes.desc_name(name, name)
if not arglist:
# If this needs an arglist, and parens were provided in the
# signature, add a parameterlist. Chapel supports paren-less
# functions and methods, which can act as computed properties. If
# arglist is the empty string, the signature included parens. If
# arglist is None, it did not include parens.
if self.needs_arglist() and arglist is not None:
# for callables, add an empty parameter list
signode += addnodes.desc_parameterlist()
if retann:
signode += addnodes.desc_type(retann, retann)
if anno:
signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
return fullname, name_prefix
self._pseudo_parse_arglist(signode, arglist)
if retann:
signode += addnodes.desc_type(retann, retann)
if anno:
signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
return fullname, name_prefix
def get_index_text(self, modname, name):
"""Return the text for the index entry of the object."""
raise NotImplementedError('must be implemented in subclasses')
def add_target_and_index(self, name_cls, sig, signode):
"""Add cross-reference IDs and entries to the index node, if
applicable. *name_cls* is the return value of
:py:meth:`handle_signature`.
"""
modname = self.options.get(
'module', self.env.temp_data.get('chpl:module'))
fullname = (modname and modname + '.' or '') + name_cls[0]
# note target
if fullname not in self.state.document.ids:
signode['names'].append(fullname)
signode['ids'].append(fullname)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
objects = self.env.domaindata['chpl']['objects']
if fullname in objects:
self.state_machine.reporter.warning(
'duplicate object description of %s, ' % fullname +
'other instance in ' +
self.env.doc2path(objects[fullname][0]) +
', use :noindex: for one of them',
line=self.lineno)
objects[fullname] = (self.env.docname, self.objtype)
indextext = self.get_index_text(modname, name_cls)
if indextext:
self.indexnode['entries'].append(('single', indextext,
fullname, '', None))
def before_content(self):
"""Called before parsing content. Set flag to help with class scoping.
"""
self.clsname_set = False
def after_content(self):
"""Called after parsing content. If any classes were added to the env
temp_data, make sure they are removed.
"""
if self.clsname_set:
self.env.temp_data.pop('chpl:class', None)
class ChapelModule(Directive):
"""Directive to make description of a new module."""
has_content = False
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = False
option_spec = {
'platform': lambda x: x,
'synopsis': lambda x: x,
'noindex': directives.flag,
'deprecated': directives.flag,
}
def run(self):
"""Custom execution for chapel module directive. This class is instantiated by
the directive implementation and then this method is called. It parses
the options on the module directive, updates the environment accordingly,
and creates an index entry for the module.
Based on the python domain module directive.
"""
env = self.state.document.settings.env
modname = self.arguments[0].strip()
noindex = 'noindex' in self.options
env.temp_data['chpl:module'] = modname
ret = []
if not noindex:
env.domaindata['chpl']['modules'][modname] = \
(env.docname, self.options.get('synopsis', ''),
self.options.get('platform', ''),
'deprecated' in self.options)
# Make a duplicate entry in 'objects' to facilitate searching for
# the module in ChapelDomain.find_obj().
env.domaindata['chpl']['objects'][modname] = (
env.docname, 'module')
targetnode = nodes.target('', '', ids=['module-' + modname],
ismod=True)
self.state.document.note_explicit_target(targetnode)
# The platform and synopsis are not printed. In fact, they are only
# used in the modindex currently.
ret.append(targetnode)
indextext = _('%s (module)') % modname
inode = addnodes.index(entries=[('single', indextext,
'module-' + modname, '', None)])
ret.append(inode)
return ret
class ChapelCurrentModule(Directive):
"""this directive is just to tell Sphinx that we're documenting stuff in module
foo, but links to module foo won't lead here.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {}
def run(self):
"""See :py:meth:`ChapelModule.run`"""
env = self.state.document.settings.env
modname = self.arguments[0].strip()
if modname == 'None':
env.temp_data['chpl:module'] = None
else:
env.temp_data['chpl:module'] = modname
return []
class ChapelClassMember(ChapelObject):
"""Description of Chapel class members, including attributes, procs,
and iters.
"""
@property
def chpl_type_name(self):
"""Returns iterator or method or '' depending on object type."""
if not self.objtype.endswith('method'):
return ''
elif self.objtype.startswith('iter'):
return 'iterator'
elif self.objtype == 'method':
return 'method'
else:
return ''
def get_signature_prefix(self, sig):
"""Return signature prefix based on sig. May include portion of the sig text,
if relevant (e.g. `proc foo()` will return 'proc' here).
"""
return self._get_sig_prefix(sig)
def needs_arglist(self):
"""Procs and iters need arglists. Attributes do not."""
return self.objtype.endswith('method')
def get_index_text(self, modname, name_cls):
"""Return text for index entry based on object type."""
name, cls = name_cls
add_modules = self.env.config.add_module_names
if self.objtype.endswith('method'):
try:
clsname, methname = name.rsplit('.', 1)
except ValueError:
if modname:
return _('%s() (in module %s)') % (name, modname)
else:
return _('%s()') % name
if modname and add_modules:
return _('%s() (%s.%s %s)') % \
(methname, modname, clsname, self.chpl_type_name)
else:
return _('%s() (%s %s)') % \
(methname, clsname, self.chpl_type_name)
elif self.objtype == 'attribute':
try:
clsname, attrname = name.rsplit('.', 1)
except ValueError:
if modname:
return _('%s (in module %s)') % (name, modname)
else:
return name
if modname and add_modules:
return _('%s (%s.%s attribute)') % (attrname, modname, clsname)
else:
return _('%s (%s attribute)') % (attrname, clsname)
else:
return ''
class ChapelClassObject(ChapelObject):
"""Chapel class and record description."""
def get_signature_prefix(self, sig):
"""Return class or record according to object type."""
return self.objtype + ' '
def get_index_text(self, modname, name_cls):
"""Return index entry text based on object type."""
if self.objtype in ('class', 'record'):
if not modname:
return _('%s (built-in %s)') % (name_cls[0], self.objtype)
return _('%s (%s in %s)') % (name_cls[0], self.objtype, modname)
else:
return ''
def before_content(self):
"""Called before parsing content. Push the class name onto the class name
stack. Used to construct the full name for members.
"""
ChapelObject.before_content(self)
if self.names:
self.env.temp_data['chpl:class'] = self.names[0][0]
self.clsname_set = True
class ChapelModuleLevel(ChapelObject):
"""Chapel module level functions, types, and variables (i.e. data directives)
descriptions.
"""
@property
def chpl_type_name(self):
"""Returns type, iterator, or procedure or '' depending on
object type.
"""
if self.objtype == 'type':
return 'type'
elif not self.objtype.endswith('function'):
return ''
elif self.objtype.startswith('iter'):
return 'iterator'
elif self.objtype == 'function':
return 'procedure'
else:
return ''
def get_signature_prefix(self, sig):
"""Return signature prefix based on sig. May include portion of the sig text,
if relevant (e.g. `proc foo()` will return `proc` here).
"""
return self._get_sig_prefix(sig)
def needs_arglist(self):
"""Procs and iters need arglists. Data directives do not."""
return self.objtype.endswith('function')
def get_index_text(self, modname, name_cls):
"""Return text for index entry based on object type."""
if self.objtype.endswith('function'):
if not modname:
return _('%s() (built-in %s)') % \
(name_cls[0], self.chpl_type_name)
return _('%s() (in module %s)') % (name_cls[0], modname)
elif self.objtype in ('data', 'type', 'enum'):
if not modname:
type_name = self.objtype
if type_name == 'data':
type_name = 'variable'
return _('%s (built-in %s)') % (name_cls[0], type_name)
return _('%s (in module %s)') % (name_cls[0], modname)
else:
return ''
class ChapelXRefRole(XRefRole):
"""Chapel cross-referencing role. Extends base XRefRole with special link
processing method. The Chapel link processing knows how to match a chapel
xref expression to the known objects, data, and modules in the current
project/documents.
"""
def process_link(self, env, refnode, has_explicit_title, title, target):
"""Called after parsing title and target text, and creating the reference
node. Alter the reference node and return it with chapel module and
class information, if relevant.
"""
refnode['chpl:module'] = env.temp_data.get('chpl:module')
refnode['chpl:class'] = env.temp_data.get('chpl:class')
if not has_explicit_title:
# Only has a meaning for the target.
title = title.lstrip('.')
# Only has a meaning for the title.
target = target.lstrip('~')
if title[0:1] == '~':
title = title[1:]
dot = title.rfind('.')
if dot != -1:
title = title[dot+1:]
# IF the first character is a dot, search more specific names
# first. Else, search builtins first.
if target[0:1] == '.':
target = target[1:]
refnode['refspecific'] = True
return title, target
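# Illustrative role usage (examples, not from the source):
#   :chpl:proc:`~MyMod.foo` -> link titled just "foo"
#   :chpl:proc:`.foo` -> search more specific names first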
class ChapelModuleIndex(Index):
"""Provides Chapel module index based on chpl:module."""
name = 'modindex'
localname = _('Chapel Module Index')
shortname = _('modules')
def generate(self, docnames=None):
"""Returns entries for index given by ``name``. If ``docnames`` is given,
restrict to entries referring to these docnames.
Returns a tuple of ``(content, collapse)``. ``collapse`` is bool. When
True, sub-entries should start collapsed for output formats that
support collapsing.
``content`` is a sequence of ``(letter, entries)`` tuples. ``letter``
is the "heading" for the given ``entries``, in this case the starting
letter.
``entries`` is a sequence of single entries, where a single entry is a
sequence ``[name, subtype, docname, anchor, extra, qualifier,
description]``. These items are:
* ``name`` - name of the index entry to be displayed
* ``subtype`` - sub-entry related type:
* 0 - normal entry
* 1 - entry with sub-entries
* 2 - sub-entry
* ``docname`` - docname where the entry is located
* ``anchor`` - anchor for the entry within docname
* ``extra`` - extra info for the entry
* ``qualifier`` - qualifier for the description
* ``description`` - description for the entry
Qualifier and description are not rendered in some output formats.
"""
content = {}
# list of prefixes to ignore
ignores = self.domain.env.config['chapeldomain_modindex_common_prefix']
ignores = sorted(ignores, key=len, reverse=True)
# list of all modules, sorted by module name
modules = sorted(iteritems(self.domain.data['modules']),
key=lambda x: x[0].lower())
# sort out collapsible modules
prev_modname = ''
num_toplevels = 0
for modname, (docname, synopsis, platforms, deprecated) in modules:
# If given a list of docnames and current docname is not in it,
# skip this docname for the index.
if docnames and docname not in docnames:
continue
for ignore in ignores:
if modname.startswith(ignore):
modname = modname[len(ignore):]
stripped = ignore
break
else:
stripped = ''
# we stripped the whole module name?
if not modname:
modname, stripped = stripped, ''
# Put the module in correct bucket (first letter).
entries = content.setdefault(modname[0].lower(), [])
package = modname.split('.')[0]
if package != modname:
# it's a submodule!
if prev_modname == package:
# first submodule - make parent a group head
if entries:
entries[-1][1] = 1
elif not prev_modname.startswith(package):
# submodule without parent in list, add dummy entry
entries.append([stripped + package, 1, '', '', '', '', ''])
subtype = 2
else:
num_toplevels += 1
subtype = 0
qualifier = deprecated and _('Deprecated') or ''
entries.append([stripped + modname, subtype, docname,
'module-' + stripped + modname, platforms,
qualifier, synopsis])
prev_modname = modname
# apply heuristics when to collapse modindex at page load: only
# collapse if number of toplevel modules is larger than number of
# submodules
collapse = len(modules) - num_toplevels < num_toplevels
# sort by first letter
content = sorted(iteritems(content))
return content, collapse
class ChapelDomain(Domain):
"""Chapel language domain."""
name = 'chpl'
label = 'Chapel'
object_types = {
'data': ObjType(_('data'), 'data', 'const', 'var', 'param', 'type'),
'type': ObjType(_('type'), 'type', 'data'),
'function': ObjType(_('function'), 'func', 'proc'),
'iterfunction': ObjType(_('iterfunction'), 'func', 'iter', 'proc'),
'enum': ObjType(_('enum'), 'enum'),
'class': ObjType(_('class'), 'class'),
'record': ObjType(_('record'), 'record'),
'method': ObjType(_('method'), 'meth', 'proc'),
'itermethod': ObjType(_('itermethod'), 'meth', 'iter'),
'attribute': ObjType(_('attribute'), 'attr'),
'module': ObjType(_('module'), 'mod'),
}
directives = {
'data': ChapelModuleLevel,
'type': ChapelModuleLevel,
'function': ChapelModuleLevel,
'iterfunction': ChapelModuleLevel,
# TODO: Consider making enums ChapelClassObject, then each constant
# becomes an attribute on the class. Then xrefs to each constant
# would be possible, plus it would scale to large numbers of
# constants. (thomasvandoren, 2015-03-12)
'enum': ChapelModuleLevel,
'class': ChapelClassObject,
'record': ChapelClassObject,
'method': ChapelClassMember,
'itermethod': ChapelClassMember,
'attribute': ChapelClassMember,
'module': ChapelModule,
'currentmodule': ChapelCurrentModule,
}
roles = {
'data': ChapelXRefRole(),
'const': ChapelXRefRole(),
'var': ChapelXRefRole(),
'param': ChapelXRefRole(),
'type': ChapelXRefRole(),
'func': ChapelXRefRole(),
'proc': ChapelXRefRole(),
'iter': ChapelXRefRole(),
'class': ChapelXRefRole(),
'record': ChapelXRefRole(),
'enum': ChapelXRefRole(),
'meth': ChapelXRefRole(),
'attr': ChapelXRefRole(),
'mod': ChapelXRefRole(),
'chplref': ChapelXRefRole(),
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
'modules': {}, # modname -> docname, synopsis, platform, deprecated
'labels': { # labelname -> docname, labelid, sectionname
'chplmodindex': ('chpl-modindex', '', _('Chapel Module Index')),
},
'anonlabels': { # labelname -> docname, labelid
'chplmodindex': ('chpl-modindex', ''),
},
}
indices = [
ChapelModuleIndex,
]
def clear_doc(self, docname):
"""Remove the data associated with this instance of the domain."""
todel = []
for fullname, (fn, x) in self.data['objects'].items():
if fn == docname:
todel.append(fullname)
for fullname in todel:
del self.data['objects'][fullname]
todel = []
for modname, (fn, x, x, x) in self.data['modules'].items():
if fn == docname:
todel.append(modname)
for modname in todel:
del self.data['modules'][modname]
todel = []
for labelname, (fn, x, x) in self.data['labels'].items():
if fn == docname:
todel.append(labelname)
for labelname in todel:
del self.data['labels'][labelname]
todel = []
for anonlabelname, (fn, x) in self.data['anonlabels'].items():
if fn == docname:
todel.append(anonlabelname)
for anonlabelname in todel:
del self.data['anonlabels'][anonlabelname]
def find_obj(self, env, modname, classname, name, type_name, searchmode=0):
"""Find a Chapel object for "name", possibly with module or class/record
name. Returns a list of (name, object entry) tuples.
:arg int searchmode: If 1, search more specific names first. Otherwise,
search built-ins first and then get more specific.
"""
if name[-2:] == '()':
name = name[:-2]
if not name:
return []
objects = self.data['objects']
matches = []
newname = None
if searchmode == 1:
if type_name is None:
objtypes = list(self.object_types)
else:
objtypes = self.objtypes_for_role(type_name)
if objtypes is not None:
if modname and classname:
fullname = modname + '.' + classname + '.' + name
if (fullname in objects and
objects[fullname][1] in objtypes):
newname = fullname
if not newname:
if (modname and modname + '.' + name in objects and
objects[modname + '.' + name][1] in objtypes):
newname = modname + '.' + name
elif name in objects and objects[name][1] in objtypes:
newname = name
else:
# "Fuzzy" search mode.
searchname = '.' + name
matches = [(oname, objects[oname]) for oname in objects
if oname.endswith(searchname) and
objects[oname][1] in objtypes]
else:
# NOTE: Search for exact match, object type is not considered.
if name in objects:
newname = name
elif type_name == 'mod':
# Only exact matches allowed for modules.
return []
elif classname and classname + '.' + name in objects:
newname = classname + '.' + name
elif modname and modname + '.' + name in objects:
newname = modname + '.' + name
elif (modname and classname and
modname + '.' + classname + '.' + name in objects):
newname = modname + '.' + classname + '.' + name
if newname is not None:
matches.append((newname, objects[newname]))
return matches
def resolve_xref(self, env, fromdocname, builder,
type_name, target, node, contnode):
"""Resolve the pending_xref *node* with give *type_name* and *target*. Returns
None if xref node can not be resolved. If xref can be resolved, returns
new node containing the *contnode*.
"""
# Special case the :chpl:chplref:`chplmodindex` instances.
if type_name == 'chplref':
if node['refexplicit']:
# Reference to anonymous label. The reference uses the supplied
# link caption.
docname, labelid = self.data['anonlabels'].get(
target, ('', ''))
sectname = node.astext()
else:
# Reference to named label. The final node will contain the
# section name after the label.
docname, labelid, sectname = self.data['labels'].get(
target, ('', '', ''))
if not docname:
return None
return self._make_refnode(
fromdocname, builder, docname, labelid, sectname, contnode)
modname = node.get('chpl:module')
clsname = node.get('chpl:class')
searchmode = 1 if node.hasattr('refspecific') else 0
matches = self.find_obj(env, modname, clsname, target,
type_name, searchmode)
if not matches:
return None
elif len(matches) > 1:
env.warn_node(
'more than one target found for cross-reference '
'%r: %s' % (target, ', '.join(match[0] for match in matches)),
node)
name, obj = matches[0]
if obj[1] == 'module':
return self._make_module_refnode(
builder, fromdocname, name, contnode)
else:
return make_refnode(builder, fromdocname, obj[0], name,
contnode, name)
def resolve_any_xref(self, env, fromdocname, builder, target,
node, contnode):
"""Similar to :py:meth:`ChapelDomain.resolve_xref`, but applies to *any* or
similar role where type is not known. This returns a list of tuples
with ("domain:role", newnode).
"""
modname = node.get('chpl:module')
clsname = node.get('chpl:class')
results = []
# Always search in "refspecific" mode with the :any: role.
matches = self.find_obj(env, modname, clsname, target, None, 1)
for name, obj in matches:
if obj[1] == 'module':
results.append(('chpl:mod',
self._make_module_refnode(builder, fromdocname,
name, contnode)))
else:
results.append(
('chpl:' + self.role_for_objtype(obj[1]),
make_refnode(builder, fromdocname, obj[0], name,
contnode, name)))
return results
def _make_refnode(self, fromdocname, builder, docname, labelid, sectname,
contnode, **kwargs):
"""Return reference node for something like ``:chpl:chplref:``."""
nodeclass = kwargs.pop('nodeclass', nodes.reference)
newnode = nodeclass('', '', internal=True, **kwargs)
innernode = nodes.emphasis(sectname, sectname)
if docname == fromdocname:
newnode['refid'] = labelid
else:
# Set more info on contnode. In case the get_relative_uri call
# raises NoUri, the builder will then have to resolve these.
contnode = addnodes.pending_xref('')
contnode['refdocname'] = docname
contnode['refsectname'] = sectname
newnode['refuri'] = builder.get_relative_uri(fromdocname, docname)
if labelid:
newnode['refuri'] += '#' + labelid
newnode.append(innernode)
return newnode
def _make_module_refnode(self, builder, fromdocname, name, contnode):
"""Helper function to generate new xref node based on
current environment.
"""
# Get additional info for modules.
docname, synopsis, platform, deprecated = self.data['modules'][name]
title = name
if synopsis:
title += ': ' + synopsis
if deprecated:
title += _(' (deprecated)')
if platform:
title += ' (' + platform + ')'
return make_refnode(builder, fromdocname, docname,
'module-' + name, contnode, title)
def merge_domaindata(self, docnames, otherdata):
"""Merge in data regarding *docnames* from a different domaindata inventory
(coming from a subprocess in a parallel build).
"""
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
self.data['objects'][fullname] = (fn, objtype)
for modname, data in otherdata['modules'].items():
if data[0] in docnames:
self.data['modules'][modname] = data
for labelname, data in otherdata['labels'].items():
if data[0] in docnames:
self.data['labels'][labelname] = data
for anonlabelname, data in otherdata['anonlabels'].items():
if data[0] in docnames:
self.data['anonlabels'][anonlabelname] = data
def get_objects(self):
"""Return iterable of "object descriptions", which are tuple with these items:
* `name`
* `dispname`
* `type`
* `docname`
* `anchor`
* `priority`
For details on each item, see
:py:meth:`~sphinx.domains.Domain.get_objects`.
"""
for modname, info in self.data['modules'].items():
yield (modname, modname, 'module', info[0], 'module-' + modname, 0)
for refname, (docname, type_name) in self.data['objects'].items():
if type_name != 'module': # modules are already handled
yield (refname, refname, type_name, docname, refname, 1)
def setup(app):
"""Add Chapel domain to Sphinx app."""
# First add the in-house lexer to override the pygments one
app.add_lexer('chapel', ChapelLexer)
app.add_config_value('chapeldomain_modindex_common_prefix', [], 'html')
app.add_domain(ChapelDomain)
|
chapel-lang/sphinxcontrib-chapeldomain
|
sphinxcontrib/chapeldomain/__init__.py
|
Python
|
apache-2.0
| 41,851
|
# This script tests the speed limit and the orientation lock API calls.
# Created by Toni Sagrista
from py4j.java_gateway import JavaGateway, GatewayParameters
gateway = JavaGateway(gateway_parameters=GatewayParameters(auto_convert=True))
gs = gateway.entry_point
gs.disableInput()
gs.cameraStop()
gs.maximizeInterfaceWindow()
gs.expandGuiComponent("CameraComponent")
# Camera speed limit
# Step through the camera speed limit settings (note: value 2 is
# skipped), pausing a second between each
for limit in (0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13):
    gs.setCameraSpeedLimit(limit)
    gs.sleep(1)
gs.sleep(1) # second pause after the last setting
# Orientation lock
gs.setCameraOrientationLock(True)
gs.sleep(1)
gs.setCameraOrientationLock(False)
gs.sleep(1)
gs.enableInput()
gateway.close()
|
ari-zah/gaiasky
|
assets/scripts/tests/topspeed-orientation-test.py
|
Python
|
lgpl-3.0
| 1,039
|
import plotly as py
import plotly.graph_objs as go
color_Unoccupied = 'rgb(233,233,233)'
color_ExtremeCold = 'rgb(0,0,255)'
color_Cold = 'rgb(47,141,255)'
color_SlightlyCold = 'rgb(110,255,255)'
color_Comfortable = 'rgb(144,245,0)'
color_SlightlyWarm = 'rgb(255,204,0)'
color_Hot = 'rgb(255,111,71)'
color_ExtremeHot = 'rgb(255,0,0)'
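# Marker colors for the seven PMV comfort bands (cold to hot) plus an
# "unoccupied" grey; the stat indices 0-6 used below follow this order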
def traceseries(colorPMV, xvalues, yvalues, matchcolor, name):
"""divided the color list to different list to use as different traces"""
matchX = []
matchY = []
for iid, color in enumerate(colorPMV):
if color == matchcolor:
matchX.append(xvalues[iid])
matchY.append(yvalues[iid])
trace = go.Scatter(
name=name,
x=matchX,
y=matchY,
mode='markers',
marker=dict(
size='4',
color=matchcolor, # set color equal to a variable
showscale=False,
line=dict(width=0.3, color=matchcolor)
)
)
return trace
def PMV_plotlyScatter(vname, colorPMV, xvalues, yvalues, stat, pickedID):
"""Plotting yearly comfort values"""
Unoccupied = traceseries(colorPMV, xvalues, yvalues, color_Unoccupied, "Unoccupied")
ExtremeCold = traceseries(colorPMV, xvalues, yvalues, color_ExtremeCold, "Extreme Cold: %d hrs" % (stat[0]))
Cold = traceseries(colorPMV, xvalues, yvalues, color_Cold, "Cold: %d hrs" % (stat[1]))
SlightlyCold = traceseries(colorPMV, xvalues, yvalues, color_SlightlyCold, "Slightly Cold: %d hrs" % (stat[2]))
Comfortable = traceseries(colorPMV, xvalues, yvalues, color_Comfortable, "Comfortable: %d hrs" % (stat[3]))
SlightlyWarm = traceseries(colorPMV, xvalues, yvalues, color_SlightlyWarm, "Slightly Warm: %d hrs" % (stat[4]))
Hot = traceseries(colorPMV, xvalues, yvalues, color_Hot, "Hot: %d hrs" % (stat[5]))
ExtremeHot = traceseries(colorPMV, xvalues, yvalues, color_ExtremeHot, "Extreme Hot: %d hrs" % (stat[6]))
data = [ExtremeHot, Hot, SlightlyWarm, Comfortable, SlightlyCold, Cold, ExtremeCold, Unoccupied]
layout = go.Layout(
width=1900, height=330,
title="Hourly Comfort C%d - %s" % (pickedID, vname),
xaxis=dict(
# fixedrange = True,
zeroline=False,
showline=False,
showgrid=False,
tick0=0,
dtick="M1",
tickformat="%b",
ticklen=3,
tickwidth=1,
),
yaxis=dict(
# fixedrange = True,
autotick=False,
showgrid=False,
zeroline=False,
showline=False,
ticks='outside',
tick0=0,
dtick=8,
ticklen=0,
tickwidth=1,
tickcolor='#000'
)
)
fig = go.Figure(data=data, layout=layout)
py.offline.plot(fig, filename='Yearly_PMV_%s_C%d.html' % (vname, pickedID),
image_filename="Yearly_PMV_%s_C%d" % (vname, pickedID), image_width=1900,
image_height=330) # image = 'png',
def PMV_BarStatID(pers, statname, statcolor, pickedID):
"""Plotting comfort statistic of selected pickedID"""
def stattrace(pickedid, statvalue, color, name):
trace = go.Bar(
x=[pickedid],
y=[statvalue],
name=name,
width=[0.1],
text=name,
marker=dict(color=color),
)
return trace
excold = stattrace(pickedID, pers[0], statcolor[0], statname[0])
cold = stattrace(pickedID, pers[1], statcolor[1], statname[1])
slcold = stattrace(pickedID, pers[2], statcolor[2], statname[2])
comf = stattrace(pickedID, pers[3], statcolor[3], statname[3])
slwarm = stattrace(pickedID, pers[4], statcolor[4], statname[4])
hot = stattrace(pickedID, pers[5], statcolor[5], statname[5])
exhot = stattrace(pickedID, pers[6], statcolor[6], statname[6])
data = [excold, cold, slcold, comf, slwarm, hot, exhot]
layout = go.Layout(barmode='stack', title='PMV Statistic on C%s' % (str(pickedID)), width=1920, height=1080)
fig = go.Figure(data=data, layout=layout)
py.offline.plot(fig, filename='PMV_Stat_PtsID.html')
def PMV_BarStatALL(statdict, statname, statcolor):
"""Plotting comfort statistic of all comfortID"""
def traceassign(statdict, statname, nameID):
xtrace = []
ytrace = []
for key in statdict.keys():
xtrace.append(key)
ytrace.append(statdict[key][nameID])
trace = go.Bar(
x=xtrace,
y=ytrace,
# width = [0.1],
name=statname[nameID],
marker=dict(color=statcolor[nameID]))
return trace
excold = traceassign(statdict, statname, 0)
cold = traceassign(statdict, statname, 1)
slcold = traceassign(statdict, statname, 2)
comf = traceassign(statdict, statname, 3)
slwarm = traceassign(statdict, statname, 4)
hot = traceassign(statdict, statname, 5)
exhot = traceassign(statdict, statname, 6)
data = [excold, cold, slcold, comf, slwarm, hot, exhot]
layout = go.Layout(
barmode='stack',
title='PMV Statistic',
width=1920, height=1080)
fig = go.Figure(data=data, layout=layout)
py.offline.plot(fig, filename='PMV_Stat_All.html')
def PMV_3DStatScatter(statdict, comfortpts):
"""3D plotting based on comfort pts X,Y coordinate and Z is statistic of comfortable hours during selected period"""
def getXY(comfortpts):
ptsx, ptsy = [], []
for pts in comfortpts.keys():
ptsx.append(comfortpts[pts][0])
ptsy.append(comfortpts[pts][1])
return ptsx, ptsy
def getStatlistasZ(statdict, statname):
# statname = ['Extreme Cold', 'Cold', 'Slightly Cold', 'Comfortable', 'Slightly Warm','Hot','Extreme Hot']
if statname == 'Extreme Cold':
statID = 0
elif statname == 'Cold':
statID = 1
elif statname == 'Slightly Cold':
statID = 2
elif statname == 'Comfortable':
statID = 3
elif statname == 'Slightly Warm':
statID = 4
elif statname == 'Hot':
statID = 5
        elif statname == 'Extreme Hot':
            statID = 6
        else:
            raise ValueError('Unknown stat name: %s' % statname)
Z3DScatter = []
for pts in statdict.keys():
Z3DScatter.append(statdict[pts][statID])
return Z3DScatter
ptsX, ptsY = getXY(comfortpts)
ptsZ = getStatlistasZ(statdict, "Comfortable")
trace = go.Scatter3d(
x=ptsX,
y=ptsY,
z=ptsZ,
mode='markers',
marker=dict(
size=8,
color='rgb(127, 127, 127)',
colorscale='Viridis',
opacity=1)
)
data = [trace]
layout = go.Layout(
margin=dict(l=0, r=0, b=0, t=0),
# zaxis = dict(autorange = False)
xaxis=dict(autorange=False)
)
fig = go.Figure(data=data, layout=layout)
py.offline.plot(fig, filename='PMV_Stat_3D.html')
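# Hedged usage sketch (not part of the original module; assumes the plotly
# 2.x-era API this file was written against). It fakes one day of hourly data
# to show how the trace/plot helpers fit together.
if __name__ == '__main__':
    xs = list(range(24))                                  # hour index
    ys = [20 + (h % 8) for h in range(24)]                # dummy sensor values
    colors = [color_Comfortable if 8 <= h <= 18 else color_Unoccupied
              for h in range(24)]                         # occupied 08-18h
    stats = [0, 0, 0, 11, 0, 0, 0]                        # hours per comfort band
    PMV_plotlyScatter('demo', colors, xs, ys, stats, pickedID=1)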
|
vhoangTS/LizardPMVPlot
|
PMVPlotting.py
|
Python
|
gpl-3.0
| 7,189
|
import unittest
from biothings_explorer.registry import Registry
from biothings_explorer.user_query_dispatcher import SingleEdgeQueryDispatcher
from .utils import get_apis
reg = Registry()
class TestSingleHopQuery(unittest.TestCase):
def test_genomicentity2protein(self):
"""Test gene-protein"""
seqd = SingleEdgeQueryDispatcher(
output_cls="Protein",
input_cls="GenomicEntity",
input_id="SO",
output_id="PR",
pred="related_to",
values="SO:0001860",
)
seqd.query()
self.assertTrue("PR:000022738" in seqd.G)
edges = seqd.G["SO:SO:0001860"]["PR:000022738"]
self.assertTrue("CORD Genomic Entity API" in get_apis(edges))
def test_genomicentity2genomicentity(self):
"""Test gene-protein"""
seqd = SingleEdgeQueryDispatcher(
output_cls="GenomicEntity",
input_cls="GenomicEntity",
pred="related_to",
input_id="SO",
output_id="SO",
values="SO:0001860",
)
seqd.query()
self.assertTrue("SO:0000165" in seqd.G)
self.assertTrue("SO:0000436" in seqd.G)
def test_genomicentity2chemicalsubstance(self):
"""Test gene-genomic entity"""
seqd = SingleEdgeQueryDispatcher(
output_cls="ChemicalSubstance",
input_cls="GenomicEntity",
input_id="SO",
output_id="CHEBI",
values="SO:0001860",
)
seqd.query()
self.assertTrue("CHEBI:17351" in seqd.G)
edges = seqd.G["SO:SO:0001860"]["CHEBI:17351"]
self.assertTrue("CORD Genomic Entity API" in get_apis(edges))
def test_genomicentity2gene(self):
"""Test gene-gene"""
seqd = SingleEdgeQueryDispatcher(
output_cls="Gene",
input_cls="GenomicEntity",
input_id="SO",
values="SO:0001860",
)
seqd.query()
self.assertTrue("ARHGAP45" in seqd.G)
self.assertTrue("LRP5" in seqd.G)
edges = seqd.G["SO:SO:0001860"]["LRP5"]
self.assertTrue("CORD Genomic Entity API" in get_apis(edges))
def test_genomicentity2anatomy(self):
"""Test gene-anatomy"""
seqd = SingleEdgeQueryDispatcher(
output_cls="AnatomicalEntity",
input_cls="GenomicEntity",
input_id="SO",
output_id="UBERON",
values="SO:0001860",
)
seqd.query()
self.assertTrue("UBERON:0000104" in seqd.G)
edges = seqd.G["SO:SO:0001860"]["UBERON:0000104"]
self.assertTrue("CORD Genomic Entity API" in get_apis(edges))
def test_genomicentity2ma(self):
"""Test gene-molecular_activity"""
seqd = SingleEdgeQueryDispatcher(
output_cls="MolecularActivity",
input_cls="GenomicEntity",
input_id="SO",
output_id="MOP",
values="SO:0001860",
)
seqd.query()
self.assertTrue("MOP:0000569" in seqd.G)
edges = seqd.G["SO:SO:0001860"]["MOP:0000569"]
self.assertTrue("CORD Genomic Entity API" in get_apis(edges))
def test_genomicentity2bp(self):
"""Test gene-biological_process"""
seqd = SingleEdgeQueryDispatcher(
output_cls="BiologicalProcess",
input_cls="GenomicEntity",
input_id="SO",
output_id="GO",
values="SO:0001860",
)
seqd.query()
self.assertTrue("GO:0009056" in seqd.G)
edges = seqd.G["SO:SO:0001860"]["GO:0009056"]
self.assertTrue("CORD Genomic Entity API" in get_apis(edges))
def test_genomicentity2cc(self):
"""Test gene-cellular_component"""
seqd = SingleEdgeQueryDispatcher(
output_cls="CellularComponent",
input_cls="GenomicEntity",
input_id="SO",
output_id="GO",
values="SO:0001860",
)
seqd.query()
self.assertTrue("GO:0030121" in seqd.G)
edges = seqd.G["SO:SO:0001860"]["GO:0030121"]
self.assertTrue("CORD Genomic Entity API" in get_apis(edges))
def test_genomicentity2cell(self):
"""Test gene-cell"""
seqd = SingleEdgeQueryDispatcher(
output_cls="Cell",
input_cls="GenomicEntity",
input_id="SO",
output_id="CL",
values="SO:0001860",
)
seqd.query()
self.assertTrue("CL:0000037" in seqd.G)
def test_genomicentity2disease(self):
"""Test gene-disease"""
seqd = SingleEdgeQueryDispatcher(
output_cls="Disease",
input_cls="GenomicEntity",
input_id="SO",
output_id="DOID",
values="SO:0001860",
)
seqd.query()
self.assertTrue("DOID:0050784".upper() in seqd.G)
edges = seqd.G["SO:SO:0001860"]["DOID:0050784"]
self.assertTrue("CORD Genomic Entity API" in get_apis(edges))
|
biothings/biothings_explorer
|
tests/test_apis/test_cordgenomicentity.py
|
Python
|
apache-2.0
| 5,030
|
# val is the height of the throw; cross is a char ('x', 'l', 'r', or ' ')
class Toss:
cross = ''
def __init__(self, _val, _cross):
self.val = _val
self.cross = _cross
#find the drop location of a toss at a given offset
def location(self, offset, length, right):
        retval = ((self.val // 2) + offset) % length  # integer division, matching the original Python 2 behaviour
if right and self.cross == 'x':
return Toss(retval, 'l')
elif right:
return Toss(retval, 'r')
elif not right and self.cross == 'x':
return Toss(retval, 'r')
else:
return Toss(retval, 'l')
def __repr__(self):
if self.val >= 0 and self.val <= 9:
num = chr(self.val + ord('0'))
else:
num = chr(self.val - 10 + ord('a'))
if self.cross == ' ':
return "%s" % (num)
else:
return "%s%s" % (num, self.cross)
    # This is meant to be used when finding transitions.
    # Tosses are expected to have an 'l' or 'r' as their cross.
def __sub__(self, other):
val = self.val-other.val
cross = 0 if self.cross == other.cross else 1
return Toss(val, ' ' if cross == val % 2 else 'x')
def __eq__(self, other):
return self.cross == other.cross and self.val == other.val
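# Hedged usage sketch (not part of the original file):
#   >>> t = Toss(4, 'x')
#   >>> repr(t)
#   '4x'
#   >>> repr(t.location(0, 4, True))   # drop lands 2 beats later; 'l' marks the left hand
#   '2l'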
|
JoshMermel/Juggle-Transition
|
toss.py
|
Python
|
gpl-2.0
| 1,272
|
# Copyright 2016. The Regents of the University of California.
# All rights reserved. Use of this source code is governed by
# a BSD-style license which can be found in the LICENSE file.
#
# Authors:
# 2016 Siddharth Iyer <sid8795@gmail.com>
# 2018 Soumick Chatterjee <soumick.chatterjee@ovgu.de> , WSL Support
import subprocess as sp
import tempfile as tmp
import cfl
import os
from wslsupport import PathCorrection
def bart(nargout, cmd, *args):
if type(nargout) != int or nargout < 0:
print("Usage: bart(<nargout>, <command>, <arguements...>)")
return None
    try:
        bart_path = os.environ['TOOLBOX_PATH']
    except KeyError:
        bart_path = None
isWSL = False
if not bart_path:
if os.path.isfile('/usr/local/bin/bart'):
bart_path = '/usr/local/bin'
elif os.path.isfile('/usr/bin/bart'):
bart_path = '/usr/bin'
else:
bartstatus = os.system('wsl bart version -V')
if bartstatus==0:
bart_path = '/usr/bin'
isWSL = True
else:
                raise Exception('BART not found: set the TOOLBOX_PATH environment variable or install bart.')
name = tmp.NamedTemporaryFile().name
nargin = len(args)
infiles = [name + 'in' + str(idx) for idx in range(nargin)]
in_str = ' '.join(infiles)
for idx in range(nargin):
cfl.writecfl(infiles[idx], args[idx])
outfiles = [name + 'out' + str(idx) for idx in range(nargout)]
out_str = ' '.join(outfiles)
if os.name =='nt':
if isWSL:
#For WSL and modify paths
cmdWSL = PathCorrection(cmd)
in_strWSL = PathCorrection(in_str)
out_strWSL = PathCorrection(out_str)
ERR = os.system('wsl bart ' + cmdWSL + ' ' + in_strWSL + ' ' + out_strWSL)
else:
#For cygwin use bash and modify paths
ERR = os.system('bash.exe --login -c ' + bart_path + '"/bart ' + cmd.replace(os.path.sep, '/') + ' ' + in_str.replace(os.path.sep, '/') + ' ' + out_str.replace(os.path.sep, '/') + '"')
#TODO: Test with cygwin, this is just translation from matlab code
else:
ERR = os.system(bart_path + '/bart ' + cmd + ' ' + in_str + ' ' + out_str)
for elm in infiles:
if os.path.isfile(elm + '.cfl'):
os.remove(elm + '.cfl')
if os.path.isfile(elm + '.hdr'):
os.remove(elm + '.hdr')
output = []
for idx in range(nargout):
elm = outfiles[idx]
if not ERR:
output.append(cfl.readcfl(elm))
if os.path.isfile(elm + '.cfl'):
os.remove(elm + '.cfl')
if os.path.isfile(elm + '.hdr'):
os.remove(elm + '.hdr')
if ERR:
raise Exception("Command exited with an error.")
if nargout == 1:
output = output[0]
return output
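# Hedged usage sketch (not part of the original file; assumes BART and the
# companion cfl module are installed and TOOLBOX_PATH points at the toolbox):
#   import numpy as np
#   ksp = np.random.randn(32, 32) + 1j * np.random.randn(32, 32)
#   img = bart(1, 'fft -i 3', ksp)   # inverse FFT along dims 0 and 1 (bitmask 3)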
|
sdimoudi/bart
|
python/bart.py
|
Python
|
bsd-3-clause
| 2,861
|
#python imports
import sys
import os
import time
import datetime
import subprocess
import json
import requests
from termcolor import colored
#third-party imports
#No third-party imports
#programmer generated imports
from logger import logger
from fileio import fileio
'''
***BEGIN DESCRIPTION***
Type: Search - Description: Searches for any available data on a target against the Abuse.ch Malware Bazaar database.
***END DESCRIPTION***
'''
def POE(POE):
if (POE.logging == True):
LOG = logger()
newlogentry = ''
reputation_dump = ''
reputation_output_data = ''
malwarebazaar = ''
if (POE.logging == True):
newlogentry = 'Module: malware_bazaar_search'
LOG.WriteStrongLog(POE.logdir, POE.targetfilename, newlogentry)
    if (POE.SHA256 == ''):
        print (colored('\r\n[x] Unable to execute Malware Bazaar Search - hash value must be SHA256.', 'red', attrs=['bold']))
        if (POE.logging == True):
            newlogentry = 'Unable to execute Malware Bazaar Search - hash value must be SHA256'
            LOG.WriteStrongSubLog(POE.logdir, POE.targetfilename, newlogentry)
        return -1
query_status = ''
first_seen = ''
last_seen = ''
signature = ''
sig_count = 0
output = POE.logdir + 'MalwareBazaarSearch.json'
FI = fileio()
print (colored('\r\n[*] Running abuse.ch Malware Bazaar Search against: ' + POE.target, 'white', attrs=['bold']))
malwarebazaar = "https://mb-api.abuse.ch/api/v1/" #API URL
    data = { #POST form parameters sent to the API
'query': 'get_info',
'hash': POE.SHA256,
}
    response_dump = requests.post(malwarebazaar, data=data, timeout=15) # POST the query; the response body is JSON
if (POE.debug == True):
print (response_dump)
try:
FI.WriteLogFile(output, response_dump.content.decode("utf-8", "ignore"))
        print (colored('[*] Malware Bazaar data has been written to file here: ', 'green') + colored(output, 'blue', attrs=['bold']))
if ((POE.logging == True) and (POE.nolinksummary == False)):
newlogentry = 'Malware Bazaar data has been generated to file here: <a href=\"' + output + '\"> Malware Bazaar Host Output </a>'
LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
except:
print (colored('[x] Unable to write Malware Bazaar data to file', 'red', attrs=['bold']))
if (POE.logging == True):
newlogentry = 'Unable to write Malware Bazaar data to file'
LOG.WriteStrongSubLog(POE.logdir, POE.targetfilename, newlogentry)
POE.csv_line += 'N/A,'
return -1
try:
#Open the file we just downloaded
print ('[-] Reading Malware Bazaar file: ' + output.strip())
with open(output.strip(), 'rb') as read_file:
data = json.load(read_file, cls=None)
read_file.close()
# Check what kind of results we have
query_status = data["query_status"]
print ('[*] query_status: ' + query_status)
if (query_status == 'ok'):
with open(output.strip(), 'r') as read_file:
for string in read_file:
if (POE.debug == True):
print ('[DEBUG] string: ' + string.strip())
if ('first_seen' in string):
first_seen = string.strip()
if ('last_seen' in string):
last_seen = string.strip()
if (('signature' in string) and (sig_count == 0)):
signature = string.strip()
sig_count += 1
print ('[*] Sample ' + first_seen.replace(',',''))
print ('[*] Sample ' + last_seen.replace(',',''))
print ('[*] Sample ' + signature.replace(',',''))
if (POE.logging == True):
newlogentry = 'Sample ' + first_seen.replace(',','')
LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
newlogentry = 'Sample ' + last_seen.replace(',','')
LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
newlogentry = 'Sample ' + signature.replace(',','')
LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
#Can't find anything on this one...
elif (query_status == 'hash_not_found'):
print (colored('[-] The hash value has not been found...', 'yellow', attrs=['bold']))
if (POE.logging == True):
                newlogentry = 'The hash value has not been found...'
LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
#Can't find anything on this one...
elif (query_status == 'no_results'):
print (colored('[-] No results available for host...', 'yellow', attrs=['bold']))
if (POE.logging == True):
newlogentry = 'No results available for host...'
LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
#Something weird happened...
else:
print (colored('[x] An error has occurred...', 'red', attrs=['bold']))
if (POE.logging == True):
newlogentry = 'An error has occurred...'
LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
    except Exception as e:
        print (colored('[x] Error: ' + str(e) + ' Terminating...', 'red', attrs=['bold']))
        return -1
    #The with blocks above already closed the file, so no further cleanup is needed
    return 0
|
slaughterjames/static
|
modules/malware_bazaar_search.py
|
Python
|
gpl-2.0
| 5,588
|
"""
Stand-alone stream mocking decorator for easier imports.
"""
from functools import wraps
import sys
from StringIO import StringIO # No need for cStringIO at this time
class CarbonCopy(StringIO):
"""
A StringIO capable of multiplexing its writes to other buffer objects.
"""
def __init__(self, buffer='', cc=None):
"""
If ``cc`` is given and is a file-like object or an iterable of same,
it/they will be written to whenever this StringIO instance is written
to.
"""
StringIO.__init__(self, buffer)
if cc is None:
cc = []
elif hasattr(cc, 'write'):
cc = [cc]
self.cc = cc
def write(self, s):
StringIO.write(self, s)
for writer in self.cc:
writer.write(s)
def mock_streams(which):
"""
Replaces a stream with a ``StringIO`` during the test, then restores after.
Must specify which stream (stdout, stderr, etc) via string args, e.g.::
@mock_streams('stdout')
def func():
pass
@mock_streams('stderr')
def func():
pass
@mock_streams('both')
    def func():
pass
If ``'both'`` is specified, not only will both streams be replaced with
StringIOs, but a new combined-streams output (another StringIO) will appear
at ``sys.stdall``. This StringIO will resemble what a user sees at a
terminal, i.e. both streams intermingled.
"""
both = (which == 'both')
stdout = (which == 'stdout') or both
stderr = (which == 'stderr') or both
def mocked_streams_decorator(func):
@wraps(func)
def inner_wrapper(*args, **kwargs):
if both:
sys.stdall = StringIO()
fake_stdout = CarbonCopy(cc=sys.stdall)
fake_stderr = CarbonCopy(cc=sys.stdall)
else:
fake_stdout, fake_stderr = StringIO(), StringIO()
if stdout:
my_stdout, sys.stdout = sys.stdout, fake_stdout
if stderr:
my_stderr, sys.stderr = sys.stderr, fake_stderr
try:
func(*args, **kwargs)
finally:
if stdout:
sys.stdout = my_stdout
if stderr:
sys.stderr = my_stderr
if both:
del sys.stdall
return inner_wrapper
return mocked_streams_decorator
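# Hedged usage sketch (not part of the original file; Python 2 print syntax,
# matching the StringIO import above):
#   @mock_streams('stdout')
#   def demo():
#       print 'captured'
#       assert sys.stdout.getvalue() == 'captured\n'
#   demo()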
|
cmattoon/fabric
|
tests/mock_streams.py
|
Python
|
bsd-2-clause
| 2,480
|
import toolbox
import numpy as np
import pylab
data, params = toolbox.initialise('prepro.su')
cdps = np.unique(data['cdp'])
# recreate the original velocity field
vels = {}
vels[753] = (2456.0, 0.153), (2772.1, 0.413), (3003.2, 0.612), (3076.1, 0.704), (3270.7, 1.056), (3367.9, 1.668), (3538.2, 2.204), (3671.9, 3.566), (3915.1, 5.908),
vels[3056] = (2456.0, 0.153), (2772.1, 0.413), (3003.2, 0.612), (3076.1, 0.704), (3270.7, 1.056), (3367.9, 1.668), (3538.2, 2.204), (3671.9, 3.566), (3915.1, 5.908),
params['cdp'] = cdps
params['vels'] = toolbox.build_vels(vels, **params)
np.array(params['vels']).tofile('vels_initial.bin')
params['vels'] = np.fromfile('vels_initial.bin').reshape(-1, params['ns'])
pylab.imshow(params['vels'].T, aspect='auto')
pylab.colorbar()
pylab.show()
|
stuliveshere/SeismicProcessing2015
|
prac2_student_working/02.0_brute_vels.py
|
Python
|
mit
| 780
|
#
# NOX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NOX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NOX. If not, see <http://www.gnu.org/licenses/>.
import StringIO
import re
import sys
from nox.netapps.bindings_storage.bindings_directory import *
# This file contains utility functions that are likely to be useful
# to implementing many different webservices.
# Convert a string that is intended to be a glob into a form that can
# safely be used to build a Python regex.
# this is based a method in dojo:
# dojo-release-1.1.1/dojo/data/util/filter.js
def glob_to_regex(glob_str, ignore_case=True):
rxp = ""
if ignore_case:
rxp += "(?i)"
rxp += "^"
i = 0
while i < len(glob_str):
c = glob_str[i]
if c == '\\':
rxp += c
i += 1
rxp += glob_str[i]
elif c == '*':
rxp += ".*"
elif c == '?':
rxp += "."
elif c == '$' or \
c == '^' or \
c == '/' or \
c == '+' or \
c == '.' or \
c == '|' or \
c == '(' or \
c == ')' or \
c == '{' or \
c == '}' or \
c == '[' or \
c == ']' :
rxp += "\\"
rxp += c
else :
rxp += c
i += 1
rxp += "$"
return rxp
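# Hedged usage sketch (not part of the original file):
#   >>> r = re.compile(glob_to_regex("switch-*"))
#   >>> bool(r.match("switch-01")), bool(r.match("host-01"))
#   (True, False)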
# the 'args' dictionary of a json request
# object maps from strings to lists of strings.
# this flattens it to a map of strings to strings
# by just taking the first element of the list
# 'skip' indicates a list of keys that should not be copied
def flatten_args(args,skip = [] ):
new_args = {}
for key in args.keys():
if not key in skip:
new_args[key] = args[key][0]
return new_args
# returns a comparison function that can be passed to list's sort() method
# 'res_list' must contain at least one element
# assumes a list like:
# [ { attr1: "foo", attr2 : "bar" }, {attr1 : "bin", attr2 : "bash" } ]
def new_get_cmp_fn(res_list, attr_name, sort_descend ):
if not attr_name in res_list[0]:
raise Exception("invalid 'sort_attribute'. dict " \
"has no key = '%s'" % attr_name)
    # if specified attribute is an int, we do int comparison
# otherwise we do the standard string comparison
attr_is_int = isinstance(res_list[0][attr_name],int)
max = sys.maxint
min = -sys.maxint - 1
def descend_int(a,b):
a_val = a[attr_name]
b_val = b[attr_name]
if a_val is None: return max
if b_val is None: return min
return b_val - a_val
def descend_default(a,b):
return cmp(b[attr_name],a[attr_name])
def ascend_int(a,b):
a_val = a[attr_name]
b_val = b[attr_name]
if a_val is None: return min
if b_val is None: return max
return a_val - b_val
def ascend_default(a,b):
return cmp(a[attr_name],b[attr_name])
if sort_descend:
if attr_is_int:
return descend_int
else:
return descend_default
else : # ascending
if attr_is_int:
return ascend_int
else:
return ascend_default
#based on the contents of the 'filter' dict, this function
# returns a comparison function that can be passed to list's sort()
# method
def get_cmp_fn(filter,res_list):
sort_descend = filter["sort_descending"] == "true"
# no field to sort on was specified in the URL and
# no default specified by the webservice. Just do normal cmp
if not filter["sort_attribute"]:
if sort_descend:
return lambda a, b : cmp(b,a)
else:
return lambda a, b : cmp(a,b)
# test that sort attribute is a key in the dict
attr_name = filter["sort_attribute"]
if not attr_name in res_list[0]:
raise Exception("invalid 'sort_attribute'. dict " \
"has no key = '%s'" % attr_name)
    # if specified attribute is an int, we do int comparison
# otherwise we do the standard string comparison
attr_is_int = isinstance(res_list[0][attr_name],int)
if sort_descend:
if attr_is_int:
return lambda a,b: b[attr_name] - a[attr_name]
else:
return lambda a,b: cmp(b[attr_name],a[attr_name])
else : # ascending
if attr_is_int:
return lambda a,b: a[attr_name] - b[attr_name]
else:
return lambda a,b: cmp(a[attr_name],b[attr_name])
# sorts and slices a list of elements based on the
# content of the 'filter' dict.
def sort_and_slice_results(filter,res_list):
if(len(res_list) > 1):
entry_cmp = get_cmp_fn(filter,res_list)
res_list.sort(cmp=entry_cmp)
if "count" not in filter or filter["count"] == -1 :
filter["count"] = len(res_list)
if "start" not in filter:
filter["start"] = 0
slice_start = filter["start"]
slice_end = slice_start + filter["count"]
return res_list[slice_start : slice_end]
# standard defintion of the params a webservice can use to
# sort and filter results
def get_default_filter_arr(default_sort_attr = ""):
return [ ("start",0),("count", -1), ("sort_attribute",default_sort_attr),
("sort_descending","false")]
# parses arguments from a response into a dictionary.
# input is a list of (key,default) tuples, where key is a
# string and 'default' is of any type. If the request
# does not contain a parameter, it is set to the default value.
# The type of the 'default' value indicates how this method
# will cast a value provided in the request.
def parse_mandatory_args(request, key_arr):
filter = {}
for key,default in key_arr:
filter[key] = default
try:
type_obj = type(default)
filter[key] = type_obj(request.args[key][0])
except:
pass # an exception here is normal
return filter
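# Hedged usage sketch (not part of the original file): the helpers above are
# typically chained like this inside a webservice handler:
#   filter = parse_mandatory_args(request, get_default_filter_arr("name"))
#   rows = sort_and_slice_results(filter, rows)
# which sorts 'rows' on the 'name' attribute and applies start/count slicing.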
def grab_boolean_arg(request, argname, default):
args = request.args
if not args.has_key(argname):
return default
if len(args[argname]) != 1:
return default
if args[argname][0].lower() == 'true':
return True
if args[argname][0].lower() == 'false':
return False
return default
def grab_string_arg(request, argname):
args = request.args
if not args.has_key(argname):
return None
if len(args[argname]) != 1:
return None
return args[argname][0]
def find_value_in_args(args,values):
for v in values:
if v in args: return v
raise Exception("none of the these names exists in URL: %s" % values);
def get_principal_type_from_args(args):
values = [ "switch","location","host","user","nwaddr","dladdr",
"hostnetname" ]
return find_value_in_args(args,values)
def get_nametype_from_string(str):
if str == "host" or str == "hosts": return Name.HOST
if str == "user" or str == "users" : return Name.USER
if str == "location" or str == "locations" : return Name.LOCATION
if str == "switch" or str == "switches" : return Name.SWITCH
if str == "port" or str == "ports" : return Name.PORT
raise Exception("%s is not a valid name type" % str)
# filters a list of items based on the contents of a web
# request's args.
# item_list : a list of string attribute-to-string value dictionaries that
#             represent data rows needing to be filtered
# attr_list : the list of attribute names that, if present as keys in
#             'filter_map', will be used to filter elements of 'item_list'
# filter_map : map from attribute keys (parsed from the web request's URL
#              parameters) to the glob that will be used to filter that
#              attribute's data column.
def filter_item_list(item_list, attr_list, filter_map):
live_attrs = []
regex_map = {}
for attr in attr_list:
if attr in filter_map:
live_attrs.append(attr)
r = re.compile(glob_to_regex(filter_map[attr]),re.IGNORECASE)
regex_map[attr] = r
if len(live_attrs) == 0:
return item_list # nothing to filter
new_item_list = []
for item in item_list:
matches = True
for a in live_attrs:
r = regex_map[a]
if not r.match(item[a]):
matches = False
break
if matches:
new_item_list.append(item)
return new_item_list
def get_html_for_select_box(options, selected_val,select_node_attrs):
buf = StringIO.StringIO()
buf.write('<select')
for a in select_node_attrs:
buf.write(' %s="%s" ' % a)
buf.write('>')
for v in options:
if v == selected_val:
buf.write('<option value="%s" selected="true"> %s </option>' % (v,v))
else:
buf.write('<option value="%s"> %s </option>' % (v,v))
buf.write('</select>')
str = buf.getvalue()
buf.close()
return str
|
zainabg/NOX
|
src/nox/webapps/webservice/web_arg_utils.py
|
Python
|
gpl-3.0
| 9,178
|
default_app_config = 'easyaudit.apps.EasyAuditConfig'
|
Puciek/django-easy-audit
|
easyaudit/__init__.py
|
Python
|
gpl-3.0
| 53
|
import py2exe
from distutils.core import setup
import sys
sys.setrecursionlimit(5000)
# these .py modules will be converted to .pyo and included in the .exe
sys.argv.append("py2exe")
sys.argv.append("-q")
setup(
name='src',
packages=['src', 'src.Misc', 'src.ProblemSpecifications', 'src.Readers', 'src.Writers'],
package_dir={'src': '', 'src.Misc': 'Misc', 'src.ProblemSpecifications': 'ProblemSpecifications', 'src.Readers': 'Readers', 'src.Writers': 'Writers'},
    package_data={'project': ['Misc/*', 'ProblemSpecifications/*', 'Readers/*', 'Writers/*']},
options = {"py2exe": {"compressed": 1,
"optimize": 2,
"ascii": 1,
"bundle_files": 1}},
zipfile = None,
console = [{"script": 'Main.py'}],
exclude = ['os']
)
|
semanticsgirl/arithmetic-exercise-generator
|
src/setup.py
|
Python
|
gpl-3.0
| 804
|
# -*- coding: utf-8 -*-
# Copyright 2013 Dev in Cachu authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import unittest
from django.conf import settings
from django.core import management
from django.test import client
from django.views.generic import detail
from lxml import html
from .. import models, views
class PalestraViewTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
management.call_command("loaddata", "palestrantes",
"palestras", verbosity=0)
@classmethod
def tearDownClass(cls):
management.call_command("flush", verbosity=0, interactive=False)
def setUp(self):
factory = client.RequestFactory()
url = "/programacao/hannibal-lecter/vito-corleone/" +\
"escalando-aplicacoes-django/"
self.request = factory.get(url)
def test_deve_herdar_de_DetailView(self):
assert issubclass(views.PalestraView, detail.DetailView)
def test_model_deve_ser_Palestra(self):
self.assertEqual(models.Palestra, views.PalestraView.model)
def test_context_object_name_deve_ser_palestra(self):
self.assertEqual("palestra", views.PalestraView.context_object_name)
def test_deve_renderizar_template_palestra_html(self):
self.assertEqual("palestra.html", views.PalestraView.template_name)
def test_deve_buscar_palestra_pelo_slug_informado(self):
palestra = models.Palestra.objects.get(pk=3)
view = views.PalestraView()
view.kwargs = {u"slug": palestra.slug, u"palestrantes": "james-bond"}
self.assertEqual(palestra, view.get_queryset()[0])
def test_deve_checar_palestrantes_informados(self):
palestra = models.Palestra.objects.get(pk=1)
view = views.PalestraView()
view.kwargs = {u"slug": palestra.slug,
u"palestrantes": "chico-buarque"}
self.assertEqual([], list(view.get_queryset()))
def test_retorna_apenas_uma_palestra_com_varios_palestrantes(self):
palestra = models.Palestra.objects.get(pk=1)
view = views.PalestraView()
view.kwargs = {u"slug": palestra.slug,
u"palestrantes": "hannibal-lecter/vito-corleone"}
self.assertEqual(1, view.get_queryset().count())
def test_deve_definir_canonical_url(self):
palestra = models.Palestra.objects.get(pk=1)
esperado = "%s/programacao/hannibal-lecter/vito-corleone/%s/" %\
(settings.BASE_URL, palestra.slug)
view = views.PalestraView.as_view()
response = view(self.request, slug=palestra.slug,
palestrantes=u"hannibal-lecter/vito-corleone")
response.render()
dom = html.fromstring(response.content)
obtido = dom.xpath('//link[@rel="canonical"]')[0].attrib["href"]
self.assertEqual(esperado, obtido)
def test_deve_definir_meta_keywords(self):
palestra = models.Palestra.objects.get(pk=1)
esperado = u"dev in cachu 2013, palestra, %s, %s" %\
(palestra.titulo,
palestra.nomes_palestrantes().replace(" e ", ", "))
view = views.PalestraView.as_view()
response = view(self.request, slug=palestra.slug,
palestrantes=u"hannibal-lecter/vito-corleone")
response.render()
dom = html.fromstring(response.content.decode("utf-8"))
obtido = dom.xpath('//meta[@name="keywords"]')[0].attrib["content"]
self.assertEqual(esperado, obtido)
def test_deve_definir_meta_description(self):
palestra = models.Palestra.objects.get(pk=1)
esperado = palestra.descricao
view = views.PalestraView.as_view()
response = view(self.request, slug=palestra.slug,
palestrantes=u"hannibal-lecter/vito-corleone")
response.render()
dom = html.fromstring(response.content.decode("utf-8"))
obtido = dom.xpath('//meta[@name="description"]')[0].attrib["content"]
self.assertEqual(esperado, obtido)
def test_deve_definir_og_title_com_titulo_da_palestra(self):
palestra = models.Palestra.objects.get(pk=1)
esperado = palestra.titulo
view = views.PalestraView.as_view()
response = view(self.request, slug=palestra.slug,
palestrantes=u"hannibal-lecter/vito-corleone")
response.render()
dom = html.fromstring(response.content.decode("utf-8"))
obtido = dom.xpath('//meta[@property="og:title"]')[0].attrib["content"]
self.assertEqual(esperado, obtido)
def test_deve_definir_og_type_como_activity(self):
palestra = models.Palestra.objects.get(pk=1)
view = views.PalestraView.as_view()
response = view(self.request, slug=palestra.slug,
palestrantes=u"hannibal-lecter/vito-corleone")
response.render()
dom = html.fromstring(response.content.decode("utf-8"))
obtido = dom.xpath('//meta[@property="og:type"]')[0].attrib["content"]
self.assertEqual("activity", obtido)
def test_deve_definir_og_url_com_url_da_palestra(self):
palestra = models.Palestra.objects.get(pk=1)
esperado = "%s/programacao/hannibal-lecter/vito-corleone/%s/" %\
(settings.BASE_URL, palestra.slug)
view = views.PalestraView.as_view()
response = view(self.request, slug=palestra.slug,
palestrantes=u"hannibal-lecter/vito-corleone")
response.render()
dom = html.fromstring(response.content.decode("utf-8"))
obtido = dom.xpath('//meta[@property="og:url"]')[0].attrib["content"]
self.assertEqual(esperado, obtido)
def test_deve_usar_foto_do_primeiro_palestrante_como_og_image(self):
palestra = models.Palestra.objects.get(pk=1)
palestrante = palestra.palestrantes.all()[:1].get()
esperado = "%s%s" % (settings.MEDIA_URL, palestrante.foto)
view = views.PalestraView.as_view()
response = view(self.request, slug=palestra.slug,
palestrantes=u"hannibal-lecter/vito-corleone")
response.render()
dom = html.fromstring(response.content.decode("utf-8"))
obtido = dom.xpath('//meta[@property="og:image"]')[0].attrib["content"]
self.assertEqual(esperado, obtido)
def test_deve_usar_devincachu_como_og_sitename(self):
palestra = models.Palestra.objects.get(pk=1)
view = views.PalestraView.as_view()
response = view(self.request, slug=palestra.slug,
palestrantes=u"hannibal-lecter/vito-corleone")
response.render()
dom = html.fromstring(response.content.decode("utf-8"))
tag = dom.xpath('//meta[@property="og:site_name"]')[0]
obtido = tag.attrib["content"]
self.assertEqual("Dev in Cachu 2013", obtido)
def test_deve_ter_og_description_com_descricao_da_palestra(self):
palestra = models.Palestra.objects.get(pk=1)
view = views.PalestraView.as_view()
response = view(self.request, slug=palestra.slug,
palestrantes=u"hannibal-lecter/vito-corleone")
response.render()
dom = html.fromstring(response.content.decode("utf-8"))
tag = dom.xpath('//meta[@property="og:description"]')[0]
obtido = tag.attrib["content"]
self.assertEqual(palestra.descricao, obtido)
def test_deve_ter_og_locale_pt_BR(self):
palestra = models.Palestra.objects.get(pk=1)
view = views.PalestraView.as_view()
response = view(self.request, slug=palestra.slug,
palestrantes=u"hannibal-lecter/vito-corleone")
response.render()
dom = html.fromstring(response.content.decode("utf-8"))
tag = dom.xpath('//meta[@property="og:locale"]')[0]
obtido = tag.attrib["content"]
self.assertEqual("pt_BR", obtido)
def test_deve_usar_fb_app_id_apropriado(self):
palestra = models.Palestra.objects.get(pk=1)
view = views.PalestraView.as_view()
response = view(self.request, slug=palestra.slug,
palestrantes=u"hannibal-lecter/vito-corleone")
response.render()
dom = html.fromstring(response.content.decode("utf-8"))
tag = dom.xpath('//meta[@property="fb:app_id"]')[0]
obtido = tag.attrib["content"]
self.assertEqual("220413784720358", obtido)
|
devincachu/devincachu-2014
|
devincachu/palestras/tests/test_view_palestra.py
|
Python
|
bsd-2-clause
| 8,536
|
# coding=utf-8
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for PXE driver."""
import os
import tempfile
import fixtures
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils as json
from ironic.common import boot_devices
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.glance_service import base_image_service
from ironic.common import keystone
from ironic.common import pxe_utils
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules import pxe
from ironic.openstack.common import fileutils
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.objects import utils as obj_utils
CONF = cfg.CONF
INST_INFO_DICT = db_utils.get_test_pxe_instance_info()
DRV_INFO_DICT = db_utils.get_test_pxe_driver_info()
class PXEValidateParametersTestCase(db_base.DbTestCase):
def test__parse_deploy_info(self):
# make sure we get back the expected things
node = obj_utils.create_test_node(self.context,
driver='fake_pxe',
instance_info=INST_INFO_DICT,
driver_info=DRV_INFO_DICT)
info = pxe._parse_deploy_info(node)
self.assertIsNotNone(info.get('deploy_ramdisk'))
self.assertIsNotNone(info.get('deploy_kernel'))
self.assertIsNotNone(info.get('image_source'))
self.assertIsNotNone(info.get('root_gb'))
self.assertEqual(0, info.get('ephemeral_gb'))
def test__parse_driver_info_missing_deploy_kernel(self):
# make sure error is raised when info is missing
info = dict(DRV_INFO_DICT)
del info['pxe_deploy_kernel']
node = obj_utils.create_test_node(self.context, driver_info=info)
self.assertRaises(exception.MissingParameterValue,
pxe._parse_driver_info,
node)
def test__parse_driver_info_missing_deploy_ramdisk(self):
# make sure error is raised when info is missing
info = dict(DRV_INFO_DICT)
del info['pxe_deploy_ramdisk']
node = obj_utils.create_test_node(self.context, driver_info=info)
self.assertRaises(exception.MissingParameterValue,
pxe._parse_driver_info,
node)
def test__parse_driver_info_good(self):
# make sure we get back the expected things
node = obj_utils.create_test_node(self.context,
driver='fake_pxe',
driver_info=DRV_INFO_DICT)
info = pxe._parse_driver_info(node)
self.assertIsNotNone(info.get('deploy_ramdisk'))
self.assertIsNotNone(info.get('deploy_kernel'))
class PXEPrivateMethodsTestCase(db_base.DbTestCase):
def setUp(self):
super(PXEPrivateMethodsTestCase, self).setUp()
n = {
'driver': 'fake_pxe',
'instance_info': INST_INFO_DICT,
'driver_info': DRV_INFO_DICT,
}
mgr_utils.mock_the_extension_manager(driver="fake_pxe")
self.node = obj_utils.create_test_node(self.context, **n)
@mock.patch.object(base_image_service.BaseImageService, '_show')
def test__get_image_info(self, show_mock):
properties = {'properties': {u'kernel_id': u'instance_kernel_uuid',
u'ramdisk_id': u'instance_ramdisk_uuid'}}
expected_info = {'ramdisk':
('instance_ramdisk_uuid',
os.path.join(CONF.pxe.tftp_root,
self.node.uuid,
'ramdisk')),
'kernel':
('instance_kernel_uuid',
os.path.join(CONF.pxe.tftp_root,
self.node.uuid,
'kernel')),
'deploy_ramdisk':
(DRV_INFO_DICT['pxe_deploy_ramdisk'],
os.path.join(CONF.pxe.tftp_root,
self.node.uuid,
'deploy_ramdisk')),
'deploy_kernel':
(DRV_INFO_DICT['pxe_deploy_kernel'],
os.path.join(CONF.pxe.tftp_root,
self.node.uuid,
'deploy_kernel'))}
show_mock.return_value = properties
image_info = pxe._get_image_info(self.node, self.context)
show_mock.assert_called_once_with('glance://image_uuid',
method='get')
self.assertEqual(expected_info, image_info)
# test with saved info
show_mock.reset_mock()
image_info = pxe._get_image_info(self.node, self.context)
self.assertEqual(expected_info, image_info)
self.assertFalse(show_mock.called)
self.assertEqual('instance_kernel_uuid',
self.node.instance_info.get('kernel'))
self.assertEqual('instance_ramdisk_uuid',
self.node.instance_info.get('ramdisk'))
@mock.patch.object(iscsi_deploy, 'build_deploy_ramdisk_options')
@mock.patch.object(pxe_utils, '_build_pxe_config')
def _test_build_pxe_config_options(self, build_pxe_mock, deploy_opts_mock,
ipxe_enabled=False):
self.config(pxe_append_params='test_param', group='pxe')
        # NOTE: the trailing '/' should be stripped from the URL string
self.config(api_url='http://192.168.122.184:6385/', group='conductor')
self.config(disk_devices='sda', group='pxe')
fake_deploy_opts = {'iscsi_target_iqn': 'fake-iqn',
'deployment_id': 'fake-deploy-id',
'deployment_key': 'fake-deploy-key',
'disk': 'fake-disk',
'ironic_api_url': 'fake-api-url',
'boot_option': 'netboot'}
deploy_opts_mock.return_value = fake_deploy_opts
tftp_server = CONF.pxe.tftp_server
if ipxe_enabled:
http_url = 'http://192.1.2.3:1234'
self.config(ipxe_enabled=True, group='pxe')
self.config(http_url=http_url, group='pxe')
deploy_kernel = os.path.join(http_url, self.node.uuid,
'deploy_kernel')
deploy_ramdisk = os.path.join(http_url, self.node.uuid,
'deploy_ramdisk')
kernel = os.path.join(http_url, self.node.uuid, 'kernel')
ramdisk = os.path.join(http_url, self.node.uuid, 'ramdisk')
root_dir = CONF.pxe.http_root
else:
deploy_kernel = os.path.join(CONF.pxe.tftp_root, self.node.uuid,
'deploy_kernel')
deploy_ramdisk = os.path.join(CONF.pxe.tftp_root, self.node.uuid,
'deploy_ramdisk')
kernel = os.path.join(CONF.pxe.tftp_root, self.node.uuid,
'kernel')
ramdisk = os.path.join(CONF.pxe.tftp_root, self.node.uuid,
'ramdisk')
root_dir = CONF.pxe.tftp_root
expected_options = {
'ari_path': ramdisk,
'deployment_ari_path': deploy_ramdisk,
'pxe_append_params': 'test_param',
'aki_path': kernel,
'deployment_aki_path': deploy_kernel,
'tftp_server': tftp_server,
'boot_option': 'netboot'
}
expected_options.update(fake_deploy_opts)
image_info = {'deploy_kernel': ('deploy_kernel',
os.path.join(root_dir,
self.node.uuid,
'deploy_kernel')),
'deploy_ramdisk': ('deploy_ramdisk',
os.path.join(root_dir,
self.node.uuid,
'deploy_ramdisk')),
'kernel': ('kernel_id',
os.path.join(root_dir,
self.node.uuid,
'kernel')),
'ramdisk': ('ramdisk_id',
os.path.join(root_dir,
self.node.uuid,
'ramdisk'))
}
options = pxe._build_pxe_config_options(self.node,
image_info,
self.context)
self.assertEqual(expected_options, options)
def test__build_pxe_config_options(self):
self._test_build_pxe_config_options(ipxe_enabled=False)
def test__build_pxe_config_options_ipxe(self):
self._test_build_pxe_config_options(ipxe_enabled=True)
def test_get_token_file_path(self):
node_uuid = self.node.uuid
self.assertEqual('/tftpboot/token-' + node_uuid,
pxe._get_token_file_path(node_uuid))
@mock.patch.object(deploy_utils, 'fetch_images')
def test__cache_tftp_images_master_path(self, mock_fetch_image):
temp_dir = tempfile.mkdtemp()
self.config(tftp_root=temp_dir, group='pxe')
self.config(tftp_master_path=os.path.join(temp_dir,
'tftp_master_path'),
group='pxe')
image_path = os.path.join(temp_dir, self.node.uuid,
'deploy_kernel')
image_info = {'deploy_kernel': ('deploy_kernel', image_path)}
fileutils.ensure_tree(CONF.pxe.tftp_master_path)
pxe._cache_ramdisk_kernel(None, self.node, image_info)
mock_fetch_image.assert_called_once_with(None,
mock.ANY,
[('deploy_kernel',
image_path)],
True)
@mock.patch.object(pxe, 'TFTPImageCache', lambda: None)
@mock.patch.object(fileutils, 'ensure_tree')
@mock.patch.object(deploy_utils, 'fetch_images')
def test__cache_ramdisk_kernel(self, mock_fetch_image, mock_ensure_tree):
self.config(ipxe_enabled=False, group='pxe')
fake_pxe_info = {'foo': 'bar'}
expected_path = os.path.join(CONF.pxe.tftp_root, self.node.uuid)
pxe._cache_ramdisk_kernel(self.context, self.node, fake_pxe_info)
mock_ensure_tree.assert_called_with(expected_path)
mock_fetch_image.assert_called_once_with(self.context, mock.ANY,
fake_pxe_info.values(), True)
@mock.patch.object(pxe, 'TFTPImageCache', lambda: None)
@mock.patch.object(fileutils, 'ensure_tree')
@mock.patch.object(deploy_utils, 'fetch_images')
def test__cache_ramdisk_kernel_ipxe(self, mock_fetch_image,
mock_ensure_tree):
self.config(ipxe_enabled=True, group='pxe')
fake_pxe_info = {'foo': 'bar'}
expected_path = os.path.join(CONF.pxe.http_root, self.node.uuid)
pxe._cache_ramdisk_kernel(self.context, self.node, fake_pxe_info)
mock_ensure_tree.assert_called_with(expected_path)
mock_fetch_image.assert_called_once_with(self.context, mock.ANY,
fake_pxe_info.values(),
True)
class PXEDriverTestCase(db_base.DbTestCase):
def setUp(self):
super(PXEDriverTestCase, self).setUp()
self.context.auth_token = '4562138218392831'
self.temp_dir = tempfile.mkdtemp()
self.config(tftp_root=self.temp_dir, group='pxe')
self.temp_dir = tempfile.mkdtemp()
self.config(images_path=self.temp_dir, group='pxe')
mgr_utils.mock_the_extension_manager(driver="fake_pxe")
instance_info = INST_INFO_DICT
instance_info['deploy_key'] = 'fake-56789'
self.node = obj_utils.create_test_node(self.context,
driver='fake_pxe',
instance_info=instance_info,
driver_info=DRV_INFO_DICT)
self.port = obj_utils.create_test_port(self.context,
node_id=self.node.id)
self.config(group='conductor', api_url='http://127.0.0.1:1234/')
def _create_token_file(self):
token_path = pxe._get_token_file_path(self.node.uuid)
open(token_path, 'w').close()
return token_path
def test_get_properties(self):
expected = pxe.COMMON_PROPERTIES
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertEqual(expected, task.driver.get_properties())
@mock.patch.object(base_image_service.BaseImageService, '_show')
def test_validate_good(self, mock_glance):
mock_glance.return_value = {'properties': {'kernel_id': 'fake-kernel',
'ramdisk_id': 'fake-initr'}}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.deploy.validate(task)
def test_validate_fail(self):
info = dict(INST_INFO_DICT)
del info['image_source']
self.node.instance_info = json.dumps(info)
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node['instance_info'] = json.dumps(info)
self.assertRaises(exception.MissingParameterValue,
task.driver.deploy.validate, task)
@mock.patch.object(base_image_service.BaseImageService, '_show')
def test_validate_fail_invalid_boot_mode(self, mock_glance):
properties = {'capabilities': 'boot_mode:foo,cap2:value2'}
mock_glance.return_value = {'properties': {'kernel_id': 'fake-kernel',
'ramdisk_id': 'fake-initr'}}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.properties = properties
self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate, task)
@mock.patch.object(base_image_service.BaseImageService, '_show')
def test_validate_fail_invalid_config_uefi_ipxe(self, mock_glance):
properties = {'capabilities': 'boot_mode:uefi,cap2:value2'}
mock_glance.return_value = {'properties': {'kernel_id': 'fake-kernel',
'ramdisk_id': 'fake-initr'}}
self.config(ipxe_enabled=True, group='pxe')
self.config(http_url='dummy_url', group='pxe')
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.properties = properties
self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate, task)
@mock.patch.object(base_image_service.BaseImageService, '_show')
def test_validate_fail_invalid_boot_option(self, mock_glance):
properties = {'capabilities': 'boot_option:foo,dog:wuff'}
mock_glance.return_value = {'properties': {'kernel_id': 'fake-kernel',
'ramdisk_id': 'fake-initr'}}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.properties = properties
self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate, task)
@mock.patch.object(base_image_service.BaseImageService, '_show')
def test_validate_fail_invalid_uefi_and_localboot(self, mock_glance):
properties = {'capabilities': 'boot_mode:uefi,boot_option:local'}
mock_glance.return_value = {'properties': {'kernel_id': 'fake-kernel',
'ramdisk_id': 'fake-initr'}}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.properties = properties
self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate, task)
def test_validate_fail_no_port(self):
new_node = obj_utils.create_test_node(
self.context,
uuid='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
driver='fake_pxe', instance_info=INST_INFO_DICT,
driver_info=DRV_INFO_DICT)
with task_manager.acquire(self.context, new_node.uuid,
shared=True) as task:
self.assertRaises(exception.MissingParameterValue,
task.driver.deploy.validate, task)
@mock.patch.object(base_image_service.BaseImageService, '_show')
@mock.patch.object(keystone, 'get_service_url')
def test_validate_good_api_url_from_config_file(self, mock_ks,
mock_glance):
mock_glance.return_value = {'properties': {'kernel_id': 'fake-kernel',
'ramdisk_id': 'fake-initr'}}
# not present in the keystone catalog
mock_ks.side_effect = exception.KeystoneFailure
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.deploy.validate(task)
self.assertFalse(mock_ks.called)
@mock.patch.object(base_image_service.BaseImageService, '_show')
@mock.patch.object(keystone, 'get_service_url')
def test_validate_good_api_url_from_keystone(self, mock_ks, mock_glance):
mock_glance.return_value = {'properties': {'kernel_id': 'fake-kernel',
'ramdisk_id': 'fake-initr'}}
# present in the keystone catalog
mock_ks.return_value = 'http://127.0.0.1:1234'
# not present in the config file
self.config(group='conductor', api_url=None)
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.deploy.validate(task)
mock_ks.assert_called_once_with()
@mock.patch.object(keystone, 'get_service_url')
def test_validate_fail_no_api_url(self, mock_ks):
# not present in the keystone catalog
mock_ks.side_effect = exception.KeystoneFailure
# not present in the config file
self.config(group='conductor', api_url=None)
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate, task)
mock_ks.assert_called_once_with()
@mock.patch.object(base_image_service.BaseImageService, '_show')
def test_validate_fail_no_image_kernel_ramdisk_props(self, mock_glance):
mock_glance.return_value = {'properties': {}}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.MissingParameterValue,
task.driver.deploy.validate,
task)
@mock.patch.object(base_image_service.BaseImageService, '_show')
def test_validate_fail_glance_image_doesnt_exists(self, mock_glance):
mock_glance.side_effect = exception.ImageNotFound('not found')
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate, task)
@mock.patch.object(base_image_service.BaseImageService, '_show')
def test_validate_fail_glance_conn_problem(self, mock_glance):
exceptions = (exception.GlanceConnectionFailed('connection fail'),
exception.ImageNotAuthorized('not authorized'),
exception.Invalid('invalid'))
mock_glance.side_effect = exceptions
for exc in exceptions:
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate, task)
def test_vendor_passthru_validate_good(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.vendor.validate(task, method='pass_deploy_info',
address='123456', iqn='aaa-bbb',
key='fake-56789')
def test_vendor_passthru_validate_fail(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.vendor.validate,
task, method='pass_deploy_info',
key='fake-56789')
def test_vendor_passthru_validate_key_notmatch(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.vendor.validate,
task, method='pass_deploy_info',
address='123456', iqn='aaa-bbb',
key='fake-12345')
@mock.patch.object(pxe, '_get_image_info')
@mock.patch.object(pxe, '_cache_ramdisk_kernel')
@mock.patch.object(pxe, '_build_pxe_config_options')
@mock.patch.object(pxe_utils, 'create_pxe_config')
def test_prepare(self, mock_pxe_config,
mock_build_pxe, mock_cache_r_k,
mock_img_info):
mock_build_pxe.return_value = None
mock_img_info.return_value = None
mock_pxe_config.return_value = None
mock_cache_r_k.return_value = None
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.deploy.prepare(task)
mock_img_info.assert_called_once_with(task.node,
self.context)
mock_pxe_config.assert_called_once_with(
task, None, CONF.pxe.pxe_config_template)
mock_cache_r_k.assert_called_once_with(self.context,
task.node, None)
@mock.patch.object(keystone, 'token_expires_soon')
@mock.patch.object(deploy_utils, 'get_image_mb')
@mock.patch.object(iscsi_deploy, '_get_image_file_path')
@mock.patch.object(iscsi_deploy, 'cache_instance_image')
@mock.patch.object(dhcp_factory.DHCPFactory, 'update_dhcp')
@mock.patch.object(manager_utils, 'node_power_action')
@mock.patch.object(manager_utils, 'node_set_boot_device')
def test_deploy(self, mock_node_set_boot, mock_node_power_action,
mock_update_dhcp, mock_cache_instance_image,
mock_get_image_file_path, mock_get_image_mb, mock_expire):
fake_img_path = '/test/path/test.img'
mock_get_image_file_path.return_value = fake_img_path
mock_get_image_mb.return_value = 1
mock_expire.return_value = False
self.config(deploy_callback_timeout=600, group='conductor')
with task_manager.acquire(self.context,
self.node.uuid, shared=False) as task:
dhcp_opts = pxe_utils.dhcp_options_for_instance(task)
state = task.driver.deploy.deploy(task)
            self.assertEqual(states.DEPLOYWAIT, state)
mock_cache_instance_image.assert_called_once_with(
self.context, task.node)
mock_get_image_file_path.assert_called_once_with(task.node.uuid)
mock_get_image_mb.assert_called_once_with(fake_img_path)
mock_update_dhcp.assert_called_once_with(task, dhcp_opts)
mock_expire.assert_called_once_with(self.context.auth_token, 600)
mock_node_set_boot.assert_called_once_with(task, 'pxe',
persistent=True)
mock_node_power_action.assert_called_once_with(task, states.REBOOT)
# ensure token file created
t_path = pxe._get_token_file_path(self.node.uuid)
token = open(t_path, 'r').read()
self.assertEqual(self.context.auth_token, token)
@mock.patch.object(keystone, 'get_admin_auth_token')
@mock.patch.object(keystone, 'token_expires_soon')
@mock.patch.object(deploy_utils, 'get_image_mb')
@mock.patch.object(iscsi_deploy, '_get_image_file_path')
@mock.patch.object(iscsi_deploy, 'cache_instance_image')
@mock.patch.object(dhcp_factory.DHCPFactory, 'update_dhcp')
@mock.patch.object(manager_utils, 'node_power_action')
@mock.patch.object(manager_utils, 'node_set_boot_device')
def test_deploy_token_near_expiration(self, mock_node_set_boot,
mock_node_power_action, mock_update_dhcp,
mock_cache_instance_image, mock_get_image_file_path,
mock_get_image_mb, mock_expire, mock_admin_token):
mock_get_image_mb.return_value = 1
mock_expire.return_value = True
new_token = 'new_admin_token'
mock_admin_token.return_value = new_token
self.config(deploy_callback_timeout=600, group='conductor')
with task_manager.acquire(self.context,
self.node.uuid, shared=False) as task:
task.driver.deploy.deploy(task)
mock_expire.assert_called_once_with(self.context.auth_token, 600)
mock_admin_token.assert_called_once_with()
# ensure token file created with new token
t_path = pxe._get_token_file_path(self.node.uuid)
token = open(t_path, 'r').read()
self.assertEqual(new_token, token)
@mock.patch.object(deploy_utils, 'get_image_mb')
@mock.patch.object(iscsi_deploy, '_get_image_file_path')
@mock.patch.object(iscsi_deploy, 'cache_instance_image')
def test_deploy_image_too_large(self, mock_cache_instance_image,
mock_get_image_file_path,
mock_get_image_mb):
fake_img_path = '/test/path/test.img'
mock_get_image_file_path.return_value = fake_img_path
mock_get_image_mb.return_value = 999999
with task_manager.acquire(self.context,
self.node.uuid, shared=False) as task:
self.assertRaises(exception.InstanceDeployFailure,
task.driver.deploy.deploy, task)
mock_cache_instance_image.assert_called_once_with(
self.context, task.node)
mock_get_image_file_path.assert_called_once_with(task.node.uuid)
mock_get_image_mb.assert_called_once_with(fake_img_path)
@mock.patch.object(manager_utils, 'node_power_action')
def test_tear_down(self, node_power_mock):
with task_manager.acquire(self.context,
self.node.uuid) as task:
state = task.driver.deploy.tear_down(task)
self.assertEqual(states.DELETED, state)
node_power_mock.assert_called_once_with(task, states.POWER_OFF)
@mock.patch.object(dhcp_factory.DHCPFactory, 'update_dhcp')
def test_take_over(self, update_dhcp_mock):
with task_manager.acquire(
self.context, self.node.uuid, shared=True) as task:
dhcp_opts = pxe_utils.dhcp_options_for_instance(task)
task.driver.deploy.take_over(task)
update_dhcp_mock.assert_called_once_with(
task, dhcp_opts)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config')
@mock.patch.object(dhcp_factory.DHCPFactory, 'update_dhcp')
def test_take_over_localboot(self, update_dhcp_mock, clean_pxe_mock):
with task_manager.acquire(
self.context, self.node.uuid, shared=True) as task:
task.node.instance_info['capabilities'] = {"boot_option": "local"}
dhcp_opts = pxe_utils.dhcp_options_for_instance(task)
task.driver.deploy.take_over(task)
update_dhcp_mock.assert_called_once_with(
task, dhcp_opts)
clean_pxe_mock.assert_called_once_with(task)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config')
@mock.patch.object(manager_utils, 'node_set_boot_device')
@mock.patch.object(deploy_utils, 'notify_deploy_complete')
@mock.patch.object(deploy_utils, 'switch_pxe_config')
@mock.patch.object(iscsi_deploy, 'InstanceImageCache')
def _test_continue_deploy(self, is_localboot, mock_image_cache,
mock_switch_config, notify_mock,
mock_node_boot_dev, mock_clean_pxe):
token_path = self._create_token_file()
# set local boot
if is_localboot:
i_info = self.node.instance_info
i_info['capabilities'] = '{"boot_option": "local"}'
self.node.instance_info = i_info
self.node.power_state = states.POWER_ON
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
root_uuid = "12345678-1234-1234-1234-1234567890abcxyz"
boot_mode = None
def fake_deploy(**kwargs):
return root_uuid
self.useFixture(fixtures.MonkeyPatch(
'ironic.drivers.modules.deploy_utils.deploy',
fake_deploy))
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.vendor._continue_deploy(
task, address='123456', iqn='aaa-bbb', key='fake-56789')
self.node.refresh()
self.assertEqual(states.ACTIVE, self.node.provision_state)
self.assertEqual(states.NOSTATE, self.node.target_provision_state)
self.assertEqual(states.POWER_ON, self.node.power_state)
self.assertIsNone(self.node.last_error)
self.assertFalse(os.path.exists(token_path))
mock_image_cache.assert_called_once_with()
mock_image_cache.return_value.clean_up.assert_called_once_with()
pxe_config_path = pxe_utils.get_pxe_config_file_path(self.node.uuid)
notify_mock.assert_called_once_with('123456')
if is_localboot:
mock_node_boot_dev.assert_called_once_with(
mock.ANY, boot_devices.DISK, persistent=True)
mock_clean_pxe.assert_called_once_with(mock.ANY)
self.assertFalse(mock_switch_config.called)
else:
mock_switch_config.assert_called_once_with(
pxe_config_path, root_uuid, boot_mode)
self.assertFalse(mock_node_boot_dev.called)
self.assertFalse(mock_clean_pxe.called)
def test_continue_deploy(self):
self._test_continue_deploy(False)
def test_continue_deploy_localboot(self):
self._test_continue_deploy(True)
@mock.patch.object(iscsi_deploy, 'InstanceImageCache')
def test_continue_deploy_fail(self, mock_image_cache):
token_path = self._create_token_file()
self.node.power_state = states.POWER_ON
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
def fake_deploy(**kwargs):
raise exception.InstanceDeployFailure("test deploy error")
self.useFixture(fixtures.MonkeyPatch(
'ironic.drivers.modules.deploy_utils.deploy',
fake_deploy))
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.vendor._continue_deploy(
task, address='123456', iqn='aaa-bbb', key='fake-56789')
self.node.refresh()
self.assertEqual(states.DEPLOYFAIL, self.node.provision_state)
self.assertEqual(states.ACTIVE, self.node.target_provision_state)
self.assertEqual(states.POWER_OFF, self.node.power_state)
self.assertIsNotNone(self.node.last_error)
self.assertFalse(os.path.exists(token_path))
mock_image_cache.assert_called_once_with()
mock_image_cache.return_value.clean_up.assert_called_once_with()
@mock.patch.object(iscsi_deploy, 'InstanceImageCache')
def test_continue_deploy_ramdisk_fails(self, mock_image_cache):
token_path = self._create_token_file()
self.node.power_state = states.POWER_ON
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
def fake_deploy(**kwargs):
pass
self.useFixture(fixtures.MonkeyPatch(
'ironic.drivers.modules.deploy_utils.deploy',
fake_deploy))
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.vendor._continue_deploy(
task, address='123456', iqn='aaa-bbb',
key='fake-56789', error='test ramdisk error')
self.node.refresh()
self.assertEqual(states.DEPLOYFAIL, self.node.provision_state)
self.assertEqual(states.ACTIVE, self.node.target_provision_state)
self.assertEqual(states.POWER_OFF, self.node.power_state)
self.assertIsNotNone(self.node.last_error)
self.assertFalse(os.path.exists(token_path))
mock_image_cache.assert_called_once_with()
mock_image_cache.return_value.clean_up.assert_called_once_with()
def test_continue_deploy_invalid(self):
self.node.power_state = states.POWER_ON
self.node.provision_state = states.AVAILABLE
self.node.target_provision_state = states.NOSTATE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.InvalidState,
task.driver.vendor._continue_deploy,
task, address='123456', iqn='aaa-bbb',
key='fake-56789', error='test ramdisk error')
self.node.refresh()
self.assertEqual(states.AVAILABLE, self.node.provision_state)
self.assertEqual(states.NOSTATE, self.node.target_provision_state)
self.assertEqual(states.POWER_ON, self.node.power_state)
def test_lock_elevated(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
with mock.patch.object(task.driver.vendor,
'_continue_deploy') as _cont_deploy_mock:
task.driver.vendor._continue_deploy(
task, address='123456', iqn='aaa-bbb', key='fake-56789')
# lock elevated w/o exception
self.assertEqual(1, _cont_deploy_mock.call_count,
"_continue_deploy was not called once.")
def test_vendor_routes(self):
expected = ['pass_deploy_info']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
vendor_routes = task.driver.vendor.vendor_routes
self.assertIsInstance(vendor_routes, dict)
self.assertEqual(expected, list(vendor_routes))
def test_driver_routes(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
driver_routes = task.driver.vendor.driver_routes
self.assertIsInstance(driver_routes, dict)
self.assertEqual({}, driver_routes)
@mock.patch.object(utils, 'unlink_without_raise')
@mock.patch.object(iscsi_deploy, 'destroy_images')
@mock.patch.object(pxe_utils, 'clean_up_pxe_config')
@mock.patch.object(pxe, 'TFTPImageCache')
@mock.patch.object(pxe, '_get_image_info')
class CleanUpTestCase(db_base.DbTestCase):
def setUp(self):
super(CleanUpTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="fake_pxe")
instance_info = INST_INFO_DICT
instance_info['deploy_key'] = 'fake-56789'
self.node = obj_utils.create_test_node(self.context,
driver='fake_pxe',
instance_info=instance_info,
driver_info=DRV_INFO_DICT)
def test_clean_up(self, mock_image_info, mock_cache, mock_pxe_clean,
mock_iscsi_clean, mock_unlink):
mock_image_info.return_value = {'label': ['', 'deploy_kernel']}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.deploy.clean_up(task)
mock_image_info.assert_called_once_with(task.node,
task.context)
mock_pxe_clean.assert_called_once_with(task)
mock_unlink.assert_any_call('deploy_kernel')
mock_unlink.assert_any_call(pxe._get_token_file_path(
task.node.uuid))
mock_iscsi_clean.assert_called_once_with(task.node.uuid)
mock_cache.return_value.clean_up.assert_called_once_with()
def test_clean_up_fail_get_image_info(self, mock_image_info, mock_cache,
mock_pxe_clean, mock_iscsi_clean,
mock_unlink):
mock_image_info.side_effect = exception.MissingParameterValue('foo')
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.deploy.clean_up(task)
mock_image_info.assert_called_once_with(task.node,
task.context)
mock_pxe_clean.assert_called_once_with(task)
mock_unlink.assert_called_once_with(pxe._get_token_file_path(
task.node.uuid))
mock_iscsi_clean.assert_called_once_with(task.node.uuid)
mock_cache.return_value.clean_up.assert_called_once_with()
class CleanUpFullFlowTestCase(db_base.DbTestCase):
def setUp(self):
super(CleanUpFullFlowTestCase, self).setUp()
self.config(image_cache_size=0, group='pxe')
# Configure node
mgr_utils.mock_the_extension_manager(driver="fake_pxe")
instance_info = INST_INFO_DICT
instance_info['deploy_key'] = 'fake-56789'
self.node = obj_utils.create_test_node(self.context,
driver='fake_pxe',
instance_info=instance_info,
driver_info=DRV_INFO_DICT)
self.port = obj_utils.create_test_port(self.context,
node_id=self.node.id)
# Configure temporary directories
pxe_temp_dir = tempfile.mkdtemp()
self.config(tftp_root=pxe_temp_dir, group='pxe')
tftp_master_dir = os.path.join(CONF.pxe.tftp_root,
'tftp_master')
self.config(tftp_master_path=tftp_master_dir, group='pxe')
os.makedirs(tftp_master_dir)
instance_temp_dir = tempfile.mkdtemp()
self.config(images_path=instance_temp_dir,
group='pxe')
instance_master_dir = os.path.join(CONF.pxe.images_path,
'instance_master')
self.config(instance_master_path=instance_master_dir,
group='pxe')
os.makedirs(instance_master_dir)
self.pxe_config_dir = os.path.join(CONF.pxe.tftp_root, 'pxelinux.cfg')
os.makedirs(self.pxe_config_dir)
# Populate some file names
self.master_kernel_path = os.path.join(CONF.pxe.tftp_master_path,
'kernel')
self.master_instance_path = os.path.join(CONF.pxe.instance_master_path,
'image_uuid')
self.node_tftp_dir = os.path.join(CONF.pxe.tftp_root,
self.node.uuid)
os.makedirs(self.node_tftp_dir)
self.kernel_path = os.path.join(self.node_tftp_dir,
'kernel')
self.node_image_dir = iscsi_deploy._get_image_dir_path(self.node.uuid)
os.makedirs(self.node_image_dir)
self.image_path = iscsi_deploy._get_image_file_path(self.node.uuid)
self.config_path = pxe_utils.get_pxe_config_file_path(self.node.uuid)
self.mac_path = pxe_utils._get_pxe_mac_path(self.port.address)
self.token_path = pxe._get_token_file_path(self.node.uuid)
# Create files
self.files = [self.config_path, self.master_kernel_path,
self.master_instance_path, self.token_path]
for fname in self.files:
# NOTE(dtantsur): files with 0 size won't be cleaned up
with open(fname, 'w') as fp:
fp.write('test')
os.link(self.config_path, self.mac_path)
os.link(self.master_kernel_path, self.kernel_path)
os.link(self.master_instance_path, self.image_path)
@mock.patch.object(pxe, '_get_image_info')
def test_clean_up_with_master(self, mock_get_image_info):
image_info = {'kernel': ('kernel_uuid',
self.kernel_path)}
mock_get_image_info.return_value = image_info
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.deploy.clean_up(task)
mock_get_image_info.assert_called_once_with(task.node,
task.context)
for path in ([self.kernel_path, self.image_path, self.config_path]
+ self.files):
self.assertFalse(os.path.exists(path),
'%s is not expected to exist' % path)
|
ramineni/myironic
|
ironic/tests/drivers/test_pxe.py
|
Python
|
apache-2.0
| 44,025
|
#!/usr/bin/env python3
import pickle
import sys
import string
import heapq
from pos import core_tags
from nltk.tokenize import word_tokenize
if __name__ == '__main__':
    score, npos_count = pickle.load(open("tagger.out", "rb"))
    def tag_scores(word, next_tag):
        """Score each candidate tag for `word` given the tag that follows it."""
        scores = []
        for tag in core_tags:
            key = (word, tag)
            bigram = (tag, next_tag)
            if key in score and bigram in npos_count:
                scores.append((tag, score[key] * npos_count[bigram]))
        return scores
    with open(sys.argv[1]) as input_file:
        for line in input_file:
            line = word_tokenize(line)
            line = [w for w in line if w not in string.punctuation]
            print(line)
            prev_tag = '.'
            result = []
            for w in reversed(line):
                w_score = tag_scores(w, prev_tag)
                if not w_score:
                    # back off to the 'UNK' pseudo-word for unseen words
                    w_score = tag_scores('UNK', prev_tag)
                prev_tag = heapq.nlargest(1, w_score, key=lambda x: x[1])[0][0]
                result.append(prev_tag)
            print(result[::-1])
|
pdekker12/nlp2
|
Assignment2/run.py
|
Python
|
bsd-2-clause
| 1,496
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
from utils import check_rc
from subprocess import *
import yarn_conf
def resourcemanager(action=None):
    if action in ("configure", "start"):
        yarn_conf.configure(service="resourcemanager")
    if action in ("start", "stop", "status"):
        cmd = Popen(["service", "hadoop-yarn-resourcemanager", action],
                    stdout=PIPE, stderr=PIPE)
        out, err = cmd.communicate()
        rc = cmd.returncode
        check_rc(rc, stdout=out, stderr=err)
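# Illustrative usage (hypothetical call sites; in practice this module is
# driven by the stack's service lifecycle hooks):
#   resourcemanager(action="configure")
#   resourcemanager(action="start")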
|
keedio/keedio-stacks
|
KEEDIO/1.1/services/YARN/package/scripts/yarn_resourcemanager.py
|
Python
|
apache-2.0
| 1,254
|
# -*- coding: utf-8 -*-
#
# Copyright 2008-2010 Brett Adams
# Copyright 2012-2015 Mario Frasca <mario@anche.no>.
#
# This file is part of bauble.classic.
#
# bauble.classic is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# bauble.classic is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with bauble.classic. If not, see <http://www.gnu.org/licenses/>.
#
# report/mako/
#
import logging
logger = logging.getLogger(__name__)
import os
import shutil
import tempfile
import gtk
from mako.template import Template
from bauble.i18n import _
import bauble.db as db
import bauble.paths as paths
from bauble.plugins.report import FormatterPlugin, SettingsBox
import bauble.utils as utils
import bauble.utils.desktop as desktop
class MakoFormatterSettingsBox(SettingsBox):
def __init__(self, report_dialog=None, *args):
super(MakoFormatterSettingsBox, self).__init__(*args)
self.widgets = utils.load_widgets(
os.path.join(paths.lib_dir(),
"plugins", "report", 'mako', 'gui.glade'))
        # keep a reference to the settings box so it doesn't get destroyed in
        # remove_parent()
self.settings_box = self.widgets.settings_box
self.widgets.remove_parent(self.widgets.settings_box)
self.pack_start(self.settings_box)
def get_settings(self):
"""
"""
return {'template': self.widgets.template_chooser.get_filename(),
'private': self.widgets.private_check.get_active()}
def update(self, settings):
if 'template' in settings and settings['template']:
self.widgets.template_chooser.set_filename(settings['template'])
if 'private' in settings:
self.widgets.private_check.set_active(settings['private'])
_settings_box = MakoFormatterSettingsBox()
class MakoFormatterPlugin(FormatterPlugin):
"""
    The MakoFormatterPlugin passes the values in the search
    results directly to a Mako template. It is up to the template
    author to validate the types of the values and handle them accordingly.
"""
title = _('Mako')
@classmethod
def install(cls, import_defaults=True):
logger.debug("installing mako plugin")
# copy default template files to user_dir
templates = ['example.csv', 'example.csv']
base_dir = os.path.join(paths.lib_dir(), "plugins", "report", 'mako')
for template in templates:
f = os.path.join(paths.user_dir(), template)
if not os.path.exists(f):
shutil.copy(os.path.join(base_dir, template), f)
@staticmethod
def get_settings_box():
return _settings_box
@staticmethod
def format(objs, **kwargs):
template_filename = kwargs['template']
use_private = kwargs.get('private', True)
if not template_filename:
msg = _('Please select a template.')
utils.message_dialog(msg, gtk.MESSAGE_WARNING)
return False
template = Template(
filename=template_filename, input_encoding='utf-8',
output_encoding='utf-8')
session = db.Session()
values = map(session.merge, objs)
report = template.render(values=values)
session.close()
# assume the template is the same file type as the output file
head, ext = os.path.splitext(template_filename)
fd, filename = tempfile.mkstemp(suffix=ext)
os.write(fd, report)
os.close(fd)
try:
desktop.open(filename)
except OSError:
utils.message_dialog(_('Could not open the report with the '
'default program. You can open the '
'file manually at %s') % filename)
return report
formatter_plugin = MakoFormatterPlugin
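# A minimal template sketch (hypothetical; the shipped example.csv is not
# reproduced here). Each object merged into `values` is rendered by the
# template author, e.g.:
#   % for v in values:
#   ${v}
#   % endfor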
|
mfrasca/bauble.classic
|
bauble/plugins/report/mako/__init__.py
|
Python
|
gpl-2.0
| 4,268
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__='0.0.1'
import os
import re
import time
import platform
import SimpleXMLRPCServer
IP="0.0.0.0"
PORT=1234
class Proc:
def __init__(self):
self.proc_file = dict(
PROC = '/proc',
MEM_INFO = "/proc/meminfo", # 内存使用
CPU_INFO = '/proc/cpuinfo',
CPU_STAT = '/proc/stat', # cpu占用率
LOAD_AVG = '/proc/loadavg', # cpu负载
UP_TIME = '/proc/uptime',
NET_STAT = '/proc/net/dev', #网卡
MOUNTS = '/proc/mounts',
)
def __read_file(self, fi):
"""
        Return a dict of data from a proc file (str: float).
        Values are in kilobytes.
From:
http://forrst.com/posts/python_function_to_read_proc_meminfo-gRj
"""
re_parser = re.compile(r'^(?P<key>\S*):\s*(?P<value>\d*)\s*kB')
result = dict()
with open(fi) as fil:
for line in fil:
match = re_parser.match(line)
if not match:
continue # skip lines that don't parse
key, value = match.groups(['key', 'value'])
result[key] = float(value)
return result
def __meminfo(self):
"""
read /proc/meminfo
"""
return self.__read_file(self.proc_file['MEM_INFO'])
def mem_info(self):
"""
get mem from __meminfo()
result: KB
"""
dic = self.__meminfo()
mem_total = dic['MemTotal']
mem_used = mem_total - dic['MemFree'] - dic['Buffers'] - dic['Cached']
return dict(mem_total=mem_total, mem_used=mem_used)
def __cpu_info(self):
"""
read /proc/cpuinfo
"""
with open(self.proc_file['CPU_INFO']) as f:
model = list()
for line in f:
# Ignore the blank line separating the information between
# details about two processing units
if line.strip():
if line.rstrip('\n').startswith('model name'):
model_name = line.rstrip('\n').split(':')[1]
model.append(model_name)
return model
def cpu_info(self):
        '''
        Model name of each processing unit.
        '''
return self.__cpu_info()
def __read_cpu_str(self):
"""
Read the current system cpu usage from /proc/stat.
"""
with open(self.proc_file['CPU_STAT']) as fi:
for line in fi:
l = line.split()
if len(l) < 5:
continue
if l[0].startswith('cpu'):
return l
return []
def __read_cpu_usage(self):
"""
get cpu usage info from str
"""
        cpustr = self.__read_cpu_str()
        usni = (long(cpustr[1]) + long(cpustr[2]) + long(cpustr[3]) + long(cpustr[4]) +
                long(cpustr[5]) + long(cpustr[6]) + long(cpustr[7]))
        usn = long(cpustr[1]) + long(cpustr[2]) + long(cpustr[3])
        return float(usn), float(usni)
def cpu_usage(self):
"""
get cpu avg used by percent
And the __read_cpu_usage()
From:
http://outofmemory.cn/code-snippet/3428/python-jiankong-linux-cpu-usage-lv
"""
usn1, usni1 = self.__read_cpu_usage()
time.sleep(2)
usn2, usni2 = self.__read_cpu_usage()
cpuper=(usn2-usn1)/(usni2-usni1)
return cpuper
def __load_stat(self):
"""
** 系统平均负载
前三个数字是1、5、15分钟内的平均进程数。
第四个值的分子是正在运行的进程数,分母是进程总数
最后一个是最近运行的进程ID号
"""
with open(self.proc_file['LOAD_AVG']) as fi:
con = fi.read().split()
loadavg = dict(
lavg_1 = con[0],
lavg_5 = con[1],
lavg_15 = con[2],
nr = con[3],
last_pid = con[4],
)
return loadavg
def load_avg(self):
"""
系统平均负载
"""
return self.__load_stat()
def net_stat(self):
"""
获取网卡流量信息 /proc/net/dev
返回dict,单位byte
"""
with open(self.proc_file['NET_STAT']) as fi:
net_dump = fi.readlines()
device_data={}
#data = namedtuple('data',['rx','tx'])
for line in net_dump[2:]:
line = line.split(':')
if line[0].strip() != 'lo':
device_data[line[0].strip()] = {'rx':float(line[1].split()[0])/(1024.0*1024.0),
'tx':float(line[1].split()[8])/(1024.0*1024.0)}
return device_data
def uptime_stat(self):
"""
查看开机时间
From:
http://wangwei007.blog.51cto.com/68019/1047061
"""
uptime = dict()
with open(self.proc_file['UP_TIME']) as fi:
con = fi.read().split()
all_sec = float(con[0])
MINUTE,HOUR,DAY = 60,3600,86400
uptime['day'] = int(all_sec / DAY )
uptime['hour'] = int((all_sec % DAY) / HOUR)
uptime['minute'] = int((all_sec % HOUR) / MINUTE)
uptime['second'] = int(all_sec % MINUTE)
uptime['Free rate'] = float(con[1]) / float(con[0])
return uptime
def disk_stat(self):
        '''
        When sending dicts over xmlrpclib, strings and floats
        transfer successfully, so all values are cast to float.
        '''
hd=dict()
disk = os.statvfs("/")
hd['available'] = disk.f_bsize * disk.f_bavail
hd['capacity'] = disk.f_bsize * disk.f_blocks
hd['used'] = disk.f_bsize * disk.f_bfree
hd = {i:float(hd[i]) for i in hd}
return hd
def process_num(self):
"""
进程数目
"""
num = 0
for subdir in os.listdir(self.proc_file['PROC']):
if subdir.isdigit():
num += 1
return num
def __fs_size(self,path):
""" 获取分区大小
"""
_info = os.statvfs(path)
total = _info.f_frsize * _info.f_blocks
free = _info.f_frsize * _info.f_bavail
return float(total), float(free)
def partition(self):
""" 获取分区信息
From:
http://aspirer2004.blog.163.com/blog/static/106764720133160452041/
"""
with open(self.proc_file['MOUNTS'], 'r') as f:
mounts = f.readlines()
result = dict()
for mount in mounts:
if mount.startswith('/dev/'):
dev, target = mount.split()[:2]
result[target]={
'dev': dev,
'total': self.__fs_size(target)[0],
'free': self.__fs_size(target)[1],
}
return result
def get_uname(self):
"""
system uname
eg:
('Linux', 'fedora.echorand', '3.7.4-204.fc18.x86_64',
'#1 SMP Wed Jan 23 16:44:29 UTC 2013', 'x86_64')
"""
return platform.uname()
def get_system(self):
'''
get system platform eg: linux or windows
'''
return platform.system()
def get_release(self):
"""
get release version num
"""
return platform.release()
def get_linux_distribution(self):
'''
get info about linux distribution
'''
return platform.linux_distribution()
def get_architecture(self):
"""
return 64bit or 32bit
"""
return platform.architecture()[0]
def get_node(self):
"""
return node name
hostname
"""
return platform.node()
if __name__ == '__main__':
server = SimpleXMLRPCServer.SimpleXMLRPCServer((IP, PORT))
obj = Proc()
server.register_instance(obj)
server.serve_forever()
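# Illustrative client usage (hypothetical host; assumes Python 2 xmlrpclib):
#   import xmlrpclib
#   proxy = xmlrpclib.ServerProxy('http://127.0.0.1:1234')
#   print(proxy.mem_info())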
|
ymero/ServerMonitor
|
Clients/Linux_like/server_client/ServerRun.py
|
Python
|
mit
| 7,926
|
#
# distutils/version.py
#
# Implements multiple version numbering conventions for the
# Python Module Distribution Utilities.
#
# $Id$
#
# Copied here on 04/Dec/2021, as distutils will be deprecated in Py3.10
# This class is now part of the 'packaging' tool, but taking on an entire dependency for a single class seems silly
"""Provides classes to represent module version numbers (one class for
each style of version numbering). There are currently two such classes
implemented: StrictVersion and LooseVersion.
Every version number class implements the following interface:
* the 'parse' method takes a string and parses it to some internal
representation; if the string is an invalid version number,
'parse' raises a ValueError exception
* the class constructor takes an optional string argument which,
if supplied, is passed to 'parse'
* __str__ reconstructs the string that was passed to 'parse' (or
an equivalent string -- ie. one that will generate an equivalent
version number instance)
* __repr__ generates Python code to recreate the version number instance
* _cmp compares the current instance with either another instance
of the same class or a string (which will be parsed to an instance
of the same class, thus must follow the same rules)
"""
import re
class Version:
"""Abstract base class for version numbering classes. Just provides
constructor (__init__) and reproducer (__repr__), because those
seem to be the same for all version numbering classes; and route
rich comparisons to _cmp.
"""
def __init__(self, vstring=None):
if vstring:
self.parse(vstring)
def __repr__(self):
return "%s ('%s')" % (self.__class__.__name__, str(self))
def __eq__(self, other):
c = self._cmp(other)
if c is NotImplemented:
return c
return c == 0
def __lt__(self, other):
c = self._cmp(other)
if c is NotImplemented:
return c
return c < 0
def __le__(self, other):
c = self._cmp(other)
if c is NotImplemented:
return c
return c <= 0
def __gt__(self, other):
c = self._cmp(other)
if c is NotImplemented:
return c
return c > 0
def __ge__(self, other):
c = self._cmp(other)
if c is NotImplemented:
return c
return c >= 0
# Interface for version-number classes -- must be implemented
# by the following classes (the concrete ones -- Version should
# be treated as an abstract class).
# __init__ (string) - create and take same action as 'parse'
# (string parameter is optional)
# parse (string) - convert a string representation to whatever
# internal representation is appropriate for
# this style of version numbering
# __str__ (self) - convert back to a string; should be very similar
# (if not identical to) the string supplied to parse
# __repr__ (self) - generate Python code to recreate
# the instance
# _cmp (self, other) - compare two version numbers ('other' may
# be an unparsed version string, or another
# instance of your version class)
# The rules according to Greg Stein:
# 1) a version number has 1 or more numbers separated by a period or by
# sequences of letters. If only periods, then these are compared
# left-to-right to determine an ordering.
# 2) sequences of letters are part of the tuple for comparison and are
# compared lexicographically
# 3) recognize the numeric components may have leading zeroes
#
# The LooseVersion class below implements these rules: a version number
# string is split up into a tuple of integer and string components, and
# comparison is a simple tuple comparison. This means that version
# numbers behave in a predictable and obvious way, but a way that might
# not necessarily be how people *want* version numbers to behave. There
# wouldn't be a problem if people could stick to purely numeric version
# numbers: just split on period and compare the numbers as tuples.
# However, people insist on putting letters into their version numbers;
# the most common purpose seems to be:
# - indicating a "pre-release" version
# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
# - indicating a post-release patch ('p', 'pl', 'patch')
# but of course this can't cover all version number schemes, and there's
# no way to know what a programmer means without asking him.
#
# The problem is what to do with letters (and other non-numeric
# characters) in a version number. The current implementation does the
# obvious and predictable thing: keep them as strings and compare
# lexically within a tuple comparison. This has the desired effect if
# an appended letter sequence implies something "post-release":
# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
#
# However, if letters in a version number imply a pre-release version,
# the "obvious" thing isn't correct. Eg. you would expect that
# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
# implemented here, this just isn't so.
#
# Two possible solutions come to mind. The first is to tie the
# comparison algorithm to a particular set of semantic rules, as was done
# in distutils' StrictVersion class (not retained in this vendored copy).
# This works great as long
# as everyone can go along with bondage and discipline. Hopefully a
# (large) subset of Python module programmers will agree that the
# particular flavour of bondage and discipline provided by StrictVersion
# provides enough benefit to be worth using, and will submit their
# version numbering scheme to its domination. The free-thinking
# anarchists in the lot will never give in, though, and something needs
# to be done to accommodate them.
#
# Perhaps a "moderately strict" version class could be implemented that
# lets almost anything slide (syntactically), and makes some heuristic
# assumptions about non-digits in version number strings. This could
# sink into special-case-hell, though; if I was as talented and
# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
# just as happy dealing with things like "2g6" and "1.13++". I don't
# think I'm smart enough to do it right though.
#
# In any case, I've coded the test suite for this module (see
# ../test/test_version.py) specifically to fail on things like comparing
# "1.2a2" and "1.2". That's not because the *code* is doing anything
# wrong, it's because the simple, obvious design doesn't match my
# complicated, hairy expectations for real-world version numbers. It
# would be a snap to fix the test suite to say, "Yep, LooseVersion does
# the Right Thing" (ie. the code matches the conception). But I'd rather
# have a conception that matches common notions about version numbers.
class LooseVersion(Version):
"""Version numbering for anarchists and software realists.
Implements the standard interface for version number classes as
described above. A version number consists of a series of numbers,
separated by either periods or strings of letters. When comparing
version numbers, the numeric components will be compared
numerically, and the alphabetic components lexically. The following
are all valid version numbers, in no particular order:
1.5.1
1.5.2b2
161
3.10a
8.02
3.4j
1996.07.12
3.2.pl0
3.1.1.6
2g6
11g
0.960923
2.2beta29
1.13++
5.5.kw
2.0b1pl0
In fact, there is no such thing as an invalid version number under
this scheme; the rules for comparison are simple and predictable,
but may not always give the results you want (for some definition
of "want").
"""
component_re = re.compile(r"(\d+ | [a-z]+ | \.)", re.VERBOSE)
def __init__(self, vstring=None):
if vstring:
self.parse(vstring)
def parse(self, vstring):
# I've given up on thinking I can reconstruct the version string
# from the parsed tuple -- so I just store the string here for
# use by __str__
self.vstring = vstring
components = [x for x in self.component_re.split(vstring) if x and x != "."]
for i, obj in enumerate(components):
try:
components[i] = int(obj)
except ValueError:
pass
self.version = components
def __str__(self):
return self.vstring
def __repr__(self):
return "LooseVersion ('%s')" % str(self)
def _cmp(self, other):
if isinstance(other, str):
other = LooseVersion(other)
if self.version == other.version:
return 0
if self.version < other.version:
return -1
if self.version > other.version:
return 1
# end class LooseVersion
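# A minimal usage sketch (illustrative, not part of the vendored module);
# comparisons follow the tuple rules described above:
#   >>> LooseVersion('1.5.1') < LooseVersion('1.5.2b2')
#   True
#   >>> LooseVersion('0.99') < LooseVersion('0.99pl14') < LooseVersion('1.0')
#   True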
|
spulec/moto
|
moto/utilities/distutils_version.py
|
Python
|
apache-2.0
| 9,112
|
import logging
import logging.config
import sys
import threading
import os
from amberclient.collision_avoidance.collision_avoidance_proxy import CollisionAvoidanceProxy
from amberclient.common.amber_client import AmberClient
from amberclient.location.location import LocationProxy
from amberclient.roboclaw.roboclaw import RoboclawProxy
from amberdriver.common.message_handler import MessageHandler
from amberdriver.drive_to_point import drive_to_point_pb2
from amberdriver.drive_to_point.drive_to_point import DriveToPoint
from amberdriver.tools import config
__author__ = 'paoolo'
pwd = os.path.dirname(os.path.abspath(__file__))
logging.config.fileConfig('%s/drive_to_point.ini' % pwd)
config.add_config_ini('%s/drive_to_point.ini' % pwd)
LOGGER_NAME = 'DriveToPointController'
USE_COLLISION_AVOIDANCE = config.DRIVE_TO_POINT_USE_COLLISION_AVOIDANCE == 'True'
class DriveToPointController(MessageHandler):
def __init__(self, pipe_in, pipe_out, driver):
MessageHandler.__init__(self, pipe_in, pipe_out)
self.__drive_to_point = driver
self.__logger = logging.getLogger(LOGGER_NAME)
def handle_data_message(self, header, message):
if message.HasExtension(drive_to_point_pb2.setTargets):
self.__handle_set_targets(header, message)
elif message.HasExtension(drive_to_point_pb2.getNextTarget):
self.__handle_get_next_target(header, message)
elif message.HasExtension(drive_to_point_pb2.getNextTargets):
self.__handle_get_next_targets(header, message)
elif message.HasExtension(drive_to_point_pb2.getVisitedTarget):
self.__handle_get_visited_target(header, message)
elif message.HasExtension(drive_to_point_pb2.getVisitedTargets):
self.__handle_get_visited_targets(header, message)
elif message.HasExtension(drive_to_point_pb2.getConfiguration):
self.__handle_get_configuration(header, message)
else:
self.__logger.warning('No request in message')
def __handle_set_targets(self, header, message):
self.__logger.debug('Set targets')
targets = message.Extensions[drive_to_point_pb2.targets]
targets = zip(targets.longitudes, targets.latitudes, targets.radiuses)
self.__drive_to_point.set_targets(targets)
@MessageHandler.handle_and_response
def __handle_get_next_target(self, received_header, received_message, response_header, response_message):
self.__logger.debug('Get next target')
next_target, current_location = self.__drive_to_point.get_next_target_and_location()
targets = response_message.Extensions[drive_to_point_pb2.targets]
targets.longitudes.extend([next_target[0]])
targets.latitudes.extend([next_target[1]])
targets.radiuses.extend([next_target[2]])
location = response_message.Extensions[drive_to_point_pb2.location]
location.x, location.y, location.p, location.alfa, location.timeStamp = current_location
response_message.Extensions[drive_to_point_pb2.getNextTarget] = True
return response_header, response_message
@MessageHandler.handle_and_response
def __handle_get_next_targets(self, received_header, received_message, response_header, response_message):
self.__logger.debug('Get next targets')
next_targets, current_location = self.__drive_to_point.get_next_targets_and_location()
targets = response_message.Extensions[drive_to_point_pb2.targets]
targets.longitudes.extend(map(lambda next_target: next_target[0], next_targets))
targets.latitudes.extend(map(lambda next_target: next_target[1], next_targets))
targets.radiuses.extend(map(lambda next_target: next_target[2], next_targets))
location = response_message.Extensions[drive_to_point_pb2.location]
location.x, location.y, location.p, location.alfa, location.timeStamp = current_location
response_message.Extensions[drive_to_point_pb2.getNextTargets] = True
return response_header, response_message
@MessageHandler.handle_and_response
def __handle_get_visited_target(self, received_header, received_message, response_header, response_message):
self.__logger.debug('Get visited target')
visited_target, current_location = self.__drive_to_point.get_visited_target_and_location()
targets = response_message.Extensions[drive_to_point_pb2.targets]
targets.longitudes.extend([visited_target[0]])
targets.latitudes.extend([visited_target[1]])
targets.radiuses.extend([visited_target[2]])
location = response_message.Extensions[drive_to_point_pb2.location]
location.x, location.y, location.p, location.alfa, location.timeStamp = current_location
response_message.Extensions[drive_to_point_pb2.getVisitedTarget] = True
return response_header, response_message
@MessageHandler.handle_and_response
def __handle_get_visited_targets(self, received_header, received_message, response_header, response_message):
self.__logger.debug('Get visited targets')
visited_targets, current_location = self.__drive_to_point.get_visited_targets_and_location()
targets = response_message.Extensions[drive_to_point_pb2.targets]
targets.longitudes.extend(map(lambda target: target[0], visited_targets))
targets.latitudes.extend(map(lambda target: target[1], visited_targets))
targets.radiuses.extend(map(lambda target: target[2], visited_targets))
location = response_message.Extensions[drive_to_point_pb2.location]
location.x, location.y, location.p, location.alfa, location.timeStamp = current_location
response_message.Extensions[drive_to_point_pb2.getVisitedTargets] = True
return response_header, response_message
@MessageHandler.handle_and_response
def __handle_get_configuration(self, received_header, received_message, response_header, response_message):
self.__logger.debug('Get configuration')
configuration = response_message.Extensions[drive_to_point_pb2.configuration]
configuration.maxSpeed = self.__drive_to_point.MAX_SPEED
response_message.Extensions[drive_to_point_pb2.getConfiguration] = True
return response_header, response_message
def handle_subscribe_message(self, header, message):
self.__logger.debug('Subscribe action, nothing to do...')
def handle_unsubscribe_message(self, header, message):
self.__logger.debug('Unsubscribe action, nothing to do...')
def handle_client_died_message(self, client_id):
self.__logger.info('Client %d died, stop!', client_id)
self.__drive_to_point.set_targets([])
if __name__ == '__main__':
client_for_location = AmberClient('127.0.0.1', name="location")
client_for_driver = AmberClient('127.0.0.1', name="driver")
location_proxy = LocationProxy(client_for_location, 0)
if USE_COLLISION_AVOIDANCE:
driver_proxy = CollisionAvoidanceProxy(client_for_driver, 0)
else:
driver_proxy = RoboclawProxy(client_for_driver, 0)
drive_to_point = DriveToPoint(driver_proxy, location_proxy)
driving_thread = threading.Thread(target=drive_to_point.driving_loop, name="driving-thread")
driving_thread.start()
location_thread = threading.Thread(target=drive_to_point.location_loop, name="location-thread")
location_thread.start()
controller = DriveToPointController(sys.stdin, sys.stdout, drive_to_point)
controller()
|
showmen15/testEEE
|
src/amberdriver/drive_to_point/drive_to_point_controller.py
|
Python
|
mit
| 7,564
|
r"""
The "tests" submodule within the "dstauffman2.games.pentago" module is a full test suite based on
the Python `unittest` library.
Notes
-----
#. Written by David C. Stauffer in January 2016.
"""
#%% Imports
# None
#%% Unit test
if __name__ == '__main__':
pass
|
DStauffman/dstauffman2
|
dstauffman2/games/pentago/tests/__init__.py
|
Python
|
lgpl-3.0
| 272
|
#!/usr/bin/env python3
# Copyright (C) 2013-2016 Florian Festi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from boxes import *
class GearBox(Boxes):
"""Gearbox with multiple identical stages"""
ui_group = "Part"
def __init__(self):
Boxes.__init__(self)
self.addSettingsArgs(edges.FingerJointSettings)
self.argparser.add_argument(
"--teeth1", action="store", type=int, default=8,
help="number of teeth on ingoing shaft")
self.argparser.add_argument(
"--teeth2", action="store", type=int, default=20,
help="number of teeth on outgoing shaft")
self.argparser.add_argument(
"--modulus", action="store", type=float, default=3,
help="modulus of the theeth in mm")
self.argparser.add_argument(
"--shaft", action="store", type=float, default=6.,
help="diameter of the shaft")
self.argparser.add_argument(
"--stages", action="store", type=int, default=4,
help="number of stages in the gear reduction")
def render(self):
if self.teeth2 < self.teeth1:
self.teeth2, self.teeth1 = self.teeth1, self.teeth2
pitch1, size1, xxx = self.gears.sizes(teeth=self.teeth1, dimension=self.modulus)
pitch2, size2, xxx = self.gears.sizes(teeth=self.teeth2, dimension=self.modulus)
t = self.thickness
x = 1.1 * t * self.stages
if self.stages == 1:
y = size1 + size2
y1 = y / 2 - (pitch1 + pitch2) + pitch1
y2 = y / 2 + (pitch1 + pitch2) - pitch2
else:
y = 2 * size2
y1 = y / 2 - (pitch1 + pitch2) / 2
y2 = y / 2 + (pitch1 + pitch2) / 2
h = max(size1, size2) + t
b = "F"
t = "e" # prepare for close box
mh = self.shaft
def sideCB():
self.hole(y1, h / 2, mh / 2)
self.hole(y2, h / 2, mh / 2)
self.moveTo(self.thickness, self.thickness)
self.rectangularWall(y, h, [b, "f", t, "f"], callback=[sideCB], move="right")
self.rectangularWall(x, h, [b, "F", t, "F"], move="up")
self.rectangularWall(x, h, [b, "F", t, "F"])
self.rectangularWall(y, h, [b, "f", t, "f"], callback=[sideCB], move="left")
self.rectangularWall(x, h, [b, "F", t, "F"], move="up only")
self.rectangularWall(x, y, "ffff", move="up")
profile_shift = 20
pressure_angle = 20
for i in range(self.stages - 1):
self.gears(teeth=self.teeth2, dimension=self.modulus, angle=pressure_angle,
mount_hole=mh, profile_shift=profile_shift, move="up")
self.gears(teeth=self.teeth2, dimension=self.modulus, angle=pressure_angle,
mount_hole=mh, profile_shift=profile_shift, move="right")
for i in range(self.stages):
self.gears(teeth=self.teeth1, dimension=self.modulus, angle=pressure_angle,
mount_hole=mh, profile_shift=profile_shift, move="down")
|
florianfesti/boxes
|
boxes/generators/gearbox.py
|
Python
|
gpl-3.0
| 3,685
|
##
# Copyright 2009-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Python packages, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import os
import re
import sys
import tempfile
from distutils.version import LooseVersion
from vsc.utils import fancylogger
from vsc.utils.missing import nub
import easybuild.tools.environment as env
from easybuild.easyblocks.python import EXTS_FILTER_PYTHON_PACKAGES
from easybuild.framework.easyconfig import CUSTOM
from easybuild.framework.extensioneasyblock import ExtensionEasyBlock
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import mkdir, rmtree2, which
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
# deliberately not plain 'easy_install', to avoid giving preference to package installations listed in easy-install.pth
# '.' is required at the end when using easy_install/pip in unpacked source dir
EASY_INSTALL_INSTALL_CMD = "%(python)s setup.py easy_install --prefix=%(prefix)s %(installopts)s %(loc)s"
PIP_INSTALL_CMD = "pip install --prefix=%(prefix)s %(installopts)s %(loc)s"
SETUP_PY_INSTALL_CMD = "%(python)s setup.py install --prefix=%(prefix)s %(installopts)s"
SETUP_PY_DEVELOP_CMD = "%(python)s setup.py develop --prefix=%(prefix)s %(installopts)s"
UNKNOWN = 'UNKNOWN'
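# For illustration (assumed values): with python='/usr/bin/python' and
# prefix='/opt/sw', SETUP_PY_INSTALL_CMD expands to:
#   /usr/bin/python setup.py install --prefix=/opt/sw <installopts>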
def pick_python_cmd(req_maj_ver=None, req_min_ver=None):
"""
Pick 'python' command to use, based on specified version requirements.
If the major version is specified, it must be an exact match (==).
If the minor version is specified, it is considered a minimal minor version (>=).
List of considered 'python' commands (in order)
* 'python' available through $PATH
* 'python<major_ver>' available through $PATH
* 'python<major_ver>.<minor_ver>' available through $PATH
* Python executable used in current session (sys.executable)
"""
log = fancylogger.getLogger('pick_python_cmd', fname=False)
def check_python_cmd(python_cmd):
"""Check whether specified Python command satisfies requirements."""
# check whether specified Python command is available
if os.path.isabs(python_cmd):
if not os.path.isfile(python_cmd):
log.debug("Python command '%s' does not exist", python_cmd)
return False
else:
python_cmd_path = which(python_cmd)
if python_cmd_path is None:
log.debug("Python command '%s' not available through $PATH", python_cmd)
return False
if req_maj_ver is not None:
if req_min_ver is None:
req_majmin_ver = '%s.0' % req_maj_ver
else:
req_majmin_ver = '%s.%s' % (req_maj_ver, req_min_ver)
pycode = 'import sys; print("%s.%s" % sys.version_info[:2])'
out, _ = run_cmd("%s -c '%s'" % (python_cmd, pycode), simple=False)
out = out.strip()
# (strict) check for major version
maj_ver = out.split('.')[0]
if maj_ver != str(req_maj_ver):
log.debug("Major Python version does not match: %s vs %s", maj_ver, req_maj_ver)
return False
# check for minimal minor version
if LooseVersion(out) < LooseVersion(req_majmin_ver):
log.debug("Minimal requirement for minor Python version not satisfied: %s vs %s", out, req_majmin_ver)
return False
        # all checks passed
        log.debug("All checks passed for Python command '%s'!", python_cmd)
return True
# compose list of 'python' commands to consider
python_cmds = ['python']
if req_maj_ver:
python_cmds.append('python%s' % req_maj_ver)
if req_min_ver:
python_cmds.append('python%s.%s' % (req_maj_ver, req_min_ver))
python_cmds.append(sys.executable)
log.debug("Considering Python commands: %s", ', '.join(python_cmds))
# try and find a 'python' command that satisfies the requirements
res = None
for python_cmd in python_cmds:
if check_python_cmd(python_cmd):
log.debug("Python command '%s' satisfies version requirements!", python_cmd)
if os.path.isabs(python_cmd):
res = python_cmd
else:
res = which(python_cmd)
log.debug("Absolute path to retained Python command: %s", res)
break
else:
log.debug("Python command '%s' does not satisfy version requirements (maj: %s, min: %s), moving on",
req_maj_ver, req_min_ver, python_cmd)
return res
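# Illustrative call (hypothetical requirements): pick a 'python' command with
# major version exactly 2 and minor version >= 6:
#   python_cmd = pick_python_cmd(req_maj_ver=2, req_min_ver=6)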
def det_pylibdir(plat_specific=False, python_cmd=None):
"""Determine Python library directory."""
log = fancylogger.getLogger('det_pylibdir', fname=False)
if python_cmd is None:
# use 'python' that is listed first in $PATH if none was specified
python_cmd = 'python'
# determine Python lib dir via distutils
    # use run_cmd, so we talk to the active Python, not the system Python running EasyBuild
prefix = '/tmp/'
args = 'plat_specific=%s, prefix="%s"' % (plat_specific, prefix)
pycode = "import distutils.sysconfig; print(distutils.sysconfig.get_python_lib(%s))" % args
cmd = "%s -c '%s'" % (python_cmd, pycode)
log.debug("Determining Python library directory using command '%s'", cmd)
out, ec = run_cmd(cmd, simple=False, force_in_dry_run=True)
txt = out.strip().split('\n')[-1]
# value obtained should start with specified prefix, otherwise something is very wrong
if not txt.startswith(prefix):
raise EasyBuildError("Last line of output of %s does not start with specified prefix %s: %s (exit code %s)",
cmd, prefix, out, ec)
pylibdir = txt[len(prefix):]
log.debug("Determined pylibdir using '%s': %s", cmd, pylibdir)
return pylibdir
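# For illustration (typical value, assumed): on a standard Linux Python 2.7,
# det_pylibdir() returns something like 'lib/python2.7/site-packages' after
# the '/tmp/' prefix is stripped.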
class PythonPackage(ExtensionEasyBlock):
"""Builds and installs a Python package, and provides a dedicated module file."""
@staticmethod
def extra_options(extra_vars=None):
"""Easyconfig parameters specific to Python packages."""
if extra_vars is None:
extra_vars = {}
extra_vars.update({
'unpack_sources': [True, "Unpack sources prior to build/install", CUSTOM],
'req_py_majver': [2, "Required major Python version (only relevant when using system Python)", CUSTOM],
'req_py_minver': [6, "Required minor Python version (only relevant when using system Python)", CUSTOM],
'runtest': [True, "Run unit tests.", CUSTOM], # overrides default
'use_easy_install': [False, "Install using '%s'" % EASY_INSTALL_INSTALL_CMD, CUSTOM],
'use_pip': [False, "Install using '%s'" % PIP_INSTALL_CMD, CUSTOM],
'use_setup_py_develop': [False, "Install using '%s'" % SETUP_PY_DEVELOP_CMD, CUSTOM],
            'zipped_egg': [False, "Install as a zipped egg (requires use_easy_install or use_pip)", CUSTOM],
})
return ExtensionEasyBlock.extra_options(extra_vars=extra_vars)
def __init__(self, *args, **kwargs):
"""Initialize custom class variables."""
super(PythonPackage, self).__init__(*args, **kwargs)
self.sitecfg = None
self.sitecfgfn = 'site.cfg'
self.sitecfglibdir = None
self.sitecfgincdir = None
self.testinstall = False
self.testcmd = None
self.unpack_options = ''
self.python_cmd = None
self.pylibdir = UNKNOWN
self.all_pylibdirs = [UNKNOWN]
# make sure there's no site.cfg in $HOME, because setup.py will find it and use it
home = os.path.expanduser('~')
if os.path.exists(os.path.join(home, 'site.cfg')):
raise EasyBuildError("Found site.cfg in your home directory (%s), please remove it.", home)
        if 'modulename' not in self.options:
self.options['modulename'] = self.name.lower()
# determine install command
self.use_setup_py = False
if self.cfg.get('use_easy_install', False):
self.install_cmd = EASY_INSTALL_INSTALL_CMD
# don't auto-install dependencies
self.cfg.update('installopts', '--no-deps')
if self.cfg.get('zipped_egg', False):
self.cfg.update('installopts', '--zip-ok')
elif self.cfg.get('use_pip', False):
self.install_cmd = PIP_INSTALL_CMD
# don't auto-install dependencies
self.cfg.update('installopts', '--no-deps')
if self.cfg.get('zipped_egg', False):
self.cfg.update('installopts', '--egg')
else:
self.use_setup_py = True
if self.cfg.get('use_setup_py_develop', False):
self.install_cmd = SETUP_PY_DEVELOP_CMD
else:
self.install_cmd = SETUP_PY_INSTALL_CMD
if self.cfg.get('zipped_egg', False):
raise EasyBuildError("Installing zipped eggs requires using easy_install or pip")
self.log.debug("Using '%s' as install command", self.install_cmd)
def set_pylibdirs(self):
"""Set Python lib directory-related class variables."""
# pylibdir is the 'main' Python lib directory
if self.pylibdir == UNKNOWN:
self.pylibdir = det_pylibdir(python_cmd=self.python_cmd)
self.log.debug("Python library dir: %s" % self.pylibdir)
# on (some) multilib systems, the platform-specific library directory for the system Python is different
# cfr. http://serverfault.com/a/88739/126446
# so, we keep a list of different Python lib directories to take into account
self.all_pylibdirs = nub([self.pylibdir, det_pylibdir(plat_specific=True, python_cmd=self.python_cmd)])
self.log.debug("All Python library dirs: %s" % self.all_pylibdirs)
# make very sure an entry starting with lib/ is present,
# since older versions of setuptools hardcode 'lib' rather than using the value produced by
# distutils.sysconfig.get_python_lib (which may always be lib64/...)
if not any(pylibdir.startswith('lib/') for pylibdir in self.all_pylibdirs):
pylibdir = os.path.join('lib', *self.pylibdir.split(os.path.sep)[1:])
self.all_pylibdirs.append(pylibdir)
self.log.debug("No lib/ entry found in list of Python lib dirs, so added it: %s", self.all_pylibdirs)
def prepare_python(self):
"""Python-specific preperations."""
# pick 'python' command to use
python = None
python_root = get_software_root('Python')
# keep in mind that Python may be listed as an allowed system dependency,
# so just checking Python root is not sufficient
if python_root:
bin_python = os.path.join(python_root, 'bin', 'python')
if os.path.exists(bin_python) and os.path.samefile(which('python'), bin_python):
# if Python is listed as a (build) dependency, use 'python' command provided that way
python = os.path.join(python_root, 'bin', 'python')
self.log.debug("Retaining 'python' command for Python dependency: %s", python)
if python is None:
# if using system Python, go hunting for a 'python' command that satisfies the requirements
python = pick_python_cmd(req_maj_ver=self.cfg['req_py_majver'], req_min_ver=self.cfg['req_py_minver'])
if python:
self.python_cmd = python
self.log.info("Python command being used: %s", self.python_cmd)
else:
raise EasyBuildError("Failed to pick Python command to use")
# set Python lib directories
self.set_pylibdirs()
def compose_install_command(self, prefix, extrapath=None, installopts=None):
"""Compose full install command."""
# mainly for debugging
if self.install_cmd.startswith(EASY_INSTALL_INSTALL_CMD):
run_cmd("%s setup.py easy_install --version" % self.python_cmd, verbose=False)
if self.install_cmd.startswith(PIP_INSTALL_CMD):
out, _ = run_cmd("pip --version", verbose=False, simple=False)
# pip 8.x or newer required, because of --prefix option being used
pip_version_regex = re.compile('^pip ([0-9.]+)')
res = pip_version_regex.search(out)
if res:
pip_version = res.group(1)
if LooseVersion(pip_version) >= LooseVersion('8.0'):
self.log.info("Found pip version %s, OK", pip_version)
else:
raise EasyBuildError("Need pip version 8.0 or newer, found version %s", pip_version)
elif not self.dry_run:
raise EasyBuildError("Could not determine pip version from \"%s\" using pattern '%s'",
out, pip_version_regex.pattern)
cmd = []
if extrapath:
cmd.append(extrapath)
if self.cfg.get('unpack_sources', True):
# specify current directory
loc = '.'
else:
# specify path to 1st source file
loc = self.src[0]['path']
if installopts is None:
installopts = self.cfg['installopts']
cmd.extend([
self.cfg['preinstallopts'],
self.install_cmd % {
'installopts': installopts,
'loc': loc,
'prefix': prefix,
'python': self.python_cmd,
},
])
return ' '.join(cmd)
def extract_step(self):
"""Unpack source files, unless instructed otherwise."""
if self.cfg.get('unpack_sources', True):
super(PythonPackage, self).extract_step()
def prerun(self):
"""Prepare for installing Python package."""
super(PythonPackage, self).prerun()
self.prepare_python()
def prepare_step(self):
"""Prepare for building and installing this Python package."""
super(PythonPackage, self).prepare_step()
self.prepare_python()
def configure_step(self):
"""Configure Python package build/install."""
if self.sitecfg is not None:
# used by some extensions, like numpy, to find certain libs
finaltxt = self.sitecfg
if self.sitecfglibdir:
repl = self.sitecfglibdir
finaltxt = finaltxt.replace('SITECFGLIBDIR', repl)
if self.sitecfgincdir:
repl = self.sitecfgincdir
finaltxt = finaltxt.replace('SITECFGINCDIR', repl)
self.log.debug("Using %s: %s" % (self.sitecfgfn, finaltxt))
try:
if os.path.exists(self.sitecfgfn):
txt = open(self.sitecfgfn).read()
self.log.debug("Found %s: %s" % (self.sitecfgfn, txt))
config = open(self.sitecfgfn, 'w')
config.write(finaltxt)
config.close()
except IOError:
raise EasyBuildError("Creating %s failed", self.sitecfgfn)
# creates log entries for python being used, for debugging
run_cmd("%s -V" % self.python_cmd, verbose=False)
run_cmd("%s -c 'import sys; print(sys.executable)'" % self.python_cmd, verbose=False)
# don't add user site directory to sys.path (equivalent to python -s)
# see https://www.python.org/dev/peps/pep-0370/
env.setvar('PYTHONNOUSERSITE', '1', verbose=False)
run_cmd("%s -c 'import sys; print(sys.path)'" % self.python_cmd, verbose=False)
def build_step(self):
"""Build Python package using setup.py"""
if self.use_setup_py:
cmd = "%s %s setup.py build %s" % (self.cfg['prebuildopts'], self.python_cmd, self.cfg['buildopts'])
run_cmd(cmd, log_all=True, simple=True)
def test_step(self):
"""Test the built Python package."""
if isinstance(self.cfg['runtest'], basestring):
self.testcmd = self.cfg['runtest']
if self.cfg['runtest'] and self.testcmd is not None:
extrapath = ""
testinstalldir = None
if self.testinstall:
# install in test directory and export PYTHONPATH
try:
testinstalldir = tempfile.mkdtemp()
for pylibdir in self.all_pylibdirs:
mkdir(os.path.join(testinstalldir, pylibdir), parents=True)
except OSError, err:
raise EasyBuildError("Failed to create test install dir: %s", err)
# print Python search path (just debugging purposes)
run_cmd("%s -c 'import sys; print(sys.path)'" % self.python_cmd, verbose=False)
abs_pylibdirs = [os.path.join(testinstalldir, pylibdir) for pylibdir in self.all_pylibdirs]
extrapath = "export PYTHONPATH=%s &&" % os.pathsep.join(abs_pylibdirs + ['$PYTHONPATH'])
cmd = self.compose_install_command(testinstalldir, extrapath=extrapath)
run_cmd(cmd, log_all=True, simple=True, verbose=False)
if self.testcmd:
cmd = "%s%s" % (extrapath, self.testcmd % {'python': self.python_cmd})
run_cmd(cmd, log_all=True, simple=True)
if testinstalldir:
try:
rmtree2(testinstalldir)
except OSError, err:
raise EasyBuildError("Removing testinstalldir %s failed: %s", testinstalldir, err)
def install_step(self):
"""Install Python package to a custom path using setup.py"""
# create expected directories
abs_pylibdirs = [os.path.join(self.installdir, pylibdir) for pylibdir in self.all_pylibdirs]
for pylibdir in abs_pylibdirs:
mkdir(pylibdir, parents=True)
# set PYTHONPATH as expected
pythonpath = os.getenv('PYTHONPATH')
new_pythonpath = os.pathsep.join([x for x in abs_pylibdirs + [pythonpath] if x is not None])
env.setvar('PYTHONPATH', new_pythonpath, verbose=False)
# actually install Python package
cmd = self.compose_install_command(self.installdir)
run_cmd(cmd, log_all=True, simple=True)
# restore PYTHONPATH if it was set
if pythonpath is not None:
env.setvar('PYTHONPATH', pythonpath, verbose=False)
def run(self, *args, **kwargs):
"""Perform the actual Python package build/installation procedure"""
if not self.src:
raise EasyBuildError("No source found for Python package %s, required for installation. (src: %s)",
self.name, self.src)
kwargs.update({'unpack_src': True})
super(PythonPackage, self).run(*args, **kwargs)
# configure, build, test, install
self.configure_step()
self.build_step()
self.test_step()
self.install_step()
def sanity_check_step(self, *args, **kwargs):
"""
Custom sanity check for Python packages
"""
if 'exts_filter' not in kwargs:
orig_exts_filter = EXTS_FILTER_PYTHON_PACKAGES
exts_filter = (orig_exts_filter[0].replace('python', self.python_cmd), orig_exts_filter[1])
kwargs.update({'exts_filter': exts_filter})
return super(PythonPackage, self).sanity_check_step(*args, **kwargs)
def make_module_req_guess(self):
"""
Define list of subdirectories to consider for updating path-like environment variables ($PATH, etc.).
"""
guesses = super(PythonPackage, self).make_module_req_guess()
# avoid that lib subdirs are appended to $*LIBRARY_PATH if they don't provide libraries
# typically, only lib/pythonX.Y/site-packages should be added to $PYTHONPATH (see make_module_extra)
for envvar in ['LD_LIBRARY_PATH', 'LIBRARY_PATH']:
newlist = []
for subdir in guesses[envvar]:
# only subdirectories that contain one or more files/libraries should be retained
fullpath = os.path.join(self.installdir, subdir)
if os.path.exists(fullpath):
if any([os.path.isfile(os.path.join(fullpath, x)) for x in os.listdir(fullpath)]):
newlist.append(subdir)
self.log.debug("Only retaining %s subdirs from %s for $%s (others don't provide any libraries)",
newlist, guesses[envvar], envvar)
guesses[envvar] = newlist
return guesses
def make_module_extra(self, *args, **kwargs):
"""Add install path to PYTHONPATH"""
txt = ''
self.set_pylibdirs()
for path in self.all_pylibdirs:
fullpath = os.path.join(self.installdir, path)
# only extend $PYTHONPATH with existing, non-empty directories
if os.path.exists(fullpath) and os.listdir(fullpath):
txt += self.module_generator.prepend_paths('PYTHONPATH', path)
return super(PythonPackage, self).make_module_extra(txt, *args, **kwargs)
|
ocaisa/easybuild-easyblocks
|
easybuild/easyblocks/generic/pythonpackage.py
|
Python
|
gpl-2.0
| 22,401
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pooler
import os
from export_tools import *
from osv import osv, fields
class wiz_sneldev_categories_import(osv.osv_memory):
_name = 'sneldev.categories.import'
_description = 'Import categories'
_columns = {
}
_defaults = {
}
def do_categories_import(self, cr, uid, ids, context=None):
if (self.pool.get('sneldev.magento').import_categories(cr, uid) < 0):
raise osv.except_osv(('Warning'), ('Import failed, please refer to log file for failure details.'))
return {'type': 'ir.actions.act_window_close'}
wiz_sneldev_categories_import()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sysadminmatmoz/odoo-clearcorp
|
TODO-7.0/sneldev_magento/wizard/sneldev_magento_categories_import.py
|
Python
|
agpl-3.0
| 1,660
|
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import subprocess
import sys
from threading import Timer
from ..local import utils
from ..objects import output
SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
def Win32SetErrorMode(mode):
prev_error_mode = SEM_INVALID_VALUE
try:
import ctypes
prev_error_mode = \
ctypes.windll.kernel32.SetErrorMode(mode) #@UndefinedVariable
except ImportError:
pass
return prev_error_mode
def RunProcess(verbose, timeout, args, additional_env, **rest):
if verbose: print "#", " ".join(args)
popen_args = args
prev_error_mode = SEM_INVALID_VALUE
if utils.IsWindows():
popen_args = subprocess.list2cmdline(args)
# Try to change the error mode to avoid dialogs on fatal errors. Don't
# touch any existing error mode flags by merging the existing error mode.
# See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
error_mode = SEM_NOGPFAULTERRORBOX
prev_error_mode = Win32SetErrorMode(error_mode)
Win32SetErrorMode(error_mode | prev_error_mode)
env = os.environ.copy()
env.update(additional_env)
# GTest shard information is read by the V8 tests runner. Make sure it
# doesn't leak into the execution of gtests we're wrapping. Those might
# otherwise apply a second level of sharding and as a result skip tests.
env.pop('GTEST_TOTAL_SHARDS', None)
env.pop('GTEST_SHARD_INDEX', None)
try:
process = subprocess.Popen(
args=popen_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
**rest
)
except Exception as e:
sys.stderr.write("Error executing: %s\n" % popen_args)
raise e
if (utils.IsWindows() and prev_error_mode != SEM_INVALID_VALUE):
Win32SetErrorMode(prev_error_mode)
def kill_process(process, timeout_result):
timeout_result[0] = True
try:
if utils.IsWindows():
if verbose:
print "Attempting to kill process %d" % process.pid
sys.stdout.flush()
tk = subprocess.Popen(
'taskkill /T /F /PID %d' % process.pid,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = tk.communicate()
if verbose:
print "Taskkill results for %d" % process.pid
print stdout
print stderr
print "Return code: %d" % tk.returncode
sys.stdout.flush()
else:
process.kill()
except OSError:
sys.stderr.write('Error: Process %s already ended.\n' % process.pid)
# Pseudo object to communicate with timer thread.
timeout_result = [False]
timer = Timer(timeout, kill_process, [process, timeout_result])
timer.start()
stdout, stderr = process.communicate()
timer.cancel()
return output.Output(
process.returncode,
timeout_result[0],
stdout.decode('utf-8', 'replace').encode('utf-8'),
stderr.decode('utf-8', 'replace').encode('utf-8'),
process.pid,
)
def Execute(args, verbose=False, timeout=None, env=None):
args = [ c for c in args if c != "" ]
return RunProcess(verbose, timeout, args, env or {})
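# Usage sketch (illustrative, not part of the original file): run a command
# with a 60 second timeout; the returned output.Output wraps the exit code,
# a timed-out flag, the captured stdout/stderr and the pid, in the order of
# the constructor call in RunProcess() above.
#
#   result = Execute(["echo", "hello"], verbose=True, timeout=60)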
|
macchina-io/macchina.io
|
platform/JS/V8/v8/tools/testrunner/local/commands.py
|
Python
|
apache-2.0
| 4,703
|
# testing/util.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from ..util import jython, pypy, defaultdict, decorator, py2k
import decimal
import gc
import time
import random
import sys
import types
if jython:
def jython_gc_collect(*args):
"""aggressive gc.collect for tests."""
gc.collect()
time.sleep(0.1)
gc.collect()
gc.collect()
return 0
# "lazy" gc, for VM's that don't GC on refcount == 0
gc_collect = lazy_gc = jython_gc_collect
elif pypy:
def pypy_gc_collect(*args):
gc.collect()
gc.collect()
gc_collect = lazy_gc = pypy_gc_collect
else:
# assume CPython - straight gc.collect, lazy_gc() is a pass
gc_collect = gc.collect
def lazy_gc():
pass
def picklers():
picklers = set()
if py2k:
try:
import cPickle
picklers.add(cPickle)
except ImportError:
pass
import pickle
picklers.add(pickle)
# yes, this thing needs this much testing
for pickle_ in picklers:
for protocol in -1, 0, 1, 2:
yield pickle_.loads, lambda d: pickle_.dumps(d, protocol)
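# Illustrative round-trip (not part of the original module): every
# (loads, dumps) pair yielded above should reconstruct a value unchanged.
#
#   for loads, dumps in picklers():
#       assert loads(dumps({'key': [1, 2, 3]})) == {'key': [1, 2, 3]}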
def round_decimal(value, prec):
if isinstance(value, float):
return round(value, prec)
# can also use shift() here but that is 2.6 only
return (value * decimal.Decimal("1" + "0" * prec)
).to_integral(decimal.ROUND_FLOOR) / \
pow(10, prec)
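# Example (illustrative): round_decimal(decimal.Decimal("2.567"), 2) floors
# to Decimal("2.56"), while float inputs fall back to the built-in round().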
class RandomSet(set):
def __iter__(self):
l = list(set.__iter__(self))
random.shuffle(l)
return iter(l)
def pop(self):
index = random.randint(0, len(self) - 1)
item = list(set.__iter__(self))[index]
self.remove(item)
return item
def union(self, other):
return RandomSet(set.union(self, other))
def difference(self, other):
return RandomSet(set.difference(self, other))
def intersection(self, other):
return RandomSet(set.intersection(self, other))
def copy(self):
return RandomSet(self)
def conforms_partial_ordering(tuples, sorted_elements):
"""True if the given sorting conforms to the given partial ordering."""
deps = defaultdict(set)
for parent, child in tuples:
deps[parent].add(child)
for i, node in enumerate(sorted_elements):
for n in sorted_elements[i:]:
if node in deps[n]:
return False
else:
return True
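# Example (illustrative), where each tuple (parent, child) means the parent
# must sort before the child:
#
#   conforms_partial_ordering([(1, 2), (2, 3)], [1, 2, 3])  # True
#   conforms_partial_ordering([(1, 2), (2, 3)], [3, 1, 2])  # False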
def all_partial_orderings(tuples, elements):
edges = defaultdict(set)
for parent, child in tuples:
edges[child].add(parent)
def _all_orderings(elements):
if len(elements) == 1:
yield list(elements)
else:
for elem in elements:
subset = set(elements).difference([elem])
if not subset.intersection(edges[elem]):
for sub_ordering in _all_orderings(subset):
yield [elem] + sub_ordering
return iter(_all_orderings(elements))
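# Example (illustrative): with the single dependency (1, 2), meaning 1 must
# precede 2, all_partial_orderings([(1, 2)], {1, 2, 3}) yields exactly
# [1, 2, 3], [1, 3, 2] and [3, 1, 2].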
def function_named(fn, name):
"""Return a function with a given __name__.
Will assign to __name__ and return the original function if possible on
the Python implementation, otherwise a new function will be constructed.
This function should be phased out as much as possible
in favor of @decorator. Tests that "generate" many named tests
should be modernized.
"""
try:
fn.__name__ = name
except TypeError:
fn = types.FunctionType(fn.__code__, fn.__globals__, name,
fn.__defaults__, fn.__closure__)
return fn
def run_as_contextmanager(ctx, fn, *arg, **kw):
"""Run the given function under the given contextmanager,
simulating the behavior of 'with' to support older
Python versions.
This is not necessary anymore as we have placed 2.6
as minimum Python version, however some tests are still using
this structure.
"""
obj = ctx.__enter__()
try:
result = fn(obj, *arg, **kw)
ctx.__exit__(None, None, None)
return result
except:
exc_info = sys.exc_info()
raise_ = ctx.__exit__(*exc_info)
if not raise_:
raise
else:
return raise_
def rowset(results):
"""Converts the results of sql execution into a plain set of column tuples.
Useful for asserting the results of an unordered query.
"""
return {tuple(row) for row in results}
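# Example (illustrative): rowset([(1, 'a'), (2, 'b')]) == {(1, 'a'), (2, 'b')}
# whatever order the rows came back in.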
def fail(msg):
assert False, msg
@decorator
def provide_metadata(fn, *args, **kw):
"""Provide bound MetaData for a single test, dropping afterwards."""
from . import config
from . import engines
from sqlalchemy import schema
metadata = schema.MetaData(config.db)
self = args[0]
prev_meta = getattr(self, 'metadata', None)
self.metadata = metadata
try:
return fn(*args, **kw)
finally:
engines.drop_all_tables(metadata, config.db)
self.metadata = prev_meta
def force_drop_names(*names):
"""Force the given table names to be dropped after test complete,
isolating for foreign key cycles
"""
from . import config
from sqlalchemy import inspect
@decorator
def go(fn, *args, **kw):
try:
return fn(*args, **kw)
finally:
drop_all_tables(
config.db, inspect(config.db), include_names=names)
return go
class adict(dict):
"""Dict keys available as attributes. Shadows."""
def __getattribute__(self, key):
try:
return self[key]
except KeyError:
return dict.__getattribute__(self, key)
def __call__(self, *keys):
return tuple([self[key] for key in keys])
get_all = __call__
def drop_all_tables(engine, inspector, schema=None, include_names=None):
from sqlalchemy import Column, Table, Integer, MetaData, \
ForeignKeyConstraint
from sqlalchemy.schema import DropTable, DropConstraint
if include_names is not None:
include_names = set(include_names)
with engine.connect() as conn:
for tname, fkcs in reversed(
inspector.get_sorted_table_and_fkc_names(schema=schema)):
if tname:
if include_names is not None and tname not in include_names:
continue
conn.execute(DropTable(
Table(tname, MetaData(), schema=schema)
))
elif fkcs:
if not engine.dialect.supports_alter:
continue
for tname, fkc in fkcs:
if include_names is not None and \
tname not in include_names:
continue
tb = Table(
tname, MetaData(),
Column('x', Integer),
Column('y', Integer),
schema=schema
)
conn.execute(DropConstraint(
ForeignKeyConstraint(
[tb.c.x], [tb.c.y], name=fkc)
))
def teardown_events(event_cls):
@decorator
def decorate(fn, *arg, **kw):
try:
return fn(*arg, **kw)
finally:
event_cls._clear()
return decorate
|
fernandog/Medusa
|
ext/sqlalchemy/testing/util.py
|
Python
|
gpl-3.0
| 7,535
|
##########################################################################
# Copyright (c) 2013, 2014, University of Washington.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
##########################################################################
import sys, os, signal, time, getpass, subprocess, socket, pty
import debug, machines, uw_machinedata
from machines import Machine, MachineLockedError, MachineFactory
TFTP_PATH='/var/lib/tftpboot'
TOOLS_PATH='/usr/local/bin'
RACKBOOT=os.path.join(TOOLS_PATH, 'rackboot.sh')
RACKPOWER=os.path.join(TOOLS_PATH, 'rackpower')
class UWMachine(Machine):
_uw_machines = uw_machinedata.machines
host2mgmt = {
'bigfish.cs.washington.edu': 'bigfish-e1k1.cs.washington.edu',
'swingout1.cs.washington.edu': 'swingout1-brcm1.cs.washington.edu',
'swingout5.cs.washington.edu': 'swingout5-brcm1.cs.washington.edu'
}
def __init__(self, options):
super(UWMachine, self).__init__(options)
self.lockprocess = None
self.masterfd = None
def get_bootarch(self):
b = self._uw_machines[self.name]['bootarch']
assert(b in self.get_buildarchs())
return b
def get_machine_name(self):
return self._uw_machines[self.name]['machine_name']
def get_buildarchs(self):
return self._uw_machines[self.name]['buildarchs']
def get_ncores(self):
return self._uw_machines[self.name]['ncores']
def get_cores_per_socket(self):
return self._uw_machines[self.name]['cores_per_socket']
def get_tickrate(self):
return self._uw_machines[self.name]['tickrate']
def get_perfcount_type(self):
return self._uw_machines[self.name]['perfcount_type']
def get_kernel_args(self):
return self._uw_machines[self.name].get('kernel_args')
def get_pci_args(self):
return self._uw_machines[self.name].get('pci_args')
def get_boot_timeout(self):
return self._uw_machines[self.name].get('boot_timeout')
def get_hostname(self):
return self.get_machine_name() + '.cs.washington.edu'
def get_ip(self):
return socket.gethostbyname(self.host2mgmt[self.get_hostname()])
def get_tftp_dir(self):
user = getpass.getuser()
return os.path.join(TFTP_PATH, user, self.name + "_harness")
def _write_menu_lst(self, data, path):
debug.verbose('writing %s' % path)
debug.debug(data)
f = open(path, 'w')
f.write(data)
f.close()
def _set_menu_lst(self, relpath):
ip_menu_name = os.path.join(TFTP_PATH, "menu.lst." + self.get_ip())
debug.verbose('relinking %s to %s' % (ip_menu_name, relpath))
os.remove(ip_menu_name)
os.symlink(relpath, ip_menu_name)
def set_bootmodules(self, modules):
fullpath = os.path.join(self.get_tftp_dir(), 'menu.lst')
relpath = os.path.relpath(fullpath, TFTP_PATH)
tftppath = '/' + os.path.relpath(self.get_tftp_dir(), TFTP_PATH)
self._write_menu_lst(modules.get_menu_data(tftppath), fullpath)
self._set_menu_lst(relpath)
def lock(self):
"""Use conserver to lock the machine."""
# find out current status of console
debug.verbose('executing "console -i %s" to check state' %
self.get_machine_name())
proc = subprocess.Popen(["console", "-i", self.get_machine_name()],
stdout=subprocess.PIPE)
line = proc.communicate()[0]
assert(proc.returncode == 0)
# check that nobody else has it open for writing
myuser = getpass.getuser()
parts = line.strip().split(':')
conname, child, contype, details, users, state = parts[:6]
if users:
for userinfo in users.split(','):
mode, username, host, port = userinfo.split('@')[:4]
if 'w' in mode and username != myuser:
raise MachineLockedError # Machine is not free
# run a console in the background to 'hold' the lock and read output
debug.verbose('starting "console %s"' % self.get_machine_name())
# run on a PTY to work around terminal mangling code in console
(self.masterfd, slavefd) = pty.openpty()
self.lockprocess = subprocess.Popen(["console", self.get_machine_name()],
close_fds=True,
stdout=slavefd, stdin=slavefd)
os.close(slavefd)
# XXX: open in binary mode with no buffering
# otherwise select.select() may block when there is data in the buffer
self.console_out = os.fdopen(self.masterfd, 'rb', 0)
def unlock(self):
if self.lockprocess is None:
return # noop
debug.verbose('quitting console process (%d)' % self.lockprocess.pid)
# os.kill(self.lockprocess.pid, signal.SIGTERM)
os.write(self.masterfd, "\x05c.")
self.lockprocess.wait()
self.lockprocess = None
self.masterfd = None
def __rackboot(self, args):
debug.checkcmd([RACKBOOT] + args + [self.get_machine_name()])
def setup(self):
self.__rackboot(["-b", "-n"])
def __rackpower(self, arg):
        retries = 3
        while retries > 0:
            failed = False
            try:
                debug.checkcmd([RACKPOWER, arg, self.get_machine_name()])
            except subprocess.CalledProcessError:
                debug.warning("rackpower %s %s failed" %
                              (arg, self.get_machine_name()))
                failed = True
                retries -= 1
                if retries > 0:
                    debug.verbose("retrying...")
            if not failed:
                break
def reboot(self):
self.__rackpower('-r')
def shutdown(self):
self.__rackpower('-d')
def get_output(self):
return self.console_out
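# Register a concrete subclass for every known UW machine, so the factory can
# construct a UWMachine with the right `name` class attribute baked in.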
for n in sorted(UWMachine._uw_machines.keys()):
class TmpMachine(UWMachine):
name = n
MachineFactory.addMachine(n, TmpMachine, **UWMachine._uw_machines[n])
|
kishoredbn/barrelfish
|
tools/harness/machines/uw.py
|
Python
|
mit
| 6,318
|
# Copyright (C) 2014 BDT Media Automation GmbH
#
# Author: Stefan Hauser <stefan.hauser@bdt.de>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" ValueStore File Interface for Swift Object Server"""
import os, stat
import xattr
from swift.obj import diskfile
from swift.common.exceptions import DiskFileNotExist
class VSDiskFileManager(diskfile.DiskFileManager):
def __init__(self, conf, logger):
diskfile.DiskFileManager.__init__(self, conf, logger)
def get_diskfile_from_audit_location(self, audit_location):
dev_path = self.get_dev_path(audit_location.device, mount_check=False)
df = VSDiskFile.from_hash_dir(self,
audit_location.path, dev_path,
audit_location.partition)
return df
class VSDiskFileReader(diskfile.DiskFileReader):
def __init__(self, fp, data_file, obj_size, etag, threadpool,
disk_chunk_size, keep_cache_size, device_path, logger,
quarantine_hook, use_splice, pipe_size, keep_cache=False):
diskfile.DiskFileReader.__init__(self, fp, data_file, obj_size, etag, threadpool,
disk_chunk_size, keep_cache_size, device_path, logger,
quarantine_hook, use_splice, pipe_size, keep_cache)
def __iter__(self):
# construct path to storage file
storage_file = self._data_file
# first check if file exists
if os.path.isfile(storage_file):
status = None
            corrupted = 0
# file exists, now check extended attributes
try:
status = xattr.get(storage_file, 'user.vs.online')
file_stat = os.stat(storage_file)
try:
corrupted_ea = xattr.get(storage_file, 'user.vs.corrupted')
corrupted = ord(corrupted_ea[0])
except Exception, ex:
pass
except IOError:
pass
# status meta-data must exist
if status is None:
self._logger.info("status for %s is none." % self._data_file)
            # user.vs.online: 1 for online, 2 for offline.
if (status is not None and (ord(status[0]) == 1 or corrupted == 1)):
# do normal auditing
self._logger.info("Performing normal object audit of file %s" % self._data_file)
for chunk in super(VSDiskFileReader, self).__iter__():
if chunk:
yield chunk
return
# file not completely in cache, skip auditing
self._logger.info("Skipping object audit of file %s because storage file %s doesn't exist with full content "
% (self._data_file, storage_file))
# close file when done
if not self._suppress_file_closing:
self.close()
        # fall off the end without yielding anything; the method remains a
        # generator because of the yield in the branch above
class VSDiskFile(diskfile.DiskFile):
def __init__(self, mgr, device_path, threadpool, partition,
account=None, container=None, obj=None, _datadir=None,
policy_idx=0, use_splice=False, pipe_size=None):
diskfile.DiskFile.__init__(self, mgr, device_path, threadpool, partition,
account, container, obj, _datadir, policy_idx, use_splice, pipe_size)
@classmethod
def from_hash_dir(cls, mgr, hash_dir_path, device_path, partition):
return cls(mgr, device_path, None, partition, _datadir=hash_dir_path)
def open(self):
# TODO: check if file in cache
# if not in cache, return DiskFileNotExist
# This might break things in future when non-existing files
# are replicated as well
# Better continue and use the __iter__ functionality of the
# DiskFileReader object to handle caching
df = super(VSDiskFile, self).open()
return df
def reader(self, keep_cache=False,
_quarantine_hook=lambda m: None):
"""
Return a :class:`swift.common.swob.Response` class compatible
"`app_iter`" object as defined by
:class:`swift.obj.diskfile.DiskFileReader`.
For this implementation, the responsibility of closing the open file
is passed to the :class:`swift.obj.diskfile.DiskFileReader` object.
:param keep_cache: caller's preference for keeping data read in the
OS buffer cache
:param _quarantine_hook: 1-arg callable called when obj quarantined;
the arg is the reason for quarantine.
Default is to ignore it.
Not needed by the REST layer.
:returns: a :class:`swift.obj.diskfile.DiskFileReader` object
"""
# TODO: Check if file is in cache, if not, return exception
# This might break things in future when non-existing files
# are replicated as well
# Better continue and use the __iter__ functionality of the
# DiskFileReader object to handle caching
#random.seed()
#if random.random() > 0.5:
# raise DiskFileNotExist
# File in cache, so return a reader object
dr = VSDiskFileReader(
self._fp, self._data_file, int(self._metadata['Content-Length']),
self._metadata['ETag'], self._threadpool, self._disk_chunk_size,
self._mgr.keep_cache_size, self._device_path, self._logger,
use_splice=self._use_splice, quarantine_hook=_quarantine_hook,
pipe_size=self._pipe_size, keep_cache=keep_cache)
# At this point the reader object is now responsible for closing
# the file pointer.
self._fp = None
return dr
|
BDT-GER/SWIFT-TLC
|
source/Build/install_packages/SWIFT-Tape-Auditor/vs_auditor/vs_diskfile.py
|
Python
|
apache-2.0
| 6,438
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from datetime import datetime, timedelta
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from tastypie.test import ResourceTestCaseMixin
from django.contrib.auth.models import Group
from geonode.groups.models import GroupProfile
from guardian.shortcuts import get_anonymous_user
from geonode.tests.base import GeoNodeBaseTestSupport
from geonode.base.populate_test_data import all_public
from geonode.layers.models import Layer
class PermissionsApiTests(ResourceTestCaseMixin, GeoNodeBaseTestSupport):
def setUp(self):
super(PermissionsApiTests, self).setUp()
self.user = 'admin'
self.passwd = 'admin'
self.list_url = reverse(
'api_dispatch_list',
kwargs={
'api_name': 'api',
'resource_name': 'layers'})
all_public()
self.perm_spec = {"users": {}, "groups": {}}
def test_layer_get_list_unauth_all_public(self):
"""
Test that the correct number of layers are returned when the
client is not logged in and all are public
"""
resp = self.api_client.get(self.list_url)
self.assertValidJSONResponse(resp)
self.assertEquals(len(self.deserialize(resp)['objects']), 8)
def test_layers_get_list_unauth_some_public(self):
"""
Test that if a layer is not public then not all are returned when the
client is not logged in
"""
layer = Layer.objects.all()[0]
layer.set_permissions(self.perm_spec)
resp = self.api_client.get(self.list_url)
self.assertValidJSONResponse(resp)
self.assertEquals(len(self.deserialize(resp)['objects']), 7)
def test_layers_get_list_auth_some_public(self):
"""
        Test that if a layer is not public then all layers are still returned
        when the client is logged in
"""
self.api_client.client.login(username=self.user, password=self.passwd)
layer = Layer.objects.all()[0]
layer.set_permissions(self.perm_spec)
resp = self.api_client.get(self.list_url)
self.assertValidJSONResponse(resp)
self.assertEquals(len(self.deserialize(resp)['objects']), 8)
def test_layer_get_list_layer_private_to_one_user(self):
"""
        Test that if a layer is only visible to admin, then it does not appear
        in the unauthenticated list nor in the list when logged in as bobby
"""
perm_spec = {"users": {"admin": ['view_resourcebase']}, "groups": {}}
layer = Layer.objects.all()[0]
layer.set_permissions(perm_spec)
resp = self.api_client.get(self.list_url)
self.assertEquals(len(self.deserialize(resp)['objects']), 7)
self.api_client.client.login(username='bobby', password='bob')
resp = self.api_client.get(self.list_url)
self.assertEquals(len(self.deserialize(resp)['objects']), 8)
self.api_client.client.login(username=self.user, password=self.passwd)
resp = self.api_client.get(self.list_url)
self.assertEquals(len(self.deserialize(resp)['objects']), 8)
layer.is_published = False
layer.save()
# with resource publishing
with self.settings(RESOURCE_PUBLISHING=True):
resp = self.api_client.get(self.list_url)
self.assertEquals(len(self.deserialize(resp)['objects']), 8)
self.api_client.client.login(username='bobby', password='bob')
resp = self.api_client.get(self.list_url)
self.assertEquals(len(self.deserialize(resp)['objects']), 8)
self.api_client.client.login(username=self.user, password=self.passwd)
resp = self.api_client.get(self.list_url)
self.assertEquals(len(self.deserialize(resp)['objects']), 8)
def test_layer_get_detail_unauth_layer_not_public(self):
"""
Test that layer detail gives 401 when not public and not logged in
"""
layer = Layer.objects.all()[0]
layer.set_permissions(self.perm_spec)
self.assertHttpUnauthorized(self.api_client.get(
self.list_url + str(layer.id) + '/'))
self.api_client.client.login(username=self.user, password=self.passwd)
resp = self.api_client.get(self.list_url + str(layer.id) + '/')
self.assertValidJSONResponse(resp)
def test_new_user_has_access_to_old_layers(self):
"""Test that a new user can access the public available layers"""
from geonode.people.models import Profile
Profile.objects.create(username='imnew',
password='pbkdf2_sha256$12000$UE4gAxckVj4Z$N\
6NbOXIQWWblfInIoq/Ta34FdRiPhawCIZ+sOO3YQs=')
self.api_client.client.login(username='imnew', password='thepwd')
resp = self.api_client.get(self.list_url)
self.assertValidJSONResponse(resp)
self.assertEquals(len(self.deserialize(resp)['objects']), 8)
class SearchApiTests(ResourceTestCaseMixin, GeoNodeBaseTestSupport):
"""Test the search"""
def setUp(self):
super(SearchApiTests, self).setUp()
self.list_url = reverse(
'api_dispatch_list',
kwargs={
'api_name': 'api',
'resource_name': 'layers'})
all_public()
self.norman = get_user_model().objects.get(username="norman")
self.norman.groups.add(Group.objects.get(name='anonymous'))
self.test_user = get_user_model().objects.get(username='test_user')
self.test_user.groups.add(Group.objects.get(name='anonymous'))
self.bar = GroupProfile.objects.get(slug='bar')
self.anonymous_user = get_anonymous_user()
self.profiles_list_url = reverse(
'api_dispatch_list',
kwargs={
'api_name': 'api',
'resource_name': 'profiles'})
self.groups_list_url = reverse(
'api_dispatch_list',
kwargs={
'api_name': 'api',
'resource_name': 'groups'})
def test_profiles_filters(self):
"""Test profiles filtering"""
filter_url = self.profiles_list_url
resp = self.api_client.get(filter_url)
self.assertValidJSONResponse(resp)
self.assertEquals(len(self.deserialize(resp)['objects']), 9)
filter_url = self.profiles_list_url + '?name__icontains=norm'
resp = self.api_client.get(filter_url)
self.assertValidJSONResponse(resp)
self.assertEquals(len(self.deserialize(resp)['objects']), 1)
filter_url = self.profiles_list_url + '?name__icontains=NoRmAN'
resp = self.api_client.get(filter_url)
self.assertValidJSONResponse(resp)
self.assertEquals(len(self.deserialize(resp)['objects']), 1)
filter_url = self.profiles_list_url + '?name__icontains=bar'
resp = self.api_client.get(filter_url)
self.assertValidJSONResponse(resp)
self.assertEquals(len(self.deserialize(resp)['objects']), 0)
def test_groups_filters(self):
"""Test groups filtering"""
filter_url = self.groups_list_url
resp = self.api_client.get(filter_url)
self.assertValidJSONResponse(resp)
self.assertEquals(len(self.deserialize(resp)['objects']), 1)
filter_url = self.groups_list_url + '?name__icontains=bar'
resp = self.api_client.get(filter_url)
self.assertValidJSONResponse(resp)
self.assertEquals(len(self.deserialize(resp)['objects']), 1)
filter_url = self.groups_list_url + '?name__icontains=BaR'
resp = self.api_client.get(filter_url)
self.assertValidJSONResponse(resp)
self.assertEquals(len(self.deserialize(resp)['objects']), 1)
filter_url = self.groups_list_url + '?name__icontains=foo'
resp = self.api_client.get(filter_url)
self.assertValidJSONResponse(resp)
self.assertEquals(len(self.deserialize(resp)['objects']), 0)
def test_category_filters(self):
"""Test category filtering"""
        # check that we get the correct number of layers returned when
        # filtering on one and then on two different categories
filter_url = self.list_url + '?category__identifier=location'
resp = self.api_client.get(filter_url)
self.assertValidJSONResponse(resp)
self.assertEquals(len(self.deserialize(resp)['objects']), 3)
filter_url = self.list_url + \
'?category__identifier__in=location&category__identifier__in=biota'
resp = self.api_client.get(filter_url)
self.assertValidJSONResponse(resp)
self.assertEquals(len(self.deserialize(resp)['objects']), 5)
def test_tag_filters(self):
"""Test keywords filtering"""
        # check that we get the correct number of layers returned when
        # filtering on one and then on two different keywords
filter_url = self.list_url + '?keywords__slug=layertagunique'
resp = self.api_client.get(filter_url)
self.assertValidJSONResponse(resp)
self.assertEquals(len(self.deserialize(resp)['objects']), 1)
filter_url = self.list_url + \
'?keywords__slug__in=layertagunique&keywords__slug__in=populartag'
resp = self.api_client.get(filter_url)
self.assertValidJSONResponse(resp)
self.assertEquals(len(self.deserialize(resp)['objects']), 8)
def test_owner_filters(self):
"""Test owner filtering"""
        # check that we get the correct number of layers returned when
        # filtering on one and then on two different owners
filter_url = self.list_url + '?owner__username=user1'
resp = self.api_client.get(filter_url)
self.assertValidJSONResponse(resp)
self.assertEquals(len(self.deserialize(resp)['objects']), 1)
filter_url = self.list_url + \
'?owner__username__in=user1&owner__username__in=foo'
resp = self.api_client.get(filter_url)
self.assertValidJSONResponse(resp)
self.assertEquals(len(self.deserialize(resp)['objects']), 2)
def test_title_filter(self):
"""Test title filtering"""
        # check that we get the correct number of layers returned when
        # filtering on the title
filter_url = self.list_url + '?title=layer2'
resp = self.api_client.get(filter_url)
self.assertValidJSONResponse(resp)
self.assertEquals(len(self.deserialize(resp)['objects']), 1)
def test_date_filter(self):
"""Test date filtering"""
        # check that we get the correct number of layers returned when
        # filtering on the dates
step = timedelta(days=60)
now = datetime.now()
fstring = '%Y-%m-%d'
def to_date(val):
return val.date().strftime(fstring)
d1 = to_date(now - step)
filter_url = self.list_url + '?date__exact={}'.format(d1)
resp = self.api_client.get(filter_url)
self.assertValidJSONResponse(resp)
self.assertEquals(len(self.deserialize(resp)['objects']), 0)
d3 = to_date(now - (3 * step))
filter_url = self.list_url + '?date__gte={}'.format(d3)
resp = self.api_client.get(filter_url)
self.assertValidJSONResponse(resp)
self.assertEquals(len(self.deserialize(resp)['objects']), 3)
d4 = to_date(now - (4 * step))
filter_url = self.list_url + '?date__range={},{}'.format(d4, to_date(now))
resp = self.api_client.get(filter_url)
self.assertValidJSONResponse(resp)
self.assertEquals(len(self.deserialize(resp)['objects']), 4)
|
MapStory/geonode
|
geonode/api/tests.py
|
Python
|
gpl-3.0
| 12,428
|
# urls.py
from django.conf.urls import url
from events.viewsv2 import *
from events.decorators import *
from events.formsv2 import *
from events.urls import *
from .viewsv2 import *
app_name = 'eventsv2'
urlpatterns = [
url(
r'^training-planner/',
TrainingPlannerListView.as_view(template_name="training_planner.html"),
name="training_planner"
),
url(
r'^select-participants/',
TrainingPlannerListView.as_view(template_name="select_participants.html"),
name="select_participants"
),
url(
r'^student-batch/new/$',
StudentBatchCreateView.as_view(template_name="new_batch.html", \
form_class=StudentBatchForm),
name="add_batch"
),
url(
r'^student-batch/(?P<bid>\d+)/new/$',
StudentBatchCreateView.as_view(template_name="update_batch.html", \
form_class=NewStudentBatchForm),
name="add_student"
),
url(
r'^student-batch/(?P<pk>\d+)/$',
StudentBatchUpdateView.as_view(template_name="edit_batch.html", \
form_class=UpdateStudentBatchForm),
name="edit_batch"
),
url(
r'^student-batch/edit/(?P<pk>\d+)/$',
StudentBatchYearUpdateView.as_view(template_name="edit_batch_year.html", \
form_class=UpdateStudentYearBatchForm),
name="edit_year"
),
url(
r'^student-batch/(?P<bid>\d+)/view/$',
StudentListView.as_view(template_name="list_student.html"),
name="list_student"
),
url(
r'^student-batch/$',
StudentBatchListView.as_view(template_name="student_batch_list.html"),
name="batch_list"
),
url(
r'^(?P<tpid>\d+)/training-request',
TrainingRequestCreateView.as_view(template_name="training_request.html"),
name="training_request"
),
url(
r'^(?P<tid>\d+)/attendance',
TrainingAttendanceListView.as_view(template_name=\
"training_attendance.html"),
name="training_attendance"
),
url(
r'^(?P<tid>\d+)/certificate',
TrainingCertificateListView.as_view(template_name=\
"training_certificate.html"),
name="training_certificate"
),
url(
r'^training-request/(?P<pk>\d+)/$',
TrainingRequestEditView.as_view(template_name=\
"edit_training_request.html"),
name="edit_training_request"
),
url(
r'^(?P<bid>\d+)/student-delete/(?P<pk>\d+)/$',
StudentMasterDeleteView.as_view(template_name="student_delete_masterbatch.html", \
success_url="/software-training/student-batch"),
name="student_delete"
),
url(
r'^training-certificate/(?P<taid>\d+)/organiser/$',
OrganiserTrainingCertificateView.as_view(), \
name="organiser_training_certificate"
),
url(
r'^training-certificate/(?P<taid>\d+)/student/$',
StudentTrainingCertificateView.as_view(), \
name="student_training_certificate"
),
url(
r'^course-map-list/$',
CourseMapListView.as_view(template_name="coursemap_list.html"),
name="coursemaplist"
),
url(
r'^course-map/(?P<pk>\d+)$',
CourseMapUpdateView.as_view(template_name=\
"coursemap.html"),
name="coursemapupdate"
),
# url(
# r'^single-training/pending/$',
# SingleTrainingNewListView.as_view(template_name="single-training.html"),
# name="single-training-pending"
# ),
# url(
# r'^single-training/approved/$',
# SingletrainingApprovedListView.as_view(template_name="single-training.html"),
# name="single-training-approved"
# ),
# url(
# r'^single-training/rejected/$',
# SingletrainingRejectedListView.as_view(template_name="single-training.html"),
# name="single-training-rejected"
# ),
# url(
# r'^single-training/ongoing/$',
# SingletrainingOngoingListView.as_view(template_name="single-training.html"),
# name="single-training-ongoing"
# ),
# url(
# r'^single-training/pendingattendance/$',
# SingletrainingPendingAttendanceListView.as_view(template_name="single-training.html"),
# name="single-training-pendingattendance"
# ),
# url(
# r'^single-training/completed/$',
# SingletrainingCompletedListView.as_view(template_name="single-training.html"),
# name="single-training-completed"
# ),
# url(
# r'^single-training/new/$',
# SingletrainingCreateView.as_view(template_name="single-training-form.html"),
# name="new-single-training"
# ),
url(
r'^single-training/(?P<pk>\d+)/edit/$',
SingletrainingUpdateView.as_view(template_name="single-training-form.html"),
name="update-single-training"
),
url(
r'^single-training/(?P<status>\w+)/$',
SingleTrainingListView.as_view(template_name="single-training.html"),
name="single-training-list"
),
url(
r'^single-training/(?P<pk>\d+)/complete/$',
SingletrainingMarkCompleteUpdateView.as_view(template_name=""),
name="markcomplete-single-training"
),
url(
r'^single-training/(?P<tid>\d+)/certificate',
SingleTrainingCertificateListView.as_view(template_name=\
"single-training-certificate.html"),
name="single-training-certificate"
),
url(
r'^single-training-certificate/(?P<taid>\d+)/organiser/$',
OrganiserSingleTrainingCertificateView.as_view(), \
name="organiser_singletraining_certificate"
),
#ajax
url(
r'^save-student/',
SaveStudentView.as_view()
),
# url(
# r'^get-course-option/',
# GetCourseOptionView.as_view()
# ),
url(
r'^get-batch-option/',
GetBatchOptionView.as_view()
),
url(
r'^get-course-option/',
GetCourseOptionView.as_view()
),
url(
r'^get-batch-course-status/',
GetBatchStatusView.as_view()
),
url(
r'^get-department-organiser-status/',
GetDepartmentOrganiserStatusView.as_view()
),
# url(
# r'^training-request/(?P<role>\w+)/(?P<status>\w+)/$',
# TrainingRequestListView.as_view(template_name='training_list.html'),
# name='training_list'
# ),
#url(r'^get-language-option/', GetLanguageOptionView.as_view()),
url(
r'^single-training/pending/(?P<pk>\d+)/approve/$',
SingleTrainingApprove,
name="single-training-approve"
),
url(
r'^single-training/pending/(?P<pk>\d+)/reject/$',
SingleTrainingReject,
name="single-training-reject"
),
url(
r'^single-training/pending/(?P<pk>\d+)/requestmarkattendance/$',
SingleTrainingPendingAttendance,
name="single_training_pending"
),
url(
r'^markas/(?P<pk>\d+)/complete/$',
MarkAsComplete,
name="mark_as_complete"
),
url(
r'^mark/(?P<pk>\d+)/complete/$',
MarkComplete,
name="mark_complete"
),
url(
r'^single-training/(?P<tid>\d+)/attendance',
SingleTrainingAttendanceListView.as_view(template_name=\
"single-training-attendance.html"),
name="single_training_attendance"
),
url(
r'^organiser-feedback/',
OrganiserFeedbackCreateView.as_view(template_name='organiser_feedback.html'),
name='organiser_feedback'
),
url(
r'^organiser-feedback-display/$',
OrganiserFeedbackListView.as_view(template_name="organiser_feedback_display.html"),
name="organiser-feedback-display"
),
url(
r'^old-training/$',
OldTrainingListView.as_view(template_name=\
"old_training.html"),
name="old_training"
),
url(
r'^old-training/(?P<tid>\d+)/participant/$',
OldStudentListView.as_view(template_name="old_list_student.html"),
name="old_list_student"
),
url(
r'^old-training/(?P<tid>\d+)/close/$',
OldTrainingCloseView.as_view(template_name=""),
name="old_training_close"
),
url(
r'^latex_workshop/$',
LatexWorkshopFileUpload,
name="latex-workshop"
),
url(
r'^student-batch/(?P<bid>\d+)/view/(?P<pk>\d+)$',
UpdateStudentName.as_view(template_name="update_student.html"),
name="update_student"
),
url(
r'^stworkshop-feedback/',
STWorkshopFeedbackCreateView.as_view(template_name='stworkshop_feedback.html'),
name='stworkshop_feedback'
),
url(
r'^stworkshop-feedback-pre/',
STWorkshopFeedbackPreCreateView.as_view(template_name='stworkshop_feedback_pre.html'),
name='stworkshop_feedback_pre'
),
url(
r'^stworkshop-feedback-post/',
STWorkshopFeedbackPostCreateView.as_view(template_name='stworkshop_feedback_post.html'),
name='stworkshop_feedback_post'
),
url(
r'^learn-drupal-feedback/',
LearnDrupalFeedbackCreateView.as_view(template_name='learndrupalfeedback.html'),
name='learndrupalfeedback'
),
url(
r'^(?P<tid>\d+)/oldattendance',
TrainingAttendanceListView.as_view(template_name=\
"mark_prev_attendance.html"),
name="previous_training_attendance"
),
url(
r'^(?P<pk>\d+)/reopen-training/$',
ReOpenTraining,
name="re-open-training"
),
url(
r'^payment-home/$',
payment_home,
name="payment_home"
),
url(
r'^payment-status/$',
payment_status,
name="payment_status"
),
url(
r'^payment-success/$',
payment_success,
name="payment_success"
),
url(
r'^payment-details/(?P<choice>\w+)/$',
payment_details,
name="payment_details"
),
url(
r'^payment-reconciliation-update/$',
payment_reconciliation_update,
name="payment_reconciliation_update"
),
url(r'^academic-transactions/$', academic_transactions,name="payment"),
url(
r'^training-request/(?P<role>\w+)/(?P<status>\w+)/$',
trainingrequest,
name='training_list'
),
url(
r'^request/(?P<trid>\d+)/certificate/$',
RequestCertificate,
name="request_certificate"
),
url(
r'^certificate-request/(?P<role>\w+)/(?P<choice>\w+)/$',
CertificateRequest,
name='certificate_request_list'
),
url(
r'^generate/(?P<trid>\d+)/certificate/$',
GenerateCertificate,
name="generate_certificate"
),
url(
r'^training-certificate/(?P<trid>\d+)/allcertificates/$',
AllTrainingCertificateView.as_view(), \
name="alltraining_certificate"
),
url(
r'^student-grade-filter/$',
StudentGradeFilter.as_view(),
name="student_grade_filter"
),
url(
r'^academic_payment_details/',
AcademicKeyCreateView.as_view(template_name='academic_payment_details_form.html'),
name='academic_payment_details'
),
]
|
Spoken-tutorial/spoken-website
|
events/urlsv2.py
|
Python
|
gpl-3.0
| 10,900
|
# This plugin should load and run successfully, but do no work.
import pybookwyrm
def find(wanted, bookwyrm):
pass
#FAIL error
|
Tmplt/bookwyrm
|
tests/plugins/success-but-no-work.py
|
Python
|
mit
| 133
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""estimator_lib python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.python.estimator import estimator_lib
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
estimator_lib.__all__ = [
s for s in dir(estimator_lib) if not s.startswith('__')
]
from tensorflow_estimator.python.estimator.estimator_lib import *
|
ghchinoy/tensorflow
|
tensorflow/python/estimator/estimator_lib.py
|
Python
|
apache-2.0
| 1,305
|
#!/usr/bin/env python3
import random
import numpy as np
import sympy
mod_space = 29
'''
Generate Encryption Key
'''
# In --> size of matrix (n x n)
# Out --> List of lists [[1,2,3],[4,5,6],[7,8,9]]
def generate_encryption_key(size):
determinant = 0
# Need to make sure encryption key is invertible, IE det(key) != 0
while determinant == 0:
matrix = []
for i in range(size): # Repeat i times based on input size
row = []
for k in range(size):
                # Random residue in the mod space we are working in
                # (randint is inclusive, so cap at mod_space - 1)
                number = random.randint(0, mod_space - 1)
row.append(number)
matrix.append(row) # Add row to matrix
# Convert list of lists into numpy array, which acts as a matrix
encryption_key = np.array(matrix)
try:
determinant = sympy.Matrix(encryption_key.tolist()).inv_mod(29).det()
        except Exception:
            # not invertible mod 29; retry with a fresh random matrix
            pass
# If matrix is invertible, end function and return matrix
#print(determinant)
#determinant = int(np.linalg.det(encryption_key))
return encryption_key
'''
Find Modular Inverse
'''
# In --> number, modspace (default is 29 for our case)
# Out --> modular inverse of number
def modular_inverse(num):
for i in range(mod_space): # Loop through possibile inverses in modspace
if (num * i) % mod_space == 1: # If i is an inverse for the number in modspace, return the number
return i
return False # If inverse does not exist, return False
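# Worked example (matches the [[1,2],[3,4]] walkthrough below): 27 * 14 == 378
# and 378 % 29 == 1, so modular_inverse(27) returns 14.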
'''
Generate Decryption Key
'''
# In --> Encryption Key (matrix form)
# Out --> Decryption Key
def generate_decryption_key(encryption_key):
# Take the prod of these 2 vars
key_inv = np.linalg.inv(encryption_key) # Inverse of encryption key
# Determinant of encryption key
det_key = int(np.linalg.det(encryption_key))
#print((key_inv * (det_key) * modular_inverse(det_key)) % 29)
    # How to get the multiplicative inverse of det(key) % 29:
    # if key = [[1,2],[3,4]], then det(key) % 29 == 27 and
    # inverse(det(key) % 29) == 14, since (27 * 14) % 29 == 1.
    # In general, solve (det_key_mod * x) % 29 == 1 for x;
    # x == 14 in our example.
det_key_mod = int(det_key % 29) # Determinant of encryption key mod 29
# Find modular inverse of above var using function defined above
det_key_mod_inv = int(modular_inverse(det_key_mod))
#print(det_key_mod, det_key_mod_inv)
# Final decryption key for [[1,2],[3,4]] is [[27,1],[16,14]]
# decryption_key = inv(det(key)mod29) * (det(key) * inv(key)) % 29
decryption_key = (key_inv * det_key)
#decryption_key = np.around(decryption_key)
#decryption_key = decryption_key.astype(int)
decryption_key = (det_key_mod_inv * decryption_key) % 29
decryption_key = np.around(decryption_key, 0)
#print(decryption_key)
return decryption_key
def generate_sympy_decryption_key(encryption_key):
encryption_key = sympy.Matrix(encryption_key.tolist())
#key_inverse = encryption_key ** -1
#key_determinant = encryption_key.det()
decryption_key = np.array(encryption_key.inv_mod(29))
#key_determinant_mod = key_determinant % 29
return decryption_key
#x = np.array([[1,2],[3,4]])
# print(x)
#x = generate_encryption_key(4)
#generate_sympy_decryption_key(x)
#print(x)
#res = generate_decryption_key(x)
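# Minimal self-check sketch (added for illustration, not in the original
# file): verify that the key pair round-trips, i.e. key * decryption_key is
# the identity matrix mod 29. For key = [[1,2],[3,4]] the decryption key is
# [[27,1],[16,14]], as derived in the comments above.
if __name__ == '__main__':
    key = np.array([[1, 2], [3, 4]])
    dec = generate_sympy_decryption_key(key).astype(int)
    assert ((np.dot(key, dec) % 29) == np.eye(2, dtype=int)).all()
    print(dec)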
|
jbloom512/Linear_Algebra_Encryption
|
Generate_Encryption_Key.py
|
Python
|
mit
| 3,446
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import openerp.modules
import logging
_logger = logging.getLogger(__name__)
def is_initialized(cr):
""" Check if a database has been initialized for the ORM.
The database can be initialized with the 'initialize' function below.
"""
cr.execute("SELECT relname FROM pg_class WHERE relkind='r' AND relname='ir_module_module'")
return len(cr.fetchall()) > 0
def initialize(cr):
""" Initialize a database with for the ORM.
This executes base/base.sql, creates the ir_module_categories (taken
from each module descriptor file), and creates the ir_module_module
and ir_model_data entries.
"""
f = openerp.modules.get_module_resource('base', 'base.sql')
if not f:
m = "File not found: 'base.sql' (provided by module 'base')."
_logger.critical(m)
raise IOError(m)
base_sql_file = openerp.tools.misc.file_open(f)
try:
cr.execute(base_sql_file.read())
cr.connection.commit()
finally:
base_sql_file.close()
for i in openerp.modules.get_modules():
mod_path = openerp.modules.get_module_path(i)
if not mod_path:
continue
# This will raise an exception if no/unreadable descriptor file.
info = openerp.modules.load_information_from_description_file(i)
if not info:
continue
categories = info['category'].split('/')
category_id = create_categories(cr, categories)
if info['installable']:
state = 'uninstalled'
else:
state = 'uninstallable'
cr.execute('INSERT INTO ir_module_module \
(author, website, name, shortdesc, description, \
category_id, auto_install, state, web, license, application, icon, sequence, summary) \
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) RETURNING id', (
info['author'],
info['website'], i, info['name'],
info['description'], category_id,
info['auto_install'], state,
info['web'],
info['license'],
info['application'], info['icon'],
info['sequence'], info['summary']))
id = cr.fetchone()[0]
cr.execute('INSERT INTO ir_model_data \
(name,model,module, res_id, noupdate) VALUES (%s,%s,%s,%s,%s)', (
'module_'+i, 'ir.module.module', 'base', id, True))
dependencies = info['depends']
for d in dependencies:
cr.execute('INSERT INTO ir_module_module_dependency \
(module_id,name) VALUES (%s, %s)', (id, d))
# Install recursively all auto-installing modules
while True:
cr.execute("""SELECT m.name FROM ir_module_module m WHERE m.auto_install AND state != 'to install'
AND NOT EXISTS (
SELECT 1 FROM ir_module_module_dependency d JOIN ir_module_module mdep ON (d.name = mdep.name)
WHERE d.module_id = m.id AND mdep.state != 'to install'
)""")
to_auto_install = [x[0] for x in cr.fetchall()]
if not to_auto_install: break
cr.execute("""UPDATE ir_module_module SET state='to install' WHERE name in %s""", (tuple(to_auto_install),))
cr.connection.commit()
def create_categories(cr, categories):
""" Create the ir_module_category entries for some categories.
categories is a list of strings forming a single category with its
parent categories, like ['Grand Parent', 'Parent', 'Child'].
Return the database id of the (last) category.
"""
p_id = None
category = []
while categories:
category.append(categories[0])
xml_id = 'module_category_' + ('_'.join(map(lambda x: x.lower(), category))).replace('&', 'and').replace(' ', '_')
# search via xml_id (because some categories are renamed)
cr.execute("SELECT res_id FROM ir_model_data WHERE name=%s AND module=%s AND model=%s",
(xml_id, "base", "ir.module.category"))
c_id = cr.fetchone()
if not c_id:
cr.execute('INSERT INTO ir_module_category \
(name, parent_id) \
VALUES (%s, %s) RETURNING id', (categories[0], p_id))
c_id = cr.fetchone()[0]
cr.execute('INSERT INTO ir_model_data (module, name, res_id, model) \
VALUES (%s, %s, %s, %s)', ('base', xml_id, c_id, 'ir.module.category'))
else:
c_id = c_id[0]
p_id = c_id
categories = categories[1:]
return p_id
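# Illustrative example of the xml_id scheme above: the category path
# ['Grand Parent', 'Parent', 'Child'] produces the xml_ids
# 'module_category_grand_parent', 'module_category_grand_parent_parent' and
# 'module_category_grand_parent_parent_child'.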
def has_unaccent(cr):
""" Test if the database has an unaccent function.
The unaccent is supposed to be provided by the PostgreSQL unaccent contrib
module but any similar function will be picked by OpenERP.
"""
cr.execute("SELECT proname FROM pg_proc WHERE proname='unaccent'")
return len(cr.fetchall()) > 0
|
zbqf109/goodo
|
openerp/modules/db.py
|
Python
|
gpl-3.0
| 5,047
|
import sys
import os
from _pydev_bundle import pydev_monkey
sys.path.insert(0, os.path.split(os.path.split(__file__)[0])[0])
from _pydevd_bundle.pydevd_constants import Null
import unittest
try:
import thread
except:
import _thread as thread # @UnresolvedImport
try:
xrange
except:
xrange = range
#=======================================================================================================================
# TestCase
#=======================================================================================================================
class TestCase(unittest.TestCase):
'''
Used for profiling the PyDBAdditionalThreadInfoWithoutCurrentFramesSupport version
'''
def test_met_no_frames_support(self):
from _pydevd_bundle.pydevd_additional_thread_info_regular import PyDBAdditionalThreadInfoWithoutCurrentFramesSupport
info = PyDBAdditionalThreadInfoWithoutCurrentFramesSupport()
main_debugger = Null()
filename = ''
base = ''
additional_info = Null()
t = Null()
frame = Null()
times = 10
for i in range(times):
info.create_db_frame((main_debugger, filename, additional_info, t, frame))
#we haven't kept any reference, so, they must have been garbage-collected already!
self.assertEqual(0, len(info.iter_frames(t)))
kept_frames = []
for i in range(times):
kept_frames.append(info.create_db_frame((main_debugger, filename, additional_info, t, frame)))
for i in range(times):
self.assertEqual(times, len(info.iter_frames(t)))
def test_start_new_thread(self):
pydev_monkey.patch_thread_modules()
try:
found = {}
def function(a, b, *args, **kwargs):
found['a'] = a
found['b'] = b
found['args'] = args
found['kwargs'] = kwargs
thread.start_new_thread(function, (1,2,3,4), {'d':1, 'e':2})
import time
for _i in xrange(20):
if len(found) == 4:
break
time.sleep(.1)
else:
raise AssertionError('Could not get to condition before 2 seconds')
self.assertEqual({'a': 1, 'b': 2, 'args': (3, 4), 'kwargs': {'e': 2, 'd': 1}}, found)
finally:
pydev_monkey.undo_patch_thread_modules()
def test_start_new_thread2(self):
pydev_monkey.patch_thread_modules()
try:
found = {}
class F(object):
start_new_thread = thread.start_new_thread
def start_it(self):
try:
self.start_new_thread(self.function, (1,2,3,4), {'d':1, 'e':2})
except:
import traceback;traceback.print_exc()
def function(self, a, b, *args, **kwargs):
found['a'] = a
found['b'] = b
found['args'] = args
found['kwargs'] = kwargs
f = F()
f.start_it()
import time
for _i in xrange(20):
if len(found) == 4:
break
time.sleep(.1)
else:
raise AssertionError('Could not get to condition before 2 seconds')
self.assertEqual({'a': 1, 'b': 2, 'args': (3, 4), 'kwargs': {'e': 2, 'd': 1}}, found)
finally:
pydev_monkey.undo_patch_thread_modules()
#=======================================================================================================================
# main
#=======================================================================================================================
if __name__ == '__main__':
unittest.main()
|
hurricup/intellij-community
|
python/helpers/pydev/tests_pydevd_python/test_additional_thread_info.py
|
Python
|
apache-2.0
| 3,874
|
# -*- test-case-name: twisted.python.test.test_htmlizer -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
HTML rendering of Python source.
"""
import tokenize, cgi, keyword
from . import reflect
class TokenPrinter:
currentCol, currentLine = 0, 1
lastIdentifier = parameters = 0
def __init__(self, writer):
self.writer = writer
def printtoken(self, type, token, sCoordinates, eCoordinates, line):
(srow, scol) = sCoordinates
(erow, ecol) = eCoordinates
#print "printtoken(%r,%r,%r,(%r,%r),(%r,%r),%r), row=%r,col=%r" % (
# self, type, token, srow,scol, erow,ecol, line,
# self.currentLine, self.currentCol)
if self.currentLine < srow:
self.writer('\n'*(srow-self.currentLine))
self.currentLine, self.currentCol = srow, 0
self.writer(' '*(scol-self.currentCol))
if self.lastIdentifier:
type = "identifier"
self.parameters = 1
elif type == tokenize.NAME:
if keyword.iskeyword(token):
type = 'keyword'
else:
if self.parameters:
type = 'parameter'
else:
type = 'variable'
else:
type = tokenize.tok_name.get(type).lower()
self.writer(token, type)
self.currentCol = ecol
self.currentLine += token.count('\n')
if self.currentLine != erow:
self.currentCol = 0
self.lastIdentifier = token in ('def', 'class')
if token == ':':
self.parameters = 0
class HTMLWriter:
noSpan = []
def __init__(self, writer):
self.writer = writer
noSpan = []
reflect.accumulateClassList(self.__class__, "noSpan", noSpan)
self.noSpan = noSpan
def write(self, token, type=None):
token = cgi.escape(token)
if (type is None) or (type in self.noSpan):
self.writer(token)
else:
self.writer('<span class="py-src-%s">%s</span>' %
(type, token))
class SmallerHTMLWriter(HTMLWriter):
"""HTMLWriter that doesn't generate spans for some junk.
Results in much smaller HTML output.
"""
noSpan = ["endmarker", "indent", "dedent", "op", "newline", "nl"]
def filter(inp, out, writer=HTMLWriter):
out.write('<pre>')
printer = TokenPrinter(writer(out.write).write).printtoken
try:
tokenize.tokenize(inp.readline, printer)
except tokenize.TokenError:
pass
out.write('</pre>\n')
def main():
import sys
with open(sys.argv[1]) as f:
filter(f, sys.stdout)
if __name__ == '__main__':
main()
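# Usage sketch (file names are hypothetical): render a module to HTML from the
# command line, or call filter() directly to use the smaller writer:
#
#     python htmlizer.py mymodule.py > mymodule.html
#
#     with open('mymodule.py') as inp, open('mymodule.html', 'w') as out:
#         filter(inp, out, writer=SmallerHTMLWriter)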
|
Tokyo-Buffalo/tokyosouth
|
env/lib/python3.6/site-packages/twisted/python/htmlizer.py
|
Python
|
mit
| 2,729
|
import sys
import os
sys.path.append(os.path.abspath("../md/"))
import base
import media
import time
import threading
class ThreadAction1(threading.Thread):
def __init__(self, host):
super().__init__()
self.dut = media.Telnet(host)
def run(self):
while True:
self.dut.command_more("show tech-support")
time.sleep(2)
self.dut.command_more("show system")
time.sleep(2)
class ThreadAction2(threading.Thread):
def __init__(self, host):
super().__init__()
self.dut = media.Telnet(host)
#self.dut = media.Telnet(host, output=True)
self.host = host
def run(self):
while True:
self.dut.command_more("show tech-support")
time.sleep(2)
self.dut.command_more("show system")
time.sleep(2)
self.dut.command_more("config ter")
slot = ["4", "8"]
for s in slot:
for i in range(1, 5):
interface = "interface slot" + s + "-" + str(i)
self.dut.command_more(interface)
self.dut.command_more("shutdown")
time.sleep(5)
cli = "do show interface status | grep slot" + s + "-" + str(i)
rst = self.dut.command_more(cli)
print("#"*80)
print("host:", self.host)
print(rst)
print("#"*80)
self.dut.command_more("no shutdown")
time.sleep(5)
cli = "do show interface status | grep slot" + s + "-" + str(i)
rst = self.dut.command_more(cli)
print("#"*80)
print("host:", self.host)
print(rst)
print("#"*80)
self.dut.command_more("end")
class ThreadCheck(threading.Thread):
def __init__(self, host):
super().__init__()
self.host = host
#self.dut = media.Telnet(host, output=True)
self.dut = media.Telnet(host)
def run(self):
while True:
self.dut.command_more("start-shell")
rst = self.dut.command_more("ps aux | grep hsl")
print("#"*80)
print("host:", self.host)
print(rst)
print("#"*80)
self.dut.command_more("exit")
time.sleep(5)
dut_up = "192.168.4.88"
dut_down = "192.168.4.87"
t1 = ThreadCheck(dut_up)
t1.start()
t2 = ThreadCheck(dut_down)
t2.start()
t3 = ThreadAction1(dut_up)
t3.start()
t4 = ThreadAction2(dut_down)
t4.start()
|
jaeick/ts
|
modo/gplat/gplat-565.py
|
Python
|
apache-2.0
| 2,664
|
import importlib
import inspect
from .job import Job
from .brokers.standard import Standard
from .brokers.eager import Eager
from .connectors.sqs import SQS
from .worker import Worker
import logging
logger = logging.getLogger('sqjobs')
def create_eager_broker():
return Eager()
def create_sqs_broker(access_key, secret_key, region_name='us-west-1', endpoint_url=None):
sqs = SQS(
access_key=access_key,
secret_key=secret_key,
region_name=region_name,
endpoint_url=endpoint_url,
)
return Standard(sqs)
def create_sqs_worker(queue_name, access_key, secret_key, region_name='us-west-1', endpoint_url=None):
broker = create_sqs_broker(access_key, secret_key, region_name, endpoint_url)
return Worker(broker, queue_name)
def get_jobs_from_module(module_name):
jobs = []
try:
module = importlib.import_module(module_name)
except ImportError:
return jobs
for name, obj in inspect.getmembers(module):
if inspect.isclass(obj) and issubclass(obj, Job) and not obj.abstract and obj is not Job:
jobs.append(obj)
return jobs
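# Usage sketch (credentials, queue and module names are hypothetical):
#
#     broker = create_sqs_broker(access_key='AKIA...', secret_key='...')
#     worker = create_sqs_worker('default', access_key='AKIA...', secret_key='...')
#     jobs = get_jobs_from_module('myapp.jobs')  # concrete Job subclasses only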
|
gnufede/sqjobs
|
sqjobs/utils.py
|
Python
|
bsd-3-clause
| 1,138
|
# Copyright 2017, Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import nox
LOCAL_DEPS = (
os.path.join('..', 'api_core'),
os.path.join('..', 'core'),
)
def default(session):
"""Run the unit test suite.
This is intended to be run **without** an interpreter set, so
that the current ``python`` (on the ``PATH``) or the version of
    Python corresponding to the ``nox`` binary on the ``PATH`` can
run the tests.
"""
session.install('mock', 'pytest', 'pytest-cov', *LOCAL_DEPS)
session.install('-e', '.')
session.run(
'py.test',
'--quiet',
'--cov=google.cloud.securitycenter_v1beta1',
'--cov-append',
'--cov-config=.coveragerc',
'--cov-report=',
'--cov-fail-under=89', # TODO: Coverage should be raised to 97%
os.path.join('tests', 'unit'),
*session.posargs
)
@nox.session(python=['2.7', '3.5', '3.6', '3.7'])
def unit(session):
"""Run the unit test suite."""
default(session)
@nox.session(python='3.6')
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install('flake8', *LOCAL_DEPS)
session.install('.')
session.run('flake8', 'google', 'tests')
@nox.session(python='3.6')
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install('docutils', 'pygments')
session.run('python', 'setup.py', 'check', '--restructuredtext',
'--strict')
@nox.session(python='3.6')
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.chdir(os.path.dirname(__file__))
session.install('coverage', 'pytest-cov')
session.run('coverage', 'report', '--show-missing', '--fail-under=100')
session.run('coverage', 'erase')
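# Usage sketch (assuming nox is installed): the sessions above are invoked by
# name, e.g.
#
#     nox -s lint
#     nox -s cover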
|
jonparrott/gcloud-python
|
securitycenter/noxfile.py
|
Python
|
apache-2.0
| 2,586
|
'''
Created on Dec 29, 2012
@author: teddydestodes
'''
import datetime
import math
import time
import urllib.parse
import dateutil.parser
import pytz
import requests
from display import TextDisplay
from plugins import Plugin
__author__ = 'teddydestodes'
def now():
return pytz.utc.localize(datetime.datetime.utcnow())
class XivelyPlugin(Plugin):
api_url = 'https://api.xively.com/v2/'
query_options = {'interval_type': 'discrete',
'interval': '60',
'duration': '12hours'}
interval = 300
def __init__(self):
super().__init__()
self._last_poll = 0
self._values = {}
self._current_value = None
self._streams = [{'feed': '92670',
'stream': 'outside',
'apikey': 'MyIMAVdg8RSXXBXIYuUtMsMqTpAhRLUcBITCGBO8tyUXDArd'},
{'feed': '92670',
'stream': 'inside',
'apikey': 'MyIMAVdg8RSXXBXIYuUtMsMqTpAhRLUcBITCGBO8tyUXDArd'}]
self._values = {}
def tick(self):
if time.time() - self._last_poll < XivelyPlugin.interval:
return
self._last_poll = time.time()
self._refresh()
def _refresh(self):
for stream in self._streams:
self._get_data(**stream)
def _send_request(self, feed, stream, apikey, start=None):
params = dict(self.query_options)
if start:
params['start'] = start
r = requests.get("{apiurl}feeds/{feed}/datastreams/{stream}/?{params}".format(apiurl=self.api_url,
feed=feed,
stream=stream,
params=urllib.parse.urlencode(params)),
headers={"X-ApiKey": apikey})
return r.json()
def _get_data(self, feed, stream, apikey):
key = feed+'.'+stream
if key not in self._values:
self._values[key] = {'current': None,
'unit': None,
'name': stream,
'data': {}}
dates = sorted(self._values[key]['data'].keys())
if len(dates) > 0:
last_iso = dates[-1].isoformat()
else:
last_iso = None
data = self._send_request(feed, stream, apikey, start=last_iso)
self._values[key]['current'] = data.get('current_value')
self._values[key]['unit'] = data.get('unit', {}).get('label', '')
for value in data['datapoints']:
curr_date = dateutil.parser.parse(value['at'])
if curr_date > now():
return
try:
self._values[key]['data'][curr_date] = float(value['value'])
except ValueError as e:
self._values[key]['data'][curr_date] = None
        # Fetch the next chunk of history; the date check in the loop above
        # returns once a datapoint lies in the future, ending the recursion.
        self._get_data(feed, stream, apikey)
def draw(self, display):
if isinstance(display, TextDisplay):
self._draw_text(display)
def _draw_text(self, display):
text = ''
for stream in self._values.values():
if stream['current'] is not None:
text += stream['name']+': '+stream['current']+stream['unit']+' '
display.set_text(text)
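# For reference, _send_request() above issues GET requests of this shape (with
# the feed/stream values configured in __init__; 'start' is appended when
# paging through history):
#
#     https://api.xively.com/v2/feeds/92670/datastreams/outside/?interval_type=discrete&interval=60&duration=12hours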
|
TeddyDesTodes/pyflipdot
|
pyflipdot/plugins/xively/__init__.py
|
Python
|
bsd-3-clause
| 3,479
|
from os.path import join
import cobra
from cobra.io import read_sbml_model, write_sbml_model
def test_notes(tmp_path):
"""Testing if model notes are written in SBML"""
path_to_file = join(str(tmp_path), "model_notes.xml")
# making a minimal cobra model to test notes
model = cobra.Model("e_coli_core")
model.notes["Remark"] = "...Model Notes..."
met = cobra.Metabolite("pyr_c", compartment="c")
model.add_metabolites([met])
met.notes["Remark"] = "Note with \n newline"
rxn = cobra.Reaction("R_ATPM")
model.add_reactions([rxn])
rxn.notes["Remark"] = "What about me?"
model.objective_direction = "max"
model.objective = rxn
write_sbml_model(model, path_to_file)
# reading the model back
model_after_reading = read_sbml_model(path_to_file)
met_after_reading = model_after_reading.metabolites.get_by_id("pyr_c")
reaction_after_reading = model_after_reading.reactions.get_by_id("R_ATPM")
# checking if notes are written to model
assert model_after_reading.notes["Remark"] == "...Model Notes..."
# checking notes for metabolite and reaction
assert met_after_reading.notes["Remark"] == "Note with \n newline"
assert reaction_after_reading.notes["Remark"] == "What about me?"
|
opencobra/cobrapy
|
src/cobra/test/test_io/test_notes.py
|
Python
|
gpl-2.0
| 1,267
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# See the COPYING file for license information.
#
# Copyright (c) 2006 Guillaume Chazarain <guichaz@gmail.com>
import asyncore
import errno
import os
import readline # Just to say we want to use it with raw_input
import signal
import socket
import subprocess
import sys
import tempfile
import termios
from threading import Thread, Event, Lock
from polysh import dispatchers, remote_dispatcher
from polysh.console import console_output, set_last_status_length
from polysh import completion
class input_buffer(object):
"""The shared input buffer between the main thread and the stdin thread"""
def __init__(self):
self.lock = Lock()
self.buf = ''
def add(self, data):
"""Add data to the buffer"""
self.lock.acquire()
try:
self.buf += data
finally:
self.lock.release()
def get(self):
"""Get the content of the buffer"""
self.lock.acquire()
try:
data = self.buf
if data:
self.buf = ''
return data
finally:
self.lock.release()
def process_input_buffer():
"""Send the content of the input buffer to all remote processes, this must
be called in the main thread"""
from polysh.control_commands_helpers import handle_control_command
data = the_stdin_thread.input_buffer.get()
remote_dispatcher.log('> ' + data)
if data.startswith(':'):
handle_control_command(data[1:-1])
return
if data.startswith('!'):
try:
retcode = subprocess.call(data[1:], shell=True)
except OSError, e:
if e.errno == errno.EINTR:
console_output('Child was interrupted\n')
retcode = 0
else:
raise
if retcode > 128 and retcode <= 192:
retcode = 128 - retcode
if retcode > 0:
console_output('Child returned %d\n' % retcode)
elif retcode < 0:
console_output('Child was terminated by signal %d\n' % -retcode)
return
for r in dispatchers.all_instances():
try:
r.dispatch_command(data)
except asyncore.ExitNow, e:
raise e
except Exception, msg:
console_output('%s for %s, disconnecting\n' % (msg, r.display_name))
r.disconnect()
else:
if r.enabled and r.state is remote_dispatcher.STATE_IDLE:
r.change_state(remote_dispatcher.STATE_RUNNING)
# The stdin thread uses a synchronous (with ACK) socket to communicate with the
# main thread, which is most of the time waiting in the poll() loop.
# Socket character protocol:
# d: there is new data to send
# A: ACK, same reply for every message, communications are synchronous, so the
# stdin thread sends a character to the socket, the main thread processes it,
# sends the ACK, and the stdin thread can go on.
class socket_notification_reader(asyncore.dispatcher):
"""The socket reader in the main thread"""
def __init__(self):
asyncore.dispatcher.__init__(self, the_stdin_thread.socket_read)
def _do(self, c):
if c == 'd':
process_input_buffer()
else:
raise Exception, 'Unknown code: %s' % (c)
def handle_read(self):
"""Handle all the available character commands in the socket"""
while True:
try:
c = self.recv(1)
except socket.error, why:
if why[0] == errno.EWOULDBLOCK:
return
else:
raise
else:
self._do(c)
self.socket.setblocking(True)
self.send('A')
self.socket.setblocking(False)
def writable(self):
"""Our writes are blocking"""
return False
def write_main_socket(c):
"""Synchronous write to the main socket, wait for ACK"""
the_stdin_thread.socket_write.send(c)
while True:
try:
the_stdin_thread.socket_write.recv(1)
except socket.error, e:
if e[0] != errno.EINTR:
raise
else:
break
#
# This file descriptor is used to interrupt readline in raw_input().
# /dev/null is not enough as it does not get out of a 'Ctrl-R' reverse-i-search.
# A Ctrl-C seems to make raw_input() return in all cases, and avoids printing
# a newline
tempfile_fd, tempfile_name = tempfile.mkstemp()
os.remove(tempfile_name)
os.write(tempfile_fd, chr(3))
def get_stdin_pid(cached_result=None):
"""Try to get the PID of the stdin thread, otherwise get the whole process
ID"""
if cached_result is None:
try:
tasks = os.listdir('/proc/self/task')
except OSError, e:
if e.errno != errno.ENOENT:
raise
cached_result = os.getpid()
else:
tasks.remove(str(os.getpid()))
assert len(tasks) == 1
cached_result = int(tasks[0])
return cached_result
def interrupt_stdin_thread():
"""The stdin thread may be in raw_input(), get out of it"""
dupped_stdin = os.dup(0) # Backup the stdin fd
assert not the_stdin_thread.interrupt_asked # Sanity check
the_stdin_thread.interrupt_asked = True # Not user triggered
os.lseek(tempfile_fd, 0, 0) # Rewind in the temp file
os.dup2(tempfile_fd, 0) # This will make raw_input() return
pid = get_stdin_pid()
os.kill(pid, signal.SIGWINCH) # Try harder to wake up raw_input()
the_stdin_thread.out_of_raw_input.wait() # Wait for this return
the_stdin_thread.interrupt_asked = False # Restore sanity
os.dup2(dupped_stdin, 0) # Restore stdin
os.close(dupped_stdin) # Cleanup
echo_enabled = True
def set_echo(echo):
global echo_enabled
if echo != echo_enabled:
fd = sys.stdin.fileno()
attr = termios.tcgetattr(fd)
if echo:
attr[3] |= termios.ECHO
else:
attr[3] &= ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, attr)
echo_enabled = echo
class stdin_thread(Thread):
"""The stdin thread, used to call raw_input()"""
def __init__(self):
Thread.__init__(self, name='stdin thread')
completion.install_completion_handler()
@staticmethod
def activate(interactive):
"""Activate the thread at initialization time"""
the_stdin_thread.input_buffer = input_buffer()
if interactive:
the_stdin_thread.raw_input_wanted = Event()
the_stdin_thread.in_raw_input = Event()
the_stdin_thread.out_of_raw_input = Event()
the_stdin_thread.out_of_raw_input.set()
s1, s2 = socket.socketpair()
the_stdin_thread.socket_read, the_stdin_thread.socket_write = s1, s2
the_stdin_thread.interrupt_asked = False
the_stdin_thread.setDaemon(True)
the_stdin_thread.start()
the_stdin_thread.socket_notification = socket_notification_reader()
the_stdin_thread.prepend_text = None
readline.set_pre_input_hook(the_stdin_thread.prepend_previous_text)
def prepend_previous_text(self):
if self.prepend_text:
readline.insert_text(self.prepend_text)
readline.redisplay()
self.prepend_text = None
def want_raw_input(self):
nr, total = dispatchers.count_awaited_processes()
if nr:
prompt = 'waiting (%d/%d)> ' % (nr, total)
else:
prompt = 'ready (%d)> ' % total
self.prompt = prompt
set_last_status_length(len(prompt))
self.raw_input_wanted.set()
while not self.in_raw_input.isSet():
self.socket_notification.handle_read()
self.in_raw_input.wait(0.1)
self.raw_input_wanted.clear()
def no_raw_input(self):
if not self.out_of_raw_input.isSet():
interrupt_stdin_thread()
# Beware of races
def run(self):
while True:
self.raw_input_wanted.wait()
self.out_of_raw_input.set()
self.in_raw_input.set()
self.out_of_raw_input.clear()
cmd = None
try:
cmd = raw_input(self.prompt)
except EOFError:
if self.interrupt_asked:
cmd = readline.get_line_buffer()
else:
cmd = chr(4) # Ctrl-D
if self.interrupt_asked:
self.prepend_text = cmd
cmd = None
self.in_raw_input.clear()
self.out_of_raw_input.set()
if cmd:
if echo_enabled:
completion.add_to_history(cmd)
else:
completion.remove_last_history_item()
set_echo(True)
if cmd is not None:
self.input_buffer.add(cmd + '\n')
write_main_socket('d')
the_stdin_thread = stdin_thread()
|
daniyalzade/polysh
|
polysh/stdin.py
|
Python
|
gpl-2.0
| 9,733
|
from django import forms
from django.conf import settings
from idea.models import Idea
if 'core.taggit' in settings.INSTALLED_APPS:
from core.taggit.utils import add_tags
else:
pass
class IdeaForm(forms.ModelForm):
class Meta:
model = Idea
exclude = ('creator', 'time', 'state', 'voters')
def __init__(self, *args, **kwargs):
super(IdeaForm, self).__init__(*args, **kwargs)
self.fields['banner'].empty_label = "Select"
self.fields['title'].label = "What is your idea?"
self.fields['banner'].label = None
self.fields['summary'].label = "Pitch your idea"
self.fields['tags'].label = "Tag it with keywords"
self.fields['text'].label = "Give us the details"
self.fields['challenge-checkbox'] = forms.BooleanField(
required=False,
label="My idea is part of a Challenge")
for field in self.fields:
form_classes = "form-control"
if field == "banner" and 'challenge-checkbox' in self.data.keys() \
and self.data['challenge-checkbox'] == 'on':
form_classes += " active"
if field in self.data.keys() and self.data[field]:
form_classes += " populated"
self.fields[field].widget.attrs["class"] = form_classes
self.fields.keyOrder = [
'title',
'challenge-checkbox',
'banner',
'summary',
'tags',
'text']
def save(self, commit=True):
instance = super(IdeaForm, self).save(commit=False)
# add tags separately
tags = self.cleaned_data.get('tags', [])
self.cleaned_data['tags'] = []
instance.save()
try:
for t in tags:
add_tags(instance, t, None, instance.creator, 'idea')
except NameError: # catch if add_tags doesn't exist
instance.tags.add(*tags)
return instance
def set_error_css(self):
for field in self.fields:
classes_set = set(self.fields[field].widget.attrs["class"].split())
if field in self.errors.keys():
classes_set.add("input-error")
else:
classes_set.discard("input-error")
self.fields[field].widget.attrs["class"] = " ".join(classes_set)
def clean_tags(self):
""" Force tags to lowercase, since tags are case-sensitive otherwise. """
if 'tags' in self.cleaned_data:
tags = self.cleaned_data['tags']
# Account for taggit's odd special case (when a tag with spaces
# but no commas is split)
if 'tags' in self.data and u"," not in self.data['tags'] and len(tags) > 1:
tags = [self.data['tags'].strip()]
return [t.lower() for t in tags]
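    # Illustration (hypothetical inputs) of the special case handled above: a
    # single submission like "Disaster Response" (spaces, no comma) would be
    # split by taggit into two tags, so it is kept whole and lowercased to
    # ['disaster response'], while "Food, Water" still yields ['food', 'water'].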
class UpVoteForm(forms.Form):
idea_id = forms.IntegerField(widget=forms.HiddenInput())
next = forms.CharField(max_length=512, widget=forms.HiddenInput())
class IdeaTagForm(forms.Form):
tags = forms.CharField(max_length=512,
widget=forms.TextInput(attrs={'class': 'tags_autocomplete'}))
def clean_tags(self):
""" Force tags to lowercase, since tags are case-sensitive otherwise. """
ts = self.cleaned_data['tags']
return ts.lower()
|
m3brown/idea-box
|
src/idea/forms.py
|
Python
|
cc0-1.0
| 3,356
|
"""This module implement decorators for wrapping data sources so as to
simplify their construction and attribution of properties.
"""
import functools
def data_source_generator(name=None, **properties):
"""Decorator for applying to a simple data source which directly
returns an iterable/generator with the metrics for each sample. The
function the decorator is applied to must take no arguments.
"""
def _decorator(func):
@functools.wraps(func)
def _properties(settings):
def _factory(environ):
return func
d = dict(properties)
d['name'] = name
d['factory'] = _factory
return d
return _properties
return _decorator
def data_source_factory(name=None, **properties):
"""Decorator for applying to a data source defined as a factory. The
decorator can be applied to a class or a function. The class
constructor or function must accept arguments of 'settings', being
configuration settings for the data source, and 'environ' being
information about the context in which the data source is being
used. The resulting object must be a callable which directly returns
an iterable/generator with the metrics for each sample.
"""
def _decorator(func):
@functools.wraps(func)
def _properties(settings):
def _factory(environ):
return func(settings, environ)
d = dict(properties)
d['name'] = name
d['factory'] = _factory
return d
return _properties
return _decorator
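# Usage sketch (names and metric payloads are hypothetical; the docstrings
# above only require that the wrapped callables return an iterable of metrics):
#
#     @data_source_generator(name='Example/Generator')
#     def generator_source():
#         yield ('Custom/Example/Value', 42)
#
#     @data_source_factory(name='Example/Factory')
#     def factory_source(settings, environ):
#         def sample():
#             yield ('Custom/Example/Value', 42)
#         return sample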
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/newrelic-2.46.0.37/newrelic/samplers/decorators.py
|
Python
|
agpl-3.0
| 1,626
|
from __future__ import print_function
# Authors: Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import os
import os.path as op
from functools import reduce, partial
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
import pytest
from mne.datasets import testing
from mne.io import read_raw_fif, read_raw_bti
from mne.io.bti.bti import (_read_config, _process_bti_headshape,
_read_bti_header, _get_bti_dev_t,
_correct_trans, _get_bti_info)
from mne.io.tests.test_raw import _test_raw_reader
from mne.tests.common import assert_dig_allclose
from mne.io.pick import pick_info
from mne.io.constants import FIFF
from mne import pick_types
from mne.utils import run_tests_if_main
from mne.transforms import Transform, combine_transforms, invert_transform
from mne.externals import six
base_dir = op.join(op.abspath(op.dirname(__file__)), 'data')
archs = 'linux', 'solaris'
pdf_fnames = [op.join(base_dir, 'test_pdf_%s' % a) for a in archs]
config_fnames = [op.join(base_dir, 'test_config_%s' % a) for a in archs]
hs_fnames = [op.join(base_dir, 'test_hs_%s' % a) for a in archs]
exported_fnames = [op.join(base_dir, 'exported4D_%s_raw.fif' % a)
for a in archs]
tmp_raw_fname = op.join(base_dir, 'tmp_raw.fif')
fname_2500 = op.join(testing.data_path(download=False), 'BTi', 'erm_HFH',
'c,rfDC')
# the 4D exporter doesn't export all channels, so we confine our comparison
NCH = 248
@testing.requires_testing_data
def test_read_2500():
"""Test reading data from 2500 system."""
_test_raw_reader(read_raw_bti, pdf_fname=fname_2500, head_shape_fname=None)
def test_read_config():
"""Test read bti config file."""
# for config in config_fname, config_solaris_fname:
for config in config_fnames:
cfg = _read_config(config)
assert all('unknown' not in block.lower() and block != ''
for block in cfg['user_blocks'])
def test_crop_append():
"""Test crop and append raw."""
raw = _test_raw_reader(
read_raw_bti, pdf_fname=pdf_fnames[0],
config_fname=config_fnames[0], head_shape_fname=hs_fnames[0])
y, t = raw[:]
t0, t1 = 0.25 * t[-1], 0.75 * t[-1]
mask = (t0 <= t) * (t <= t1)
raw_ = raw.copy().crop(t0, t1)
y_, _ = raw_[:]
assert (y_.shape[1] == mask.sum())
assert (y_.shape[0] == y.shape[0])
def test_transforms():
"""Test transformations."""
bti_trans = (0.0, 0.02, 0.11)
bti_dev_t = Transform('ctf_meg', 'meg', _get_bti_dev_t(0.0, bti_trans))
for pdf, config, hs, in zip(pdf_fnames, config_fnames, hs_fnames):
raw = read_raw_bti(pdf, config, hs, preload=False)
dev_ctf_t = raw.info['dev_ctf_t']
dev_head_t_old = raw.info['dev_head_t']
ctf_head_t = raw.info['ctf_head_t']
# 1) get BTI->Neuromag
bti_dev_t = Transform('ctf_meg', 'meg', _get_bti_dev_t(0.0, bti_trans))
# 2) get Neuromag->BTI head
t = combine_transforms(invert_transform(bti_dev_t), dev_ctf_t,
'meg', 'ctf_head')
# 3) get Neuromag->head
dev_head_t_new = combine_transforms(t, ctf_head_t, 'meg', 'head')
assert_array_equal(dev_head_t_new['trans'], dev_head_t_old['trans'])
@pytest.mark.slowtest
def test_raw():
"""Test bti conversion to Raw object."""
for pdf, config, hs, exported in zip(pdf_fnames, config_fnames, hs_fnames,
exported_fnames):
# rx = 2 if 'linux' in pdf else 0
pytest.raises(ValueError, read_raw_bti, pdf, 'eggs', preload=False)
pytest.raises(ValueError, read_raw_bti, pdf, config, 'spam',
preload=False)
if op.exists(tmp_raw_fname):
os.remove(tmp_raw_fname)
ex = read_raw_fif(exported, preload=True)
ra = read_raw_bti(pdf, config, hs, preload=False)
assert ('RawBTi' in repr(ra))
assert_equal(ex.ch_names[:NCH], ra.ch_names[:NCH])
assert_array_almost_equal(ex.info['dev_head_t']['trans'],
ra.info['dev_head_t']['trans'], 7)
assert len(ex.info['dig']) in (3563, 5154)
assert_dig_allclose(ex.info, ra.info, limit=100)
coil1, coil2 = [np.concatenate([d['loc'].flatten()
for d in r_.info['chs'][:NCH]])
for r_ in (ra, ex)]
assert_array_almost_equal(coil1, coil2, 7)
loc1, loc2 = [np.concatenate([d['loc'].flatten()
for d in r_.info['chs'][:NCH]])
for r_ in (ra, ex)]
assert_allclose(loc1, loc2)
assert_allclose(ra[:NCH][0], ex[:NCH][0])
assert_array_equal([c['range'] for c in ra.info['chs'][:NCH]],
[c['range'] for c in ex.info['chs'][:NCH]])
assert_array_equal([c['cal'] for c in ra.info['chs'][:NCH]],
[c['cal'] for c in ex.info['chs'][:NCH]])
assert_array_equal(ra._cals[:NCH], ex._cals[:NCH])
# check our transforms
for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
if ex.info[key] is None:
pass
else:
assert (ra.info[key] is not None)
for ent in ('to', 'from', 'trans'):
assert_allclose(ex.info[key][ent],
ra.info[key][ent])
ra.save(tmp_raw_fname)
re = read_raw_fif(tmp_raw_fname)
print(re)
for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
assert (isinstance(re.info[key], dict))
this_t = re.info[key]['trans']
assert_equal(this_t.shape, (4, 4))
            # check that the matrix is not the identity
assert (not np.allclose(this_t, np.eye(4)))
os.remove(tmp_raw_fname)
def test_info_no_rename_no_reorder_no_pdf():
"""Test private renaming, reordering and partial construction option."""
for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):
info, bti_info = _get_bti_info(
pdf_fname=pdf, config_fname=config, head_shape_fname=hs,
rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
ecg_ch='E31', eog_ch=('E63', 'E64'),
rename_channels=False, sort_by_ch_name=False)
info2, bti_info = _get_bti_info(
pdf_fname=None, config_fname=config, head_shape_fname=hs,
rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
ecg_ch='E31', eog_ch=('E63', 'E64'),
rename_channels=False, sort_by_ch_name=False)
assert_equal(info['ch_names'],
[ch['ch_name'] for ch in info['chs']])
assert_equal([n for n in info['ch_names'] if n.startswith('A')][:5],
['A22', 'A2', 'A104', 'A241', 'A138'])
assert_equal([n for n in info['ch_names'] if n.startswith('A')][-5:],
['A133', 'A158', 'A44', 'A134', 'A216'])
info = pick_info(info, pick_types(info, meg=True, stim=True,
resp=True))
info2 = pick_info(info2, pick_types(info2, meg=True, stim=True,
resp=True))
assert (info['sfreq'] is not None)
assert (info['lowpass'] is not None)
assert (info['highpass'] is not None)
assert (info['meas_date'] is not None)
assert_equal(info2['sfreq'], None)
assert_equal(info2['lowpass'], None)
assert_equal(info2['highpass'], None)
assert_equal(info2['meas_date'], None)
assert_equal(info['ch_names'], info2['ch_names'])
assert_equal(info['ch_names'], info2['ch_names'])
for key in ['dev_ctf_t', 'dev_head_t', 'ctf_head_t']:
assert_array_equal(info[key]['trans'], info2[key]['trans'])
assert_array_equal(
np.array([ch['loc'] for ch in info['chs']]),
np.array([ch['loc'] for ch in info2['chs']]))
# just check reading data | corner case
raw1 = read_raw_bti(
pdf_fname=pdf, config_fname=config, head_shape_fname=None,
sort_by_ch_name=False, preload=True)
# just check reading data | corner case
raw2 = read_raw_bti(
pdf_fname=pdf, config_fname=config, head_shape_fname=None,
rename_channels=False,
sort_by_ch_name=True, preload=True)
sort_idx = [raw1.bti_ch_labels.index(ch) for ch in raw2.bti_ch_labels]
raw1._data = raw1._data[sort_idx]
assert_array_equal(raw1._data, raw2._data)
assert_array_equal(raw2.bti_ch_labels, raw2.ch_names)
def test_no_conversion():
"""Test bti no-conversion option."""
get_info = partial(
_get_bti_info,
rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
ecg_ch='E31', eog_ch=('E63', 'E64'),
rename_channels=False, sort_by_ch_name=False)
for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):
raw_info, _ = get_info(pdf, config, hs, convert=False)
raw_info_con = read_raw_bti(
pdf_fname=pdf, config_fname=config, head_shape_fname=hs,
convert=True, preload=False).info
pick_info(raw_info_con,
pick_types(raw_info_con, meg=True, ref_meg=True),
copy=False)
pick_info(raw_info,
pick_types(raw_info, meg=True, ref_meg=True), copy=False)
bti_info = _read_bti_header(pdf, config)
dev_ctf_t = _correct_trans(bti_info['bti_transform'][0])
assert_array_equal(dev_ctf_t, raw_info['dev_ctf_t']['trans'])
assert_array_equal(raw_info['dev_head_t']['trans'], np.eye(4))
assert_array_equal(raw_info['ctf_head_t']['trans'], np.eye(4))
dig, t = _process_bti_headshape(hs, convert=False, use_hpi=False)
assert_array_equal(t['trans'], np.eye(4))
for ii, (old, new, con) in enumerate(zip(
dig, raw_info['dig'], raw_info_con['dig'])):
assert_equal(old['ident'], new['ident'])
assert_array_equal(old['r'], new['r'])
assert (not np.allclose(old['r'], con['r']))
if ii > 10:
break
ch_map = dict((ch['chan_label'],
ch['loc']) for ch in bti_info['chs'])
for ii, ch_label in enumerate(raw_info['ch_names']):
if not ch_label.startswith('A'):
continue
t1 = ch_map[ch_label] # correction already performed in bti_info
t2 = raw_info['chs'][ii]['loc']
t3 = raw_info_con['chs'][ii]['loc']
assert_allclose(t1, t2, atol=1e-15)
assert (not np.allclose(t1, t3))
idx_a = raw_info_con['ch_names'].index('MEG 001')
idx_b = raw_info['ch_names'].index('A22')
assert_equal(
raw_info_con['chs'][idx_a]['coord_frame'],
FIFF.FIFFV_COORD_DEVICE)
assert_equal(
raw_info['chs'][idx_b]['coord_frame'],
FIFF.FIFFV_MNE_COORD_4D_HEAD)
def test_bytes_io():
"""Test bti bytes-io API."""
for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):
raw = read_raw_bti(pdf, config, hs, convert=True, preload=False)
with open(pdf, 'rb') as fid:
pdf = six.BytesIO(fid.read())
with open(config, 'rb') as fid:
config = six.BytesIO(fid.read())
with open(hs, 'rb') as fid:
hs = six.BytesIO(fid.read())
raw2 = read_raw_bti(pdf, config, hs, convert=True, preload=False)
repr(raw2)
assert_array_equal(raw[:][0], raw2[:][0])
def test_setup_headshape():
"""Test reading bti headshape."""
for hs in hs_fnames:
dig, t = _process_bti_headshape(hs)
expected = set(['kind', 'ident', 'r'])
found = set(reduce(lambda x, y: list(x) + list(y),
[d.keys() for d in dig]))
assert (not expected - found)
run_tests_if_main()
|
teonlamont/mne-python
|
mne/io/bti/tests/test_bti.py
|
Python
|
bsd-3-clause
| 12,138
|
from hk2.types.annotations import ClassAnnotation
#===========================================================
class Contract(ClassAnnotation):
pass
|
mikhtonyuk/pyhk2
|
hk2/annotations/contract.py
|
Python
|
mit
| 155
|
import os
base_url = os.environ.get('DP_BASE_URL', '')
base_dir = os.environ.get('DP_APP_ROOT', os.path.dirname(os.path.realpath(__file__)) + '/..')
adapters = {
'core': 'deployer.core',
'aws': 'deployer.aws'
}
session = {
'session.type': 'file',
'session.cookie_expires': False,
'session.data_dir': '/tmp/sessions',
'session.httponly': True,
'session.key': 'deployer.session.id'
}
default_region = os.environ.get('AWS_DEFAULT_REGION', None)
ecr_registry = os.environ.get('DP_ECR_REGISTRY', '')
ecs_region = os.environ.get('DP_ECS_REGION', default_region or 'us-east-1')
ecr_region = os.environ.get('DP_ECR_REGION', default_region or 'us-east-1')
s3_region = os.environ.get('DP_S3_REGION', default_region or 'us-east-1')
scotty_yml_s3_bucket = os.environ.get('DP_SCOTTY_YML_S3_BUCKET', None)
scotty_yml_s3_key = os.environ.get('DP_SCOTTY_YML_S3_KEY', 'dummy_scotty.yml')
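# Usage sketch (values are hypothetical): every setting above can be overridden
# through the environment before the app starts, e.g.
#
#     export DP_BASE_URL=/deployer
#     export AWS_DEFAULT_REGION=eu-west-1   # fallback for all *_REGION settings
#     export DP_ECR_REGISTRY=123456789012.dkr.ecr.eu-west-1.amazonaws.com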
|
filc/python-aws-ecr-deployer
|
config/application.py
|
Python
|
apache-2.0
| 905
|
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin
from django.utils import timezone
from django.db import models
from ckeditor.fields import RichTextField
from datetime import date
class Contact(CMSPlugin):
number = models.CharField(max_length=128)
email = models.EmailField()
text = RichTextField(null=True, blank=True, verbose_name=_('First Line Text'),
help_text='First line text')
class ComingSoon(CMSPlugin):
email = models.EmailField()
page_heading = models.CharField(max_length=128)
description = RichTextField(null=True, blank=True, verbose_name=_('First Line Text'),
help_text='First line text')
|
pmutale/www.mutale.nl
|
themes/models.py
|
Python
|
unlicense
| 768
|
from unquote import unquote
def parse_query(query, keep_blank_values=0, strict_parsing=0):
"""Parse a URL query string and return the components as a dictionary.
Based on the cgi.parse_qs method. This is a utility function provided
by urlparse so that users need not use the cgi module for
parsing the url query string.
Arguments:
    query: URL query string to be parsed
keep_blank_values: flag indicating whether blank values in
URL encoded queries should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
"""
pairs = [s2 for s1 in query.split('&') for s2 in s1.split(';')]
query = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError, "bad query field: %r" % (name_value,)
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = unquote(nv[0].replace('+', ' '))
value = unquote(nv[1].replace('+', ' '))
query.append((name, value))
    result = {}
    for name, value in query:
        if name in result:
            result[name].append(value)
        else:
            result[name] = [value]
    return result
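# Behaviour sketch, derived from the code above:
#
#     parse_query('a=1&a=2&b=')                       -> {'a': ['1', '2']}
#     parse_query('a=1&a=2&b=', keep_blank_values=1)  -> {'a': ['1', '2'], 'b': ['']}
#     parse_query('name=John+Doe')                    -> {'name': ['John Doe']}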
|
smurfix/pywsgi
|
src/pywsgi/util/parse_query.py
|
Python
|
gpl-2.0
| 1,859
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova import log as logging
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'consoles')
class ConsolesController(wsgi.Controller):
def __init__(self, *args, **kwargs):
self.compute_api = compute.API()
super(ConsolesController, self).__init__(*args, **kwargs)
@wsgi.action('os-getVNCConsole')
def get_vnc_console(self, req, id, body):
"""Get text console output."""
context = req.environ['nova.context']
authorize(context)
console_type = body['os-getVNCConsole'].get('type')
if not console_type:
raise webob.exc.HTTPBadRequest(_('Missing type specification'))
try:
instance = self.compute_api.get(context, id)
except exception.NotFound:
raise webob.exc.HTTPNotFound(_('Instance not found'))
try:
output = self.compute_api.get_vnc_console(context,
instance,
console_type)
except exception.ConsoleTypeInvalid, e:
raise webob.exc.HTTPBadRequest(_('Invalid type specification'))
except exception.NotAuthorized:
raise webob.exc.HTTPUnauthorized()
except exception.NotFound:
raise webob.exc.HTTPNotFound(_('Instance not found'))
return {'console': {'type': console_type, 'url': output['url']}}
def get_actions(self):
"""Return the actions the extension adds, as required by contract."""
actions = [extensions.ActionExtension("servers", "os-getVNCConsole",
self.get_vnc_console)]
return actions
class Consoles(extensions.ExtensionDescriptor):
"""Interactive Console support."""
name = "Consoles"
alias = "os-consoles"
namespace = "http://docs.openstack.org/compute/ext/os-consoles/api/v2"
updated = "2011-12-23T00:00:00+00:00"
def get_controller_extensions(self):
controller = ConsolesController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
|
usc-isi/extra-specs
|
nova/api/openstack/compute/contrib/consoles.py
|
Python
|
apache-2.0
| 2,973
|
"""
FEZ type readers!
"""
from __future__ import print_function
from xnb_parse.type_readers.fez import fez_basic, fez_graphics, fez_level, fez_music
__all__ = ['fez_basic', 'fez_graphics', 'fez_level', 'fez_music']
|
fesh0r/xnb_parse
|
xnb_parse/type_readers/fez/__init__.py
|
Python
|
mit
| 219
|
'''
Test cases for pyclbr.py
Nick Mathewson
'''
import sys
from types import FunctionType, MethodType, BuiltinFunctionType
import pyclbr
from unittest import TestCase, main as unittest_main
StaticMethodType = type(staticmethod(lambda: None))
ClassMethodType = type(classmethod(lambda c: None))
# Here we test the python class browser code.
#
# The main function in this suite, 'testModule', compares the output
# of pyclbr with the introspected members of a module. Because pyclbr
# is imperfect (as designed), testModule is called with a set of
# members to ignore.
class PyclbrTest(TestCase):
def assertListEq(self, l1, l2, ignore):
''' succeed iff {l1} - {ignore} == {l2} - {ignore} '''
missing = (set(l1) ^ set(l2)) - set(ignore)
if missing:
print("l1=%r\nl2=%r\nignore=%r" % (l1, l2, ignore), file=sys.stderr)
self.fail("%r missing" % missing.pop())
def assertHasattr(self, obj, attr, ignore):
''' succeed iff hasattr(obj,attr) or attr in ignore. '''
if attr in ignore: return
if not hasattr(obj, attr): print("???", attr)
self.assertTrue(hasattr(obj, attr),
'expected hasattr(%r, %r)' % (obj, attr))
def assertHaskey(self, obj, key, ignore):
''' succeed iff key in obj or key in ignore. '''
if key in ignore: return
if key not in obj:
print("***",key, file=sys.stderr)
self.assertIn(key, obj)
def assertEqualsOrIgnored(self, a, b, ignore):
''' succeed iff a == b or a in ignore or b in ignore '''
if a not in ignore and b not in ignore:
self.assertEqual(a, b)
def checkModule(self, moduleName, module=None, ignore=()):
''' succeed iff pyclbr.readmodule_ex(modulename) corresponds
to the actual module object, module. Any identifiers in
ignore are ignored. If no module is provided, the appropriate
module is loaded with __import__.'''
ignore = set(ignore) | set(['object'])
if module is None:
# Import it.
# ('<silly>' is to work around an API silliness in __import__)
module = __import__(moduleName, globals(), {}, ['<silly>'])
dict = pyclbr.readmodule_ex(moduleName)
def ismethod(oclass, obj, name):
classdict = oclass.__dict__
if isinstance(obj, MethodType):
# could be a classmethod
if (not isinstance(classdict[name], ClassMethodType) or
obj.__self__ is not oclass):
return False
elif not isinstance(obj, FunctionType):
return False
objname = obj.__name__
if objname.startswith("__") and not objname.endswith("__"):
objname = "_%s%s" % (oclass.__name__, objname)
return objname == name
# Make sure the toplevel functions and classes are the same.
for name, value in dict.items():
if name in ignore:
continue
self.assertHasattr(module, name, ignore)
py_item = getattr(module, name)
if isinstance(value, pyclbr.Function):
self.assertIsInstance(py_item, (FunctionType, BuiltinFunctionType))
if py_item.__module__ != moduleName:
continue # skip functions that came from somewhere else
self.assertEqual(py_item.__module__, value.module)
else:
self.assertIsInstance(py_item, type)
if py_item.__module__ != moduleName:
continue # skip classes that came from somewhere else
real_bases = [base.__name__ for base in py_item.__bases__]
pyclbr_bases = [ getattr(base, 'name', base)
for base in value.super ]
try:
self.assertListEq(real_bases, pyclbr_bases, ignore)
except:
print("class=%s" % py_item, file=sys.stderr)
raise
actualMethods = []
for m in py_item.__dict__.keys():
if ismethod(py_item, getattr(py_item, m), m):
actualMethods.append(m)
foundMethods = []
for m in value.methods.keys():
if m[:2] == '__' and m[-2:] != '__':
foundMethods.append('_'+name+m)
else:
foundMethods.append(m)
try:
self.assertListEq(foundMethods, actualMethods, ignore)
self.assertEqual(py_item.__module__, value.module)
self.assertEqualsOrIgnored(py_item.__name__, value.name,
ignore)
# can't check file or lineno
except:
print("class=%s" % py_item, file=sys.stderr)
raise
# Now check for missing stuff.
def defined_in(item, module, name):
if item.__name__ != name:
# Item was defined with another name
return False
if isinstance(item, type):
return item.__module__ == module.__name__
if isinstance(item, FunctionType):
return item.__globals__ is module.__dict__
return False
for name in dir(module):
item = getattr(module, name)
if isinstance(item, (type, FunctionType)):
if defined_in(item, module, name):
self.assertHaskey(dict, name, ignore)
def test_easy(self):
self.checkModule('pyclbr')
self.checkModule('ast')
self.checkModule('doctest', ignore=("TestResults", "_SpoofOut",
"DocTestCase", '_DocTestSuite'))
self.checkModule('difflib', ignore=("Match",))
def test_decorators(self):
# XXX: See comment in pyclbr_input.py for a test that would fail
# if it were not commented out.
#
self.checkModule('test.pyclbr_input', ignore=['om'])
def test_others(self):
cm = self.checkModule
# These were once about the 10 longest modules
cm('random', ignore=('Random',)) # from _random import Random as CoreGenerator
cm('cgi', ignore=('log',)) # set with = in module
cm('pickle', ignore=('partial',))
cm('aifc', ignore=('openfp', '_aifc_params')) # set with = in module
cm('sre_parse', ignore=('dump', 'groups', 'pos')) # from sre_constants import *; property
cm('pdb')
cm('pydoc')
# Tests for modules inside packages
cm('email.parser')
cm('test.test_pyclbr')
def test_issue_14798(self):
# test ImportError is raised when the first part of a dotted name is
# not a package
self.assertRaises(ImportError, pyclbr.readmodule_ex, 'asyncore.foo')
if __name__ == "__main__":
unittest_main()
|
yotchang4s/cafebabepy
|
src/main/python/test/test_pyclbr.py
|
Python
|
bsd-3-clause
| 7,112
|
compiler = './icc.py'
mpicompiler = './icc.py'
mpilinker = 'MPICH_CC=gcc mpicc'
scalapack = True
library_dirs += ['/opt/intel/Compiler/11.0/074/mkl/lib/em64t']
libraries = ['mkl_intel_lp64' ,'mkl_sequential' ,'mkl_core',
'mkl_lapack',
'mkl_scalapack_lp64', 'mkl_blacs_intelmpi_lp64',
'pthread'
]
libraries += ['xc']
# change this to your installation directory
LIBXCDIR='/lustre/jhome5/hfr04/hfr047/gridpaw/libxc-2.0.2/install/'
library_dirs += [LIBXCDIR + 'lib']
include_dirs += [LIBXCDIR + 'include']
define_macros += [('GPAW_NO_UNDERSCORE_CBLACS', '1')]
define_macros += [('GPAW_NO_UNDERSCORE_CSCALAPACK', '1')]
define_macros += [("GPAW_ASYNC",1)]
define_macros += [("GPAW_MPI2",1)]
|
robwarm/gpaw-symm
|
doc/install/Linux/customize_juropa_icc_libxc.py
|
Python
|
gpl-3.0
| 743
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------
# Copyright (c) 2009 Jendrik Seipp
#
# RedNotebook is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# RedNotebook is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with RedNotebook; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -----------------------------------------------------------------------
import datetime
import logging
import os
import zipfile
from gi.repository import Gtk
DATE_FORMAT = '%Y-%m-%d'
MAX_BACKUP_AGE = 30
BACKUP_NOW = 100
ASK_NEXT_TIME = 200
NEVER_ASK_AGAIN = 300
def write_archive(archive_file_name, files, base_dir='', arc_base_dir=''):
"""
Use base_dir for relative filenames, in case you don't
want your archive to contain '/home/...'
"""
archive = zipfile.ZipFile(archive_file_name, mode="w", compression=zipfile.ZIP_DEFLATED)
for file in files:
archive.write(file, os.path.join(arc_base_dir, file[len(base_dir):]))
archive.close()
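# Usage sketch (paths are hypothetical): note the trailing slash on base_dir,
# which makes the sliced file name relative so that arc_base_dir is applied:
#
#     write_archive('/tmp/backup.zip', ['/home/u/data/2020-01-01.txt'],
#                   base_dir='/home/u/data/', arc_base_dir='data')
#     # -> the archive contains 'data/2020-01-01.txt'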
class Archiver:
def __init__(self, journal):
self.journal = journal
def check_last_backup_date(self):
if not self._backup_necessary():
return
logging.warning('Last backup is older than %d days.' % MAX_BACKUP_AGE)
text1 = _('It has been a while since you made your last backup.')
text2 = _('You can backup your journal to a zip file to avoid data loss.')
dialog = Gtk.MessageDialog(
parent=self.journal.frame.main_frame,
type=Gtk.MessageType.QUESTION,
flags=Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
message_format=text1)
dialog.set_title(_('Backup'))
dialog.format_secondary_text(text2)
dialog.add_buttons(
_('Backup now'), BACKUP_NOW,
_('Ask at next start'), ASK_NEXT_TIME,
_('Never ask again'), NEVER_ASK_AGAIN)
answer = dialog.run()
dialog.hide()
if answer == BACKUP_NOW:
self.backup()
elif answer == ASK_NEXT_TIME:
pass
elif answer == NEVER_ASK_AGAIN:
self.journal.config['lastBackupDate'] = datetime.datetime.max.strftime(DATE_FORMAT)
def backup(self):
backup_file = self._get_backup_file()
# Abort if user did not select a path.
if not backup_file:
return
self.journal.save_to_disk()
data_dir = self.journal.dirs.data_dir
archive_files = []
for root, dirs, files in os.walk(data_dir):
for file in files:
if not file.endswith('~') and 'RedNotebook-Backup' not in file:
archive_files.append(os.path.join(root, file))
write_archive(backup_file, archive_files, data_dir)
logging.info('The content has been backed up at %s' % backup_file)
self.journal.config['lastBackupDate'] = datetime.datetime.now().strftime(DATE_FORMAT)
self.journal.config['lastBackupDir'] = os.path.dirname(backup_file)
def _backup_necessary(self):
now = datetime.datetime.now()
date_string = self.journal.config.read('lastBackupDate', now.strftime(DATE_FORMAT))
try:
last_backup_date = datetime.datetime.strptime(date_string, DATE_FORMAT)
except ValueError as err:
logging.error('Last backup date could not be read: %s' % err)
return True
last_backup_age = (now - last_backup_date).days
logging.info('Last backup was made %d days ago' % last_backup_age)
return last_backup_age > MAX_BACKUP_AGE
def _get_backup_file(self):
if self.journal.title == 'data':
name = ''
else:
name = '-' + self.journal.title
proposed_filename = 'RedNotebook-Backup%s-%s.zip' % (name, datetime.date.today())
proposed_directory = self.journal.config.read(
'lastBackupDir', os.path.expanduser('~'))
backup_dialog = self.journal.frame.builder.get_object('backup_dialog')
backup_dialog.set_transient_for(self.journal.frame.main_frame)
backup_dialog.set_current_folder(proposed_directory)
backup_dialog.set_current_name(proposed_filename)
filter = Gtk.FileFilter()
filter.set_name("Zip")
filter.add_pattern("*.zip")
backup_dialog.add_filter(filter)
response = backup_dialog.run()
backup_dialog.hide()
if response == Gtk.ResponseType.OK:
path = backup_dialog.get_filename()
return path
|
jendrikseipp/rednotebook-elementary
|
rednotebook/backup.py
|
Python
|
gpl-2.0
| 5,067
|
from mod_base import *
class Calc(Command):
"""Calculate math expressions. e.g. calc 3+4*6"""
def run(self, win, user, data, caller=None):
"""Evaluate a python expression semi-safely."""
if not data:
win.Send("specify what to calculate")
else:
try:
# Remove quotes to prevent attacks like "spam"*100000
data = data.replace("\"", "")
data = data.replace("'", "")
result = eval(data, {"__builtins__": {}})
win.Send(str(result))
except:
win.Send("failed to calculate")
module = {
"class": Calc,
"type": MOD_COMMAND,
"level": 0,
}
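# Behaviour sketch: the empty __builtins__ mapping keeps plain arithmetic
# working while blocking access to builtins such as open():
#
#     eval("3+4*6", {"__builtins__": {}})                # -> 27
#     eval("open('/etc/passwd')", {"__builtins__": {}})  # -> NameError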
|
richrd/bx
|
modules/calc.py
|
Python
|
apache-2.0
| 706
|
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mssdk.py rel_2.3.5:3347:d31d5a4e74b6 2015/07/31 14:36:10 bdbaddog"
"""engine.SCons.Tool.mssdk
Tool-specific initialization for Microsoft SDKs, both Platform
SDKs and Windows SDKs.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
from MSCommon import mssdk_exists, \
mssdk_setup_env
def generate(env):
"""Add construction variables for an MS SDK to an Environment."""
mssdk_setup_env(env)
def exists(env):
return mssdk_exists()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
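# Usage sketch (standard SCons tool selection, as the module docstring notes):
#
#     env = Environment(tools=['default', 'mssdk'])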
|
stefanklug/mapnik
|
scons/scons-local-2.3.6/SCons/Tool/mssdk.py
|
Python
|
lgpl-2.1
| 1,834
|
from __future__ import absolute_import
from __future__ import print_function
from zerver.lib.test_classes import ZulipTestCase
class CompatibilityTest(ZulipTestCase):
def test_compatibility(self):
# type: () -> None
result = self.client_get("/compatibility", HTTP_USER_AGENT='ZulipMobile/5.0')
self.assert_json_success(result)
result = self.client_get("/compatibility", HTTP_USER_AGENT='ZulipInvalid/5.0')
self.assert_json_error(result, "Client is too old")
|
sonali0901/zulip
|
zerver/tests/test_compatibility.py
|
Python
|
apache-2.0
| 504
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class NcbiToolkit(AutotoolsPackage):
"""NCBI C++ Toolkit"""
homepage = "https://www.ncbi.nlm.nih.gov/IEB/ToolBox/CPP_DOC/"
url = "ftp://ftp.ncbi.nih.gov/toolbox/ncbi_tools++/CURRENT/ncbi_cxx--21_0_0.tar.gz"
version('21_0_0', '14e021e08b1a78ac9cde98d0cab92098')
depends_on('boost@1.35.0:')
depends_on('bzip2')
depends_on('libjpeg')
depends_on('libpng')
depends_on('libtiff')
depends_on('libxml2')
depends_on('libxslt@1.1.14:')
depends_on('lzo')
depends_on('pcre')
depends_on('giflib')
depends_on('sqlite@3.6.6:')
depends_on('zlib')
depends_on('samtools')
depends_on('bamtools')
def configure_args(self):
return ['--without-sybase', '--without-fastcgi']
def patch(self):
with working_dir(join_path('src', 'util', 'image')):
filter_file(r'jpeg_start_compress(&cinfo, true)',
'jpeg_start_compress(&cinfo, TRUE)',
'image_io_jpeg.cpp', string=True)
def build(self, spec, prefix):
compiler_version = self.compiler.version.joined
with working_dir(join_path(
'GCC{0}-DebugMT64'.format(compiler_version), 'build')):
make('all_r')
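# Usage sketch (specs are hypothetical) once this package is on Spack's repo
# path:
#
#     spack install ncbi-toolkit
#     spack install ncbi-toolkit@21_0_0 ^boost@1.60.0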
|
krafczyk/spack
|
var/spack/repos/builtin/packages/ncbi-toolkit/package.py
|
Python
|
lgpl-2.1
| 2,492
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base handler class for all mapreduce handlers."""
# pylint: disable=g-bad-name
import httplib
import logging
from mapreduce.lib import simplejson
try:
from mapreduce.lib import pipeline
except ImportError:
pipeline = None
from google.appengine.ext import webapp
from mapreduce import errors
from mapreduce import model
class Error(Exception):
"""Base-class for exceptions in this module."""
class BadRequestPathError(Error):
"""The request path for the handler is invalid."""
class BaseHandler(webapp.RequestHandler):
"""Base class for all mapreduce handlers."""
def base_path(self):
"""Base path for all mapreduce-related urls."""
path = self.request.path
return path[:path.rfind("/")]
class TaskQueueHandler(BaseHandler):
"""Base class for handlers intended to be run only from the task queue.
Sub-classes should implement the 'handle' method.
"""
def post(self):
if "X-AppEngine-QueueName" not in self.request.headers:
logging.error(self.request.headers)
logging.error("Task queue handler received non-task queue request")
self.response.set_status(
403, message="Task queue handler received non-task queue request")
return
self.handle()
def handle(self):
"""To be implemented by subclasses."""
raise NotImplementedError()
def task_retry_count(self):
"""Number of times this task has been retried."""
return int(self.request.headers.get("X-AppEngine-TaskExecutionCount", 0))
def retry_task(self):
"""Ask taskqueue to retry this task.
Even though raising an exception can cause a task retry, it
    will flood logs with highly visible ERROR logs. Handlers should use
    this method to perform controlled task retries. Only raise exceptions
    for cases that deserve ERROR log entries.
"""
self.response.set_status(httplib.SERVICE_UNAVAILABLE, "Retry task")
self.response.clear()
class JsonHandler(BaseHandler):
"""Base class for JSON handlers for user interface.
Sub-classes should implement the 'handle' method. They should put their
response data in the 'self.json_response' dictionary. Any exceptions raised
by the sub-class implementation will be sent in a JSON response with the
name of the error_class and the error_message.
"""
def __init__(self, *args):
"""Initializer."""
    super(JsonHandler, self).__init__(*args)
self.json_response = {}
def base_path(self):
"""Base path for all mapreduce-related urls.
JSON handlers are mapped to /base_path/command/command_name thus they
require special treatment.
"""
path = self.request.path
base_path = path[:path.rfind("/")]
if not base_path.endswith("/command"):
raise BadRequestPathError(
"Json handlers should have /command path prefix")
return base_path[:base_path.rfind("/")]
def _handle_wrapper(self):
if self.request.headers.get("X-Requested-With") != "XMLHttpRequest":
logging.error("Got JSON request with no X-Requested-With header")
self.response.set_status(
403, message="Got JSON request with no X-Requested-With header")
return
self.json_response.clear()
try:
self.handle()
except errors.MissingYamlError:
logging.debug("Could not find 'mapreduce.yaml' file.")
self.json_response.clear()
self.json_response["error_class"] = "Notice"
self.json_response["error_message"] = "Could not find 'mapreduce.yaml'"
except Exception, e:
logging.exception("Error in JsonHandler, returning exception.")
# TODO(user): Include full traceback here for the end-user.
self.json_response.clear()
self.json_response["error_class"] = e.__class__.__name__
self.json_response["error_message"] = str(e)
self.response.headers["Content-Type"] = "text/javascript"
try:
output = simplejson.dumps(self.json_response, cls=model.JsonEncoder)
except:
logging.exception("Could not serialize to JSON")
self.response.set_status(500, message="Could not serialize to JSON")
return
else:
self.response.out.write(output)
def handle(self):
"""To be implemented by sub-classes."""
raise NotImplementedError()
class PostJsonHandler(JsonHandler):
"""JSON handler that accepts POST requests."""
def post(self):
self._handle_wrapper()
class GetJsonHandler(JsonHandler):
"""JSON handler that accepts GET posts."""
def get(self):
self._handle_wrapper()
class HugeTaskHandler(TaskQueueHandler):
"""Base handler for processing HugeTasks."""
class _RequestWrapper(object):
def __init__(self, request):
self._request = request
self._params = model.HugeTask.decode_payload(request)
def get(self, name, default=""):
return self._params.get(name, default)
def set(self, name, value):
self._params[name] = value
def __getattr__(self, name):
return getattr(self._request, name)
def __init__(self, *args, **kwargs):
super(HugeTaskHandler, self).__init__(*args, **kwargs)
def initialize(self, request, response):
super(HugeTaskHandler, self).initialize(request, response)
self.request = self._RequestWrapper(self.request)
# This path will be changed by the build process when this is part of the SDK.
_DEFAULT_BASE_PATH = "/mapreduce"
_DEFAULT_PIPELINE_BASE_PATH = _DEFAULT_BASE_PATH + "/pipeline"
if pipeline:
class PipelineBase(pipeline.Pipeline):
"""Base class for all pipelines within mapreduce framework.
Rewrites base path to use pipeline library bundled with mapreduce.
"""
def start(self, **kwargs):
if "base_path" not in kwargs:
kwargs["base_path"] = _DEFAULT_PIPELINE_BASE_PATH
return pipeline.Pipeline.start(self, **kwargs)
else:
PipelineBase = None
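# --- Editor's usage sketch (illustrative; this handler and its parameter
# names are hypothetical, not part of the original module) ---
class _ExampleCounterHandler(TaskQueueHandler):
  """Retries its task until a hypothetical work item is ready."""
  def handle(self):
    if self.request.get("not_ready_yet"):
      # Controlled retry: responds 503 instead of raising, so taskqueue
      # re-enqueues the task without an ERROR log entry.
      self.retry_task()
      return
    logging.info("succeeded after %d retries", self.task_retry_count())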
|
johnwlockwood/appengine-mapreduce
|
python/src/mapreduce/base_handler.py
|
Python
|
apache-2.0
| 6,390
|
# Standard library imports
import random
# Related third party imports
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises_regexp, assert_true
# Local application/library specific imports
import pval_correction as my_crt
def test_correct():
"""Testing function correct"""
    # Random 1-D input array p of random size, with (up to) 2 NaN entries
size = random.randint(1, 5)
p = np.random.rand(size, )
idx = random.randint(0, int(size / 3))
p[idx] = np.nan
p[2 * idx] = np.nan
p_isnan = np.zeros(p.shape, dtype=bool)
p_isnan[idx] = True
p_isnan[2 * idx] = True
p_isnotnan = np.logical_not(p_isnan)
# Test output for different correction values
q_uncorrected = my_crt.correct(p)
q_bonferroni = my_crt.correct(p, correction="bonferroni")
q_fdr = my_crt.correct(p, correction="fdr")
assert_array_almost_equal(q_uncorrected, p)
for q in [q_uncorrected, q_bonferroni, q_fdr]:
assert_array_equal(p_isnan, np.isnan(q))
assert_true(np.all(p[p_isnotnan] <= q[p_isnotnan]))
    # Check that an error is raised for an unknown value of correction
with assert_raises_regexp(ValueError, "Unknown correction."):
my_crt.correct(p, correction="blabla")
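# --- Editor's note (illustrative; assumes the conventional definitions) ---
# bonferroni: q_i = min(1, n * p_i) with n = number of tests, so
#   np.minimum(1.0, p * p.size) would reproduce q_bonferroni;
# fdr: Benjamini-Hochberg step-up on the sorted p-values.
# Either way q >= p element-wise, which is what the loop above asserts.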
def test_fdr():
"""Testing function fdr"""
|
rphlypo/parietalretreat
|
test_pval_correction.py
|
Python
|
bsd-2-clause
| 1,341
|
# -*- coding: utf-8 -*-
"""
httpbin.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power httpbin.
"""
class CaseInsensitiveDict(dict):
"""Case-insensitive Dictionary for headers.
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header.
"""
def _lower_keys(self):
return map(str.lower, self.keys())
def __contains__(self, key):
return key.lower() in self._lower_keys()
def __getitem__(self, key):
# We allow fall-through here, so values default to None
if key in self:
return self.items()[self._lower_keys().index(key.lower())][1]
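# --- Editor's usage sketch (illustrative) ---
# d = CaseInsensitiveDict({'Content-Encoding': 'gzip'})
# 'content-encoding' in d   # True
# d['CONTENT-ENCODING']     # 'gzip'
# Note: each lookup rebuilds the lowercased key list, so access is O(n);
# that is fine for the small header dicts httpbin deals with.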
|
cityindex-attic/python-development-flow
|
src/httpbin/structures.py
|
Python
|
apache-2.0
| 670
|
"""
Django settings for inventory project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '1x0tvcm4zv7hf#bq+#4m^_gw8yd#i&&(=rrk1%k496eqgs!o62'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'keepstuff.apps.KeepstuffConfig',
'django_tables2',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'inventory.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'inventory.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
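# --- Editor's sketch (assumption; not generated by startproject) ---
# For deployment, the secrets above would typically come from the
# environment instead of the source tree, e.g.:
# SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
# DEBUG = os.environ.get('DJANGO_DEBUG', '') == '1'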
|
ivh/BrewInventory
|
inventory/settings.py
|
Python
|
gpl-3.0
| 3,238
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.enums import DBMS
from lib.core.settings import MSSQL_SYSTEM_DBS
from lib.core.unescaper import unescaper
from plugins.dbms.mssqlserver.enumeration import Enumeration
from plugins.dbms.mssqlserver.filesystem import Filesystem
from plugins.dbms.mssqlserver.fingerprint import Fingerprint
from plugins.dbms.mssqlserver.syntax import Syntax
from plugins.dbms.mssqlserver.takeover import Takeover
from plugins.generic.misc import Miscellaneous
class MSSQLServerMap(Syntax, Fingerprint, Enumeration, Filesystem, Miscellaneous, Takeover):
"""
This class defines Microsoft SQL Server methods
"""
def __init__(self):
self.excludeDbsList = MSSQL_SYSTEM_DBS
Syntax.__init__(self)
Fingerprint.__init__(self)
Enumeration.__init__(self)
Filesystem.__init__(self)
Miscellaneous.__init__(self)
Takeover.__init__(self)
unescaper[DBMS.MSSQL] = Syntax.escape
|
golismero/golismero
|
tools/sqlmap/plugins/dbms/mssqlserver/__init__.py
|
Python
|
gpl-2.0
| 1,081
|
# ----------------------------------------------------------------------------
# Copyright 2014-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from __future__ import division
from builtins import map, str, zip
from future.utils import native
from collections import deque
import h5py
import inspect
import logging
import numpy as np
import os
import signal
import sys
import time
from timeit import default_timer
import weakref
from neon import NervanaObject, logger as neon_logger
from neon.data import NervanaDataIterator, Ticker
from neon.util.compat import PY3
from neon.util.persist import load_obj, save_obj, load_class
from neon.layers import Convolution, BatchNorm, Multicost
from neon.transforms.cost import Metric
logger = logging.getLogger(__name__)
class Callbacks(NervanaObject):
"""
Container class for storing and iterating over callbacks.
Attributes:
callbacks (list): Ordered set of Callback objects to be run.
"""
def __init__(self, model,
train_set=None,
output_file=None,
eval_freq=None,
progress_bar=True,
save_path=None,
serialize=0,
history=1,
model_file=None,
eval_set=None,
metric=None,
log_token=None):
"""
Create a callbacks container with the default callbacks.
Arguments:
model (Model): the model object
output_file (string, optional): path to save callback data to
eval_freq (int, optional): how often (in epochs) to run evaluation
progress_bar (bool): control whether a progress bar callback is created.
Defaults to True.
save_path (string): file path to save model snapshots (default: None)
serialize (int): serialize model every N epochs (default: 0)
history (int): number of checkpoint files to retain (default: 1)
model_file(string, optional): file to load weights (serialized model) from
eval_set (NervanaDataIterator, optional): the dataset upon which to evaluate
loss or metric
metric (Metric, optional): metric to evaluate
"""
# once the deprecated args are removed the kwargs will also be removed
# as well as the code below
        # epochs had to be removed entirely since it is often passed by argparser args
if train_set is not None:
            logger.warning("Deprecation warning. Callbacks class no longer "
                           "accepts train_set as a parameter. This argument "
                           "will be removed soon; update your code.")
super(Callbacks, self).__init__(name=None)
self.callbacks = list()
self.epoch_marker = 0
self.output_file = output_file
if output_file is None:
if hasattr(self, 'callback_data'):
del self.callback_data
            # self.name should give a unique filename
self.callback_data = h5py.File(self.name, driver='core', backing_store=False)
else:
if os.path.isfile(output_file):
logger.warn("Overwriting output file %s", output_file)
os.remove(output_file)
self.callback_data = h5py.File(output_file, "w")
self.model = weakref.ref(model)
self.model_file = model_file
self.add_callback(TrainCostCallback())
if progress_bar:
self.add_callback(ProgressBarCallback())
if eval_freq:
if not eval_set:
err_msg = 'Evaluation frequency specified but no eval set provided!'
logger.exception(err_msg)
raise ValueError(err_msg)
ecb = LossCallback(eval_set, eval_freq)
self.add_callback(ecb, insert_pos=0)
if metric:
ecb = MetricCallback(eval_set, metric, eval_freq)
self.add_callback(ecb, insert_pos=None)
self.save_path = save_path
if save_path:
serialize_interval = serialize if serialize > 1 else 1
scb = SerializeModelCallback(save_path, serialize_interval, history)
self.add_callback(scb)
self.add_callback(TrainLoggerCallback())
self.add_callback(RunTimerCallback())
try:
# Register if it exists
from cloud_metrics import CloudMetricsCallback
self.add_callback(CloudMetricsCallback(log_token, eval_freq, metric))
except ImportError:
pass
def __del__(self):
try:
self.callback_data.close()
except Exception:
pass
def serialize(self):
"""
Serialize callback configuration.
"""
return self.get_description()
def get_description(self):
"""
Serialize callback configuration.
"""
cdict = {}
cdict['epoch_marker'] = self.epoch_marker
cdict['output_file'] = self.output_file
cdict['callbacks'] = []
for callback in self.callbacks:
cdict['callbacks'].append(callback.get_description())
return cdict
@classmethod
def load_callbacks(cls, cdict, model, data=[]):
"""
Load callbacks.
"""
if type(native(cdict)) is str:
cdict = load_obj(cdict)
callbacks = cls(model, output_file=cdict['output_file'])
callbacks.epoch_marker = cdict['epoch_marker']
callbacks.callbacks = []
for cb in cdict['callbacks']:
module = load_class(cb['type'])
callbacks.callbacks.append(module(**cb['config']))
return callbacks
def add_deconv_callback(self, train_set, valid_set, max_fm=16, dataset_pct=25):
"""
Convenience function to create and add a deconvolution callback. The data can
be used for visualization.
Arguments:
train_set (NervanaDataIterator): the train dataset to use
valid_set (NervanaDataIterator):the validation dataset to use
max_fm: (Default value = 16)
dataset_pct: (Default value = 25)
"""
self.add_callback(DeconvCallback(train_set, valid_set,
max_fm=max_fm, dataset_pct=dataset_pct))
def add_save_best_state_callback(self, path):
"""
Convenience function to create and add a save best state callback.
Arguments:
path (string): where to save the best model state.
"""
self.add_callback(SaveBestStateCallback(path))
def add_watch_ticker_callback(self, valid):
"""
Convenience function to create and add a watch ticker callback.
Arguments:
valid (dataset): the validation set to use
For a ticker dataset, this can be the training set if desired.
"""
self.callbacks.append(WatchTickerCallback(self.model, valid))
def add_early_stop_callback(self, stop_func):
"""
Convenience function to create and add an early stopping callback.
Arguments:
stop_func (function): function to determine when to stop.
"""
self.add_callback(EarlyStopCallback(stop_func))
def add_hist_callback(self, plot_per_mini=False, filter_key=['W']):
"""
        Convenience function to create and add a histogram callback.
"""
self.callbacks.append(HistCallback(plot_per_mini=plot_per_mini, filter_key=filter_key))
def add_callback(self, callback, insert_pos=None):
"""
Add a user supplied callback. Since callbacks are run serially and share data,
order can matter. If the default behavior (to append the callback) is not
sufficient, insert position can be controlled.
Arguments:
callback (Callback): callback object to be registered
insert_pos (int, optional): position in the list to insert the callback.
Defaults to None, meaning append
"""
if insert_pos is None:
self.callbacks.append(callback)
else:
self.callbacks.insert(insert_pos, callback)
def on_train_begin(self, epochs):
"""
Call all registered callbacks' on_train_begin functions.
Arguments:
epochs (int): Total epochs
"""
# data iterator wraps around to avoid partial minibatches
# callbacks producing per-minibatch data need a way to preallocate buffers
config = self.callback_data.create_group('config')
total_minibatches = -((-self.model().ndata * epochs) // self.be.bsz)
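        # -(-x // y) is ceiling division, i.e. ceil(ndata * epochs / bsz)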
config.attrs['total_minibatches'] = total_minibatches
config.attrs['total_epochs'] = epochs
time_markers = self.callback_data.create_group("time_markers")
time_markers.create_dataset("minibatch", (epochs,))
if self.model_file:
self.model().load_params(self.model_file)
# setup an interrupt handler
signal.signal(signal.SIGINT, self.on_sigint_catch)
for c in self.callbacks:
c.on_train_begin(self.callback_data, self.model(), epochs)
def on_train_end(self):
"""
Call all registered callbacks' on_train_end functions.
"""
# reset the signal handler
signal.signal(signal.SIGINT, signal.SIG_DFL)
for c in self.callbacks:
c.on_train_end(self.callback_data, self.model())
self.callback_data.close()
def on_epoch_begin(self, epoch):
"""
Call all registered callbacks' on_epoch_begin functions.
Arguments:
epoch (int): index of epoch that is beginning
"""
for c in self.callbacks:
if c.should_fire(self.callback_data, self.model(), epoch, c.epoch_freq):
c.on_epoch_begin(self.callback_data, self.model(), epoch)
def on_epoch_end(self, epoch):
"""
Call all registered callbacks' on_epoch_end functions.
Arguments:
epoch (int): index of epoch that is ending
"""
for c in self.callbacks:
if c.should_fire(self.callback_data, self.model(), epoch, c.epoch_freq):
c.on_epoch_end(self.callback_data, self.model(), epoch)
self.epoch_marker += self.epoch_minibatches
self.callback_data['time_markers/minibatch'][epoch] = self.epoch_marker
self.callback_data['time_markers'].attrs['epochs_complete'] = epoch + 1
self.callback_data['time_markers'].attrs['minibatches_complete'] = self.epoch_marker
self.callback_data.flush()
def on_minibatch_begin(self, epoch, minibatch):
"""
Call all registered callbacks' on_minibatch_begin functions.
Arguments:
epoch (int): index of current epoch
minibatch (int): index of minibatch that is beginning
"""
for c in self.callbacks:
if c.should_fire(self.callback_data, self.model(), minibatch, c.minibatch_freq):
c.on_minibatch_begin(self.callback_data, self.model(), epoch, minibatch)
def on_minibatch_end(self, epoch, minibatch):
"""
Call all registered callbacks' on_minibatch_end functions.
Arguments:
epoch (int): index of current epoch
minibatch (int): index of minibatch that is ending
"""
for c in self.callbacks:
if c.should_fire(self.callback_data, self.model(), minibatch, c.minibatch_freq):
c.on_minibatch_end(self.callback_data, self.model(), epoch, minibatch)
# keep track of the number of mb per epoch, since they vary
self.epoch_minibatches = minibatch + 1
def on_sigint_catch(self, epoch, minibatch):
"""
Callback to handle SIGINT events.
Arguments:
epoch (int): index of current epoch
minibatch (int): index of minibatch that is ending
"""
        # restore the original handler
signal.signal(signal.SIGINT, signal.SIG_DFL)
# save the model
if self.save_path is not None:
save_obj(self.model().serialize(keep_states=True), self.save_path)
raise KeyboardInterrupt('Checkpoint file saved to {0}'.format(self.save_path))
else:
raise KeyboardInterrupt
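# --- Editor's usage sketch (illustrative; `mlp`, `opt`, `cost` and the data
# iterators are assumed to be constructed elsewhere) ---
# from neon.transforms import Misclassification
# callbacks = Callbacks(mlp, eval_set=valid_set, metric=Misclassification(),
#                       eval_freq=1, save_path='model.p', serialize=1)
# mlp.fit(train_set, cost=cost, optimizer=opt, num_epochs=10,
#         callbacks=callbacks)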
class Callback(NervanaObject):
"""
Interface defining common callback functions.
Implement a callback by subclassing Callback and overriding the necessary
on_[train,epoch,minibatch]_[begin,end] functions.
Callback functions provide time queues as arguments but derived callback
    classes must manage their own state.
"""
def __init__(self, epoch_freq=1, minibatch_freq=1):
self.epoch_freq = epoch_freq
self.minibatch_freq = minibatch_freq
self.costnm = None
def get_description(self):
"""
Serialize callback configuration.
"""
keys = inspect.getargspec(self.__init__)[0]
keys.remove('self')
skip = []
for key in keys:
if isinstance(getattr(self, key), NervanaDataIterator):
                # data iterator inputs are serialized separately
skip.append(key)
pdict = super(Callback, self).get_description(skip=skip)
for datap in skip:
pdict['config'][datap] = {'type': 'Data', 'name': getattr(self, datap).name}
return pdict
def on_train_begin(self, callback_data, model, epochs):
"""
Called when training is about to begin
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
"""
pass
def on_train_end(self, callback_data, model):
"""
Called when training is about to end
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
"""
pass
def on_epoch_begin(self, callback_data, model, epoch):
"""
Called when an epoch is about to begin
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epoch (int): index of epoch that is beginning
"""
pass
def on_epoch_end(self, callback_data, model, epoch):
"""
Called when an epoch is about to end
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epoch (int): index of epoch that is ending
"""
pass
def on_minibatch_begin(self, callback_data, model, epoch, minibatch):
"""
Called when a minibatch is about to begin
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epoch (int): index of current epoch
minibatch (int): index of minibatch that is beginning
"""
pass
def on_minibatch_end(self, callback_data, model, epoch, minibatch):
"""
Called when a minibatch is about to end
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epoch (int): index of current epoch
minibatch (int): index of minibatch that is ending
"""
pass
def should_fire(self, callback_data, model, time, freq):
"""
Helper function for determining if a callback should do work at a given
interval.
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
time (int): current time, in an arbitrary unit
freq (int, list, None): firing frequency, in multiples of the unit used
for time, or a list of times, or None (never fire)
"""
t, f = time, freq
if ((type(f) is int and (t + 1) % f == 0) or (type(f) is list and t in f)):
return True
return False
def _get_cached_epoch_loss(self, callback_data, model, epoch, label):
"""
Helper function that checks if there exists a loss with a given label at a certain
        epoch index. Depends on a LossCallback to have previously computed the loss and
        stored it in callback_data. Does not actually do any computation.
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epoch (int): epoch index to check
label (str): label under which to find cached loss in callback_data
Returns:
dict containing loss cost value, timing information, and display information
"""
if self.costnm is None:
self.costnm = "Loss" # default costname to display if we can't resolve cost function
if model.cost:
self.costnm = model.cost.costfunc.__class__.__name__ + " " + self.costnm
cost_key = 'cost/' + label
time_key = 'time/' + label
if cost_key not in callback_data:
return None
eval_freq = callback_data[cost_key].attrs['epoch_freq']
if (epoch + 1) % eval_freq == 0:
return dict(cost=callback_data[cost_key][epoch // eval_freq],
time=callback_data[time_key][epoch // eval_freq],
costnm=self.costnm)
class SerializeModelCallback(Callback):
"""
Callback for serializing the state of the model.
Arguments:
save_path (str): where to save the model dataset
epoch_freq (int, optional): how often (in epochs) to serialize the
model. If not specified, we default to
running every epoch.
history (int, optional): number of checkpoint files to retain, newest
files up to this count are retained. filename
for the check point files will be
<save_path>_<epoch>.
"""
def __init__(self, save_path, epoch_freq=1, history=1):
super(SerializeModelCallback, self).__init__(epoch_freq=epoch_freq)
self.save_path = save_path
self.history = history
self.checkpoint_files = deque()
def on_epoch_end(self, callback_data, model, epoch):
"""
Called when an epoch is about to end
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epoch (int): index of epoch that is ending
"""
if self.history > 1:
self.save_history(epoch, model)
else:
save_obj(model.serialize(keep_states=True), self.save_path)
def save_history(self, epoch, model):
"""
Save history
"""
# if history > 1, this function will save the last N checkpoints
# where N is equal to self.history. The files will have the form
# of save_path with the epoch added to the filename before the ext
if len(self.checkpoint_files) > self.history:
            # remove oldest checkpoint file when the max count has been saved
fn = self.checkpoint_files.popleft()
try:
os.remove(fn)
logger.info('removed old checkpoint %s' % fn)
except OSError:
logger.warn('Could not delete old checkpoint file %s' % fn)
path_split = os.path.splitext(self.save_path)
save_path = '%s_%d%s' % (path_split[0], epoch, path_split[1])
# add the current file to the deque
self.checkpoint_files.append(save_path)
save_obj(model.serialize(keep_states=True), save_path)
# maintain a symlink pointing to the latest model params
try:
if os.path.islink(self.save_path):
os.remove(self.save_path)
os.symlink(os.path.split(save_path)[-1], self.save_path)
except OSError:
logger.warn('Could not create latest model symlink %s -> %s'
% (self.save_path, save_path))
class RunTimerCallback(Callback):
"""
Callback which tracks the total training time.
"""
def __init__(self):
super(RunTimerCallback, self).__init__()
def on_train_begin(self, callback_data, model, epochs):
"""
Called when training is about to begin
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epochs (int): Total epochs
"""
timing = callback_data.create_group("time/train")
timing.create_dataset("start_time", (1,), dtype='float64')
timing.create_dataset("end_time", (1,), dtype='float64')
timing['start_time'][0] = time.time()
timing['start_time'].attrs['units'] = 'seconds'
def on_train_end(self, callback_data, model):
"""
Called when training is about to end
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
"""
callback_data['time/train/end_time'][0] = time.time()
callback_data['time/train/end_time'].attrs['units'] = 'seconds'
class TrainCostCallback(Callback):
"""
Callback for computing average training cost periodically during training.
"""
def __init__(self, wsz=10):
super(TrainCostCallback, self).__init__(epoch_freq=1)
self.wsz = wsz
def on_train_begin(self, callback_data, model, epochs):
"""
Called when training is about to begin
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epochs (int): Total epochs
"""
# preallocate space for the number of minibatches in the whole run
points = callback_data['config'].attrs['total_minibatches']
callback_data.create_dataset("cost/train", (points,))
# make sure our window size is less than or equal to total number of minibatches
self.wsz = min(points, self.wsz)
self.cost_history = deque([], maxlen=int(self.wsz))
# clue in the data reader to use the 'minibatch' time_markers
callback_data['cost/train'].attrs['time_markers'] = 'minibatch'
def on_minibatch_end(self, callback_data, model, epoch, minibatch):
"""
Called when minibatch is about to end
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epoch (int): index of current epoch
minibatch (int): index of minibatch that is ending
"""
self.cost_history.append(model.cost.cost)
mean_cost = sum(self.cost_history) / len(self.cost_history)
mbstart = callback_data['time_markers/minibatch'][epoch - 1] if epoch > 0 else 0
callback_data['cost/train'][mbstart + minibatch] = mean_cost
class TrainMulticostCallback(Callback):
"""
Callback for computing average training cost periodically during training.
"""
def __init__(self, wsz=10):
super(TrainMulticostCallback, self).__init__(epoch_freq=1)
self.wsz = wsz
def on_train_begin(self, callback_data, model, epochs):
"""
Called when training is about to begin
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epochs (int): Total epochs
"""
# get number of costs
assert isinstance(model.cost, Multicost), "Cost must be a Multicost"
self.ncosts = len(model.cost.costs)
# get number of nested-costs
self.ncosts_allbranches = sum([self.recursive_multicost_len(c) for c in model.cost.costs])
# preallocate space for the number of minibatches in the whole run
points = callback_data['config'].attrs['total_minibatches']
callback_data.create_dataset("multicost/train", (points, self.ncosts), dtype='float64')
callback_data.create_dataset("multicost/train_allbranches",
(points, self.ncosts_allbranches), dtype='float64')
# make sure our window size is less than or equal to total number of minibatches
self.wsz = min(points, self.wsz)
self.cost_history = deque([], maxlen=int(self.wsz))
# clue in the data reader to use the 'minibatch' time_markers
callback_data['multicost/train'].attrs['time_markers'] = 'minibatch'
callback_data['multicost/train_allbranches'].attrs['time_markers'] = 'minibatch'
def on_minibatch_end(self, callback_data, model, epoch, minibatch):
"""
Called when minibatch is about to end
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epoch (int): index of current epoch
minibatch (int): index of minibatch that is ending
"""
costs = np.array([c.cost for c in model.cost.costs])
self.cost_history.append(costs)
mean_cost = sum(self.cost_history) / len(self.cost_history)
mbstart = callback_data['time_markers/minibatch'][epoch-1] if epoch > 0 else 0
callback_data['multicost/train'][mbstart + minibatch, :] = mean_cost.squeeze()
# Extract all nested-multicosts
costs_allbranches = np.array([self.multicost_recurse(c) for c in model.cost.costs])
# Subtract non-trunk branches from summed trunk cost to get individual branch costs
costs_allbranches = self.separate_branch_costs(costs_allbranches)
callback_data['multicost/train_allbranches'][mbstart + minibatch, :] =\
costs_allbranches.squeeze()
def multicost_recurse(self, x):
"""
Called on a cost object to extract all costs of nested-multicosts, else return main cost.
Arguments:
x (Cost): cost object
"""
# recurse into nested multicosts to grab all cost branches
if type(x) == Multicost:
return [z for z in map(self.multicost_recurse, x.costs)]
else:
return x.cost
def separate_branch_costs(self, x):
"""
Called on list of lists of costs, where each nested list is a separate multicost,
and returns the un-summed individual branch costs.
Arguments:
x (list): list of lists of costs as returned by multicost_recurse
"""
# Subtract branch costs from total cost
x[0] -= np.sum([c[0] if type(c) == list else c for c in x[1:]])
# Recurse into non-trunk branches
for branch in x:
if type(branch) == list:
self.separate_branch_costs(branch)
# Return a flattened version of the list
return np.array([item for sublist in x for item in sublist])
def recursive_multicost_len(self, item):
"""
Called on a cost object and returns the number of actual cost values.
Arguments:
item (Cost): cost object
"""
# compute number of costs nested in multicosts
if type(item) == Multicost:
return sum(self.recursive_multicost_len(subitem) for subitem in item.costs)
else:
return 1
class LossCallback(Callback):
"""
Callback for calculating the loss on a given dataset periodically during training.
Arguments:
eval_set (NervanaDataIterator): dataset to evaluate
epoch_freq (int, optional): how often (in epochs) to log info.
Defaults to every 1 epoch.
"""
def __init__(self, eval_set, epoch_freq=1):
super(LossCallback, self).__init__(epoch_freq=epoch_freq)
self.eval_set = eval_set
self.loss = self.be.zeros((1, 1), dtype=np.float32)
def on_train_begin(self, callback_data, model, epochs):
"""
Called when training is about to begin
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epochs (int): Total epochs
"""
callback_data.create_dataset("cost/loss", (epochs // self.epoch_freq,))
callback_data.create_dataset("time/loss", (epochs // self.epoch_freq,))
callback_data["cost/loss"].attrs['time_markers'] = 'epoch_freq'
callback_data["cost/loss"].attrs['epoch_freq'] = self.epoch_freq
def on_epoch_end(self, callback_data, model, epoch):
"""
Called when an epoch is about to end
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epoch (int): index of epoch that is ending
"""
start_loss = default_timer()
nprocessed = 0
self.loss[:] = 0
self.eval_set.reset()
for x, t in self.eval_set:
x = model.fprop(x, inference=True)
bsz = min(self.eval_set.ndata - nprocessed, self.be.bsz)
model.cost.get_cost(x, t)
nsteps = x.shape[1] // self.be.bsz if not isinstance(x, list) else \
x[0].shape[1] // self.be.bsz
costbuf = model.cost.outputs[:, :bsz * nsteps]
nprocessed += bsz
self.loss[:] = self.loss + self.be.sum(costbuf, axis=1) / nsteps
mean_cost = float(self.loss.get() / nprocessed)
callback_data["time/loss"][epoch // self.epoch_freq] = (default_timer() - start_loss)
callback_data["cost/loss"][epoch // self.epoch_freq] = mean_cost
class MetricCallback(Callback):
"""
Callback for calculating a metric on a given dataset periodically during
training.
Arguments:
eval_set (NervanaDataIterator): dataset to evaluate
metric (Metric): metric to evaluate
epoch_freq (int, optional): how often (in epochs) to log info.
Defaults to every 1 epoch.
"""
def __init__(self, eval_set, metric, epoch_freq=1):
super(MetricCallback, self).__init__(epoch_freq=epoch_freq)
if isinstance(metric, type) and issubclass(metric, Metric):
raise ValueError((
'metric passed in was the class {}. Pass an instance '
'of this class instead.'
).format(metric))
self.eval_set = eval_set
self.metric = metric
self.metric_cnt = len(self.metric.metric_names)
self.metric_desc = ", ".join(self.metric.metric_names)
def on_train_begin(self, callback_data, model, epochs):
"""
Called when training is about to begin
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epochs (int): Total epochs
"""
if 'metrics' not in callback_data:
callback_data.create_group("metrics")
for met in self.metric.metric_names:
group_name = "metrics/%s" % met
callback_data.create_dataset(group_name, (epochs // self.epoch_freq,))
callback_data[group_name].attrs['time_markers'] = 'epoch_freq'
callback_data[group_name].attrs['epoch_freq'] = self.epoch_freq
def on_epoch_end(self, callback_data, model, epoch):
"""
Called when an epoch is about to end
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epoch (int): index of epoch that is ending
"""
if (epoch + 1) % self.epoch_freq == 0:
self.eval_set.reset()
stats = model.eval(self.eval_set, metric=self.metric)
logger.info('%s: %s', self.metric_desc, ", ".join(map(str, stats.flatten())))
for ind, met in enumerate(self.metric.metric_names):
callback_data["metrics/%s" % met][epoch // self.epoch_freq] = stats[ind]
class MultiLabelStatsCallback(Callback):
"""
Callback for calculating statistics on multi-label classification tasks.
Can be used with PrecisionRecall metric to calculate precision and recall
values of the classification task.
Arguments:
eval_set (NervanaDataIterator): dataset to evaluate
labels (list): the list of class names (order must be the same as
the rows of the target)
metric (Metric): An instantiated performance metric like
PrecisionRecall
epoch_freq (int, optional): how often (in epochs) to log info.
Defaults to every 1 epoch.
"""
def __init__(self, eval_set, labels, metric, epoch_freq=1):
super(MultiLabelStatsCallback, self).__init__(epoch_freq=epoch_freq)
self.eval_set = eval_set
self.metric = metric
self.labels = labels
self.metric_desc = ", ".join(self.metric.metric_names)
def on_epoch_end(self, callback_data, model, epoch):
"""
Called when an epoch is about to end
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epoch (int): index of epoch that is ending
"""
if (epoch + 1) % self.epoch_freq == 0:
self.eval_set.reset()
running_stats = np.zeros_like(self.metric.outputs.get(), dtype=np.float32)
# Calculate the metric values
nbatch = 0
for x, t in self.eval_set:
x = model.fprop(x, inference=True)
self.metric(x, t)
running_stats += self.metric.outputs.get()
nbatch += 1
running_stats /= nbatch
# Print the statistics for all the labels
for i, label in enumerate(self.labels):
metric_text = "["
for k, metric in enumerate(self.metric.metric_names):
metric_text += "%s: %d%% " % (metric, running_stats[i][k] * 100.0)
metric_text += "] -> %s\n" % label
sys.stdout.write(metric_text)
sys.stdout.flush()
class HistCallback(Callback):
"""
    Collect histograms of weights of all layers. Configurable to compute
histograms once per minibatch or once per epoch using the plot_per_mini
flag. Histograms are stored to the hdf5 output file and can be visualized
using the nvis tool.
"""
def __init__(self, plot_per_mini, filter_key):
super(HistCallback, self).__init__(epoch_freq=1, minibatch_freq=1)
self.plot_per_mini = plot_per_mini
self.filter = filter_key
def on_train_begin(self, callback_data, model, epochs):
"""
Called when training is about to begin
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epochs (int): Total epochs
"""
self.minibatches = callback_data['config'].attrs['total_minibatches']
hist_grp = callback_data.create_group("hist")
hist_grp.attrs['bins'] = self.be.hist_bins
hist_grp.attrs['offset'] = self.be.hist_offset
hist_grp.attrs['time_markers'] = 'minibatch' if self.plot_per_mini else 'epoch'
hist_grp.attrs['time_steps'] = self.minibatches if self.plot_per_mini else epochs
def on_minibatch_end(self, callback_data, model, epoch, minibatch):
"""
Called when minibatch is about to end
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epoch (int): index of current epoch
minibatch (int): index of minibatch that is ending
"""
if self.plot_per_mini:
prev_epochs_minibatches = 0
if epoch > 0:
prev_epochs_minibatches = callback_data['time_markers/minibatch'][epoch - 1]
timestamp = prev_epochs_minibatches + minibatch
self._save_hist_data(callback_data, model, timestamp)
def on_epoch_end(self, callback_data, model, epoch):
"""
Called when an epoch is about to end
Arguments:
            callback_data (HDF5 dataset): shared data between callbacks
            model (Model): model object
            epoch (int): index of epoch that is ending
"""
if not self.plot_per_mini:
self._save_hist_data(callback_data, model, epoch)
def _save_hist_data(self, callback_data, model, timestamp):
for l_i, l in enumerate(model.layers.layers):
for item in self.filter:
if hasattr(l, item):
name = "%s_%d_%s" % (l.name, l_i, item)
if getattr(l, item):
getattr(l, item).hist(name)
hist_grp = callback_data['hist']
points = hist_grp.attrs['time_steps']
hdata, hmap = self.be.dump_hist_data()
hdata = hdata.get()
for hname in hmap:
hist_dset = hist_grp.require_dataset(hname, shape=(64, points), dtype=hdata.dtype)
hist_dset[:, timestamp] = hdata[hmap[hname]].reshape((64,))
def get_progress_string(tag, epoch, minibatch, nbatches, cost, time,
blockchar=u'\u2588'):
"""
Generate a progress bar string.
Arguments:
        tag (string): Label to print before the bar (e.g. Train, Valid, Test)
epoch (int): current epoch to display
minibatch (int): current minibatch to display
nbatches (int): total number of minibatches, used to display relative progress
cost (float): current cost value
time (float): time elapsed so far in epoch
blockchar (str, optional): character to display for each step of
progress in the bar. Defaults to u2588
(solid block)
"""
max_bar_width = 20
bar_width = int(float(minibatch) / nbatches * max_bar_width)
s = u'Epoch {:<3} [{} |{:<%s}| {:4}/{:<4} batches, {:.2f} cost, {:.2f}s]' % max_bar_width
return s.format(epoch, tag, blockchar * bar_width, minibatch, nbatches, cost, time)
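# Editor's note: an illustrative rendering of the string built above is
#   Epoch 0   [Train |██████              |   10/50   batches, 1.23 cost, 4.56s]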
class ProgressBarCallback(Callback):
"""
Callback providing a live updating console based progress bar.
"""
def __init__(self, epoch_freq=1,
minibatch_freq=1, update_thresh_s=0.1):
super(ProgressBarCallback, self).__init__(epoch_freq=epoch_freq,
minibatch_freq=minibatch_freq)
self.update_thresh_s = update_thresh_s
self._last_strlen = 0
def on_epoch_begin(self, callback_data, model, epoch):
"""
Called when an epoch is about to begin
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epoch (int): index of epoch that is beginning
"""
self.start_epoch = self.last_update = default_timer()
self.nbatches = model.nbatches
def on_minibatch_end(self, callback_data, model, epoch, minibatch):
"""
Called when minibatch is about to end
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epoch (int): index of current epoch
minibatch (int): index of minibatch that is ending
"""
now = default_timer()
mb_complete = minibatch + 1
if (now - self.last_update > self.update_thresh_s or mb_complete == self.nbatches):
self.last_update = now
mbstart = callback_data['time_markers/minibatch'][epoch - 1] if epoch > 0 else 0
train_cost = callback_data['cost/train'][mbstart + minibatch]
progress_string = get_progress_string("Train", epoch, mb_complete, self.nbatches,
train_cost, now - self.start_epoch)
# clear the last line
sys.stdout.write('\r' + ' ' * self._last_strlen + '\r')
# print the new line
if PY3:
sys.stdout.write(progress_string)
else:
sys.stdout.write(progress_string.encode("utf-8"))
self._last_strlen = len(progress_string)
sys.stdout.flush()
def on_epoch_end(self, callback_data, model, epoch):
"""
Called when an epoch is about to end
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epoch (int): index of epoch that is ending
"""
_eil = self._get_cached_epoch_loss(callback_data, model, epoch, 'loss')
if _eil:
progress_string = " [%s %.2f, %.2fs]" % (_eil['costnm'], _eil['cost'], _eil['time'])
sys.stdout.write(progress_string)
sys.stdout.flush()
sys.stdout.write('\n')
class TrainLoggerCallback(Callback):
"""
Callback for logging training progress.
Arguments:
epoch_freq (int, optional): how often (in epochs) to log training info.
Defaults to every 1 epoch.
minibatch_freq (int, optional): how often (in minibatches) to log
training info, or None to log only on
epoch boundaries. Defaults to None.
"""
def __init__(self, epoch_freq=1, minibatch_freq=None):
super(TrainLoggerCallback, self).__init__(epoch_freq=epoch_freq,
minibatch_freq=minibatch_freq)
self.epoch_freq = epoch_freq
self.minibatch_freq = minibatch_freq
def on_train_begin(self, callback_data, model, epochs):
"""
Called when training is about to begin
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epochs (int): Total epochs
"""
logger.info("Model:\n%s", model)
def on_minibatch_end(self, callback_data, model, epoch, minibatch):
"""
Called when minibatch is about to end
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epoch (int): index of current epoch
minibatch (int): index of minibatch that is ending
"""
mbstart = callback_data['time_markers/minibatch'][epoch - 1] if epoch > 0 else 0
train_cost = callback_data['cost/train'][mbstart + minibatch]
logger.info("Epoch %d Minibatch %d complete. Train cost: %f", epoch, minibatch, train_cost)
def on_epoch_end(self, callback_data, model, epoch):
"""
Called when an epoch is about to end
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epoch (int): index of epoch that is ending
"""
_eil = self._get_cached_epoch_loss(callback_data, model, epoch, 'loss')
log_str = "Epoch %d complete. Train Cost %f." % (epoch, model.total_cost)
log_str += " Eval Cost %f" % _eil['cost'] if _eil else ""
logger.info(log_str)
class SaveBestStateCallback(Callback):
"""
Callback for saving the best model state so far.
Arguments:
path (str): repeatedly write the best model parameters seen so far to the
filesystem path specified.
"""
def __init__(self, path):
super(SaveBestStateCallback, self).__init__(epoch_freq=1)
self.best_path = path
self.best_cost = None
def on_epoch_end(self, callback_data, model, epoch):
"""
Called when an epoch is about to end
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epoch (int): index of epoch that is ending
"""
_eil = self._get_cached_epoch_loss(callback_data, model, epoch, 'loss')
if _eil:
if self.best_cost is None or _eil['cost'] < self.best_cost:
                # TODO: switch this to a general serialization op
save_obj(model.serialize(keep_states=True), self.best_path)
self.best_cost = _eil['cost']
class EarlyStopCallback(Callback):
"""
Callback for stopping training when a threshold has been triggered.
Arguments:
        stop_func (Function): receives the current state and the validation
                              error at this time, and returns a tuple
                              (State', Bool) with the updated state and an
                              indication of whether to stop training.
"""
def __init__(self, stop_func):
super(EarlyStopCallback, self).__init__(epoch_freq=1)
self.stop_func = stop_func
self.stop_state = None # state needed for the stop func
def on_epoch_end(self, callback_data, model, epoch):
"""
Called when an epoch is about to end
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epoch (int): index of epoch that is ending
"""
_eil = self._get_cached_epoch_loss(callback_data, model, epoch, 'loss')
if _eil:
self.stop_state, finished = self.stop_func(self.stop_state, _eil['cost'])
if finished:
model.finished = True
logger.warn('Early stopping function triggered: mean_cost %f.' % (_eil['cost']))
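# --- Editor's sketch of a stop function matching the contract above ---
# (illustrative only; not part of the original module)
def _example_patience_stop(state, cost, patience=3):
    """Stop after `patience` consecutive evaluations without improvement."""
    best, stalled = state if state is not None else (float('inf'), 0)
    if cost < best:
        return (cost, 0), False
    return (best, stalled + 1), stalled + 1 >= patience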
class DeconvCallback(Callback):
"""
Callback to store data after projecting activations back to pixel space using
guided backpropagation. See [Springenberg2014]_ for details. Meant to be
used for visualization purposes via nvis.
Arguments:
train_set (NervanaDataIterator): the training dataset
max_fm (int, optional): Maximum number of feature maps to visualize per
layer. Defaults to 16.
dataset_pct (float, optional): Initial portion of validation dataset to
use in finding maximum activations.
Defaults to 25.0 (25%).
Notes:
.. [Springenberg2014] http://arxiv.org/abs/1412.6806
"""
def __init__(self, train_set, valid_set, max_fm=16, dataset_pct=25):
super(DeconvCallback, self).__init__(epoch_freq=1)
self.train_set = train_set
self.valid_set = valid_set
self.max_fm = max_fm
self.dataset_pct = dataset_pct
self.name = "Guided Bprop"
def _progress_update(self, tag, curr, total, unit, time, blockchar=u'\u2588'):
# clear and redraw progress bar
max_bar_width = 20
bar_width = int(float(curr) / total * max_bar_width)
s = u'Visualization [{} |{:<%s}| {:4}/{:<4} {}, {:.2f}s]' % max_bar_width
progress_string = s.format(tag, blockchar * bar_width, curr, total, unit, time)
sys.stdout.write('\r' + progress_string)
sys.stdout.flush()
def on_train_end(self, callback_data, model):
"""
Called when training is about to end
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
"""
# TODO: generalize for more complex topologies
layers = model.layers.layers
self.raw_img_cache = dict()
self.raw_img_key = dict()
C, H, W = layers[0].in_shape
msg = "{} Visualization of {} feature maps per layer:"
logger.info(msg.format(self.name, self.max_fm))
for l, lyr in enumerate(layers):
if isinstance(lyr, Convolution):
K = lyr.convparams['K']
num_fm = min(K, self.max_fm)
lyr_data = callback_data.create_group("deconv/max_act/{0:04}".format(l))
lyr_data.create_dataset("batch_img", (num_fm, 2), dtype='uint16')
lyr_data.create_dataset("fm_loc", (num_fm, 1), dtype='int16')
lyr_data.create_dataset("vis", (num_fm, H, W, C), dtype='uint8')
lyr_data.create_dataset("activation", (num_fm, 1), dtype='float32')
lyr_data['activation'][:] = -float('Inf')
self.valid_set.reset()
t_start = time.time()
num_sampled_batches = int(self.dataset_pct / 100. *
self.valid_set.nbatches + 0.5)
for batch_ind, (x, t) in enumerate(self.valid_set, 0):
if batch_ind > num_sampled_batches:
break
imgs_to_store = self.get_layer_acts(callback_data, model, x, batch_ind)
self.store_images(callback_data, batch_ind, imgs_to_store, x, C, H, W)
self._progress_update("Find Max Act Imgs", batch_ind,
num_sampled_batches, "batches",
time.time() - t_start)
sys.stdout.write("\n")
# Loop over every layer to visualize
t_start = time.time()
for i in range(1, len(layers) + 1):
layer_ind = len(layers) - i
if isinstance(layers[layer_ind], Convolution):
num_fm, act_h, act_w = layers[layer_ind].out_shape
act_size = act_h * act_w
self.visualize_layer(callback_data, model, num_fm, act_size, layer_ind)
self._progress_update("Compute " + self.name, i,
len(layers), "layers",
time.time() - t_start)
sys.stdout.write("\n")
def scale_to_rgb(self, img):
"""
Convert float data to valid RGB values in the range [0, 255]
Arguments:
img (ndarray): the image data
Returns:
img (ndarray): image array with valid RGB values
"""
img_min = np.min(img)
img_rng = np.max(img) - img_min
img_255 = img - img_min
if img_rng > 0:
img_255 /= img_rng
img_255 *= 255.
return img_255
def store_images(self, callback_data, batch_ind, imgs_to_store, img_batch_data, C, H, W):
"""
Store images
"""
n_imgs = len(imgs_to_store)
if n_imgs:
img_data = img_batch_data[:, imgs_to_store].get()
img_store = callback_data.create_group('deconv/img/batch_' + str(batch_ind))
# Store uint8 HWC formatted data for plotting
img_hwc8 = img_store.create_dataset("HWC_uint8", (H, W, C, n_imgs),
dtype='uint8', compression=True)
img_hwc_f32 = np.transpose(img_data.reshape((C, H, W, n_imgs)), (1, 2, 0, 3))
img_hwc8[:] = self.scale_to_rgb(img_hwc_f32)
# keep image in native format to use for fprop in visualization
# don't need this beyond runtime so avoid writing to file
self.raw_img_cache[batch_ind] = img_data
# Keep a lookup from img_ind -> file position
# In order to store only needed imgs from batch in flat prealloc array
self.raw_img_key[batch_ind] = dict()
for i, img_idx in enumerate(imgs_to_store):
img_store.attrs[str(img_idx)] = i
self.raw_img_key[batch_ind][img_idx] = i
def get_layer_acts(self, callback_data, model, x, batch_ind):
"""
Get layer activations
"""
imgs_to_store = set()
for l, lyr in enumerate(model.layers.layers, 0):
x = lyr.fprop(x, inference=True)
if not isinstance(lyr, Convolution):
continue
num_fm, H, W = lyr.out_shape
fm_argmax = self.be.zeros((num_fm, 1), dtype=np.int32)
maxact_idx = self.be.array(np.arange(num_fm) * H * W * self.be.bsz, dtype=np.int32)
act_data = callback_data["deconv/max_act/{0:04}".format(l)]
all_acts = lyr.outputs.reshape((num_fm, H * W * self.be.bsz))
all_acts_flat = lyr.outputs.reshape((num_fm * H * W * self.be.bsz))
fm_argmax[:] = self.be.argmax(all_acts, axis=1)
maxact_idx[:] = maxact_idx + fm_argmax
acts_host = all_acts_flat[maxact_idx].get()
fm_argmax_host = fm_argmax.get()
num_fm_vis = min(num_fm, self.max_fm)
for fm in range(num_fm_vis):
argmax = fm_argmax_host[fm]
img_ind = int(argmax % self.be.bsz)
curr_max_act = acts_host[fm]
if curr_max_act > act_data['activation'][fm]:
act_data['activation'][fm] = curr_max_act
act_data['batch_img'][fm] = batch_ind, img_ind
act_data['fm_loc'][fm] = argmax // self.be.bsz
imgs_to_store.add(img_ind)
return list(imgs_to_store)
def visualize_layer(self, callback_data, model, num_fm, act_size, layer_ind):
"""
Visualize layer
"""
be = model.be
act_data = callback_data["deconv/max_act/{0:04}".format(layer_ind)]
layers = model.layers.layers
# Loop to visualize every feature map
num_fm_vis = min(num_fm, self.max_fm)
for fm in range(num_fm_vis):
batch_ind, img_ind = act_data['batch_img'][fm]
# Prepare a fake minibatch with just the max activation image for this fm
img_batch = np.zeros((self.raw_img_cache[batch_ind].shape[0], be.bsz))
img_cache_offs = self.raw_img_key[batch_ind][img_ind]
img_batch[:, 0] = self.raw_img_cache[batch_ind][:, img_cache_offs]
img_batch = be.array(img_batch)
# Prep model internal state by fprop-ing img
model.fprop(img_batch, inference=True)
# Set the max activation at the correct feature map location
fm_loc = act_data['fm_loc'][fm]
activation = np.zeros((num_fm, act_size, be.bsz))
activation[fm, fm_loc, :] = float(act_data['activation'][fm])
activation = activation.reshape((num_fm * act_size, be.bsz))
activation = be.array(activation)
# Loop over the previous layers to perform deconv
for i, l in enumerate(layers[layer_ind::-1], 0):
if isinstance(l, Convolution):
# zero out w.r.t. current layer activations
activation[:] = be.maximum(activation, 0)
# output shape of deconv is the input shape of conv
C, H, W = [l.convparams[x] for x in ['C', 'H', 'W']]
out = be.empty((C * H * W, be.bsz))
l.be.bprop_conv(layer=l.nglayer, F=l.W, E=activation, grad_I=out)
activation = out
                # zero out w.r.t. input from the lower layer
layer_below_acts = layers[layer_ind - i].inputs
layer_below_acts[:] = be.greater(layer_below_acts, 0)
activation[:] = be.multiply(layer_below_acts, activation)
C, H, W = layers[0].in_shape
activation = activation.get().reshape((C, H, W, be.bsz))
activation = np.transpose(activation, (1, 2, 0, 3))
act_data['vis'][fm] = self.scale_to_rgb(activation[:, :, :, 0])
class BatchNormTuneCallback(Callback):
"""
Callback for tuning batch norm parameters with unbiased estimators for global mean and var.
Arguments:
tune_set (Dataset): data set over which to tune parameters (usually a subset of the
training set)
"""
def __init__(self, tune_set, epoch_freq=1):
super(BatchNormTuneCallback, self).__init__(epoch_freq=epoch_freq)
self.tune_set = tune_set
self.bn_layers = None
def on_epoch_end(self, callback_data, model, epoch):
"""
Called when an epoch is about to end
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epoch (int): index of epoch that is ending
"""
if not self.bn_layers:
self.bn_layers = [l for l in model.layers_to_optimize if type(l) is BatchNorm]
if (epoch + 1) % self.epoch_freq == 0:
self.tune_set.reset()
for batch_idx, (x, t) in enumerate(self.tune_set):
for l in self.bn_layers:
l.rho = float(batch_idx) / (batch_idx + 1.)
model.fprop(x)
model.layers.revert_tensors()
debiaser = float((batch_idx + 1.0) / batch_idx)
for l in self.bn_layers:
l.gvar[:] = l.gvar * debiaser
class WatchTickerCallback(Callback):
"""
Callback that examines a single input, output pair using a validation set.
    This only works with Ticker datasets; it is not meaningful for image,
    video, or other non-ticker inputs.
Arguments:
model (Model): model object
valid_set (DataIterator): Validation dataset to process
epoch_freq (int, optional): how often (in epochs) to examine a pair.
Defaults to every 1 epoch.
"""
def __init__(self, model, valid_set, epoch_freq=1):
super(WatchTickerCallback, self).__init__(epoch_freq=epoch_freq)
self.model = model
self.valid_set = valid_set
if not isinstance(valid_set, Ticker):
raise ValueError('valid set must be a Ticker object')
def on_epoch_end(self, callback_data, model, epoch):
"""
Called when an epoch is about to end
Arguments:
callback_data (HDF5 dataset): shared data between callbacks
model (Model): model object
epoch (int): index of epoch that is ending
"""
for batch_index, (x, t) in enumerate(self.valid_set, 1):
y = model.fprop(x, inference=True)
# So that wider tensors don't wrap around
np.set_printoptions(formatter={'float': '{: 0.1f}'.format},
linewidth=150)
# Assume all sequences in minibatch have same length, then:
# pull the mask buffer to host from device
# get the list of all its columns that were nonzero
# take the maximum of those, which is the total number of timesteps
# divide by batch size to get time steps in one sequence for this minibatch
# add 1 for indexing purposes
columns = 1 + (np.max(t[1].get().nonzero()[1]) // self.be.bsz)
# Print out the name and pretty version of each of X, y, and mask
for name, item in zip(["Inputs", "Outputs", "Targets"],
[x, y, t[0]]):
neon_logger.display(name)
# Only get the first sequence in the minibatch
# There is no bias here - sequences are randomly generated
printable = item.get()[:, ::self.be.bsz]
neon_logger.display(printable[:, :columns])
# Only do this for one minibatch - it's a diagnostic tool, not a log
break
|
matthijsvk/multimodalSR
|
code/Experiments/neon-master/neon/callbacks/callbacks.py
|
Python
|
mit
| 60,510
|
import _plotly_utils.basevalidators
class BordercolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="bordercolorsrc", parent_name="treemap.hoverlabel", **kwargs
):
super(BordercolorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/treemap/hoverlabel/_bordercolorsrc.py
|
Python
|
mit
| 484
|
# coding: utf-8
from __future__ import unicode_literals
from model_mommy import mommy
from django.test import TestCase
from cmdbox.snippets.models import Snippet, Review
class SnippetsModelsTests(TestCase):
def setUp(self):
self.snippet = mommy.make(Snippet, slug='test-snippet')
self.review = mommy.make(Review, snippet=self.snippet, revision=1)
def test_snippet_unicode(self):
self.assertTrue(isinstance(self.snippet, Snippet))
self.assertEqual(self.snippet.__unicode__(), 'test-snippet')
def test_review_unicode(self):
self.assertTrue(isinstance(self.review, Review))
self.assertEqual(self.review.__unicode__(), 'test-snippet (1)')
def test_get_cleaned_content(self):
self.snippet.content = '/* comment */test content'
expected = 'test content'
actual = self.snippet.get_cleaned_content()
self.assertEqual(expected, actual)
def test_get_cleaned_content_2(self):
self.snippet.content = '/* comment */test content="/*"'
expected = 'test content="/*"'
actual = self.snippet.get_cleaned_content()
self.assertEqual(expected, actual)
def test_get_cleaned_content_3(self):
self.snippet.content = 'éçñø§^à'
expected = 'éçñø§^à'
actual = self.snippet.get_cleaned_content()
self.assertEqual(expected, actual)
def test_get_params_with_kwargs(self):
self.snippet.content = 'This is a {test} {formating} with kwargs'
expected = {'args': list(), 'kwargs': set(['test', 'formating'])}
actual = self.snippet.get_params()
self.assertEqual(expected, actual)
def test_get_params_with_args(self):
self.snippet.content = 'This is a {0} {1} with args'
expected = {'args': ['0', '1'], 'kwargs': set()}
actual = self.snippet.get_params()
self.assertEqual(expected, actual)
def test_get_params_with_args_duplicate_key(self):
self.snippet.content = '{0}{1}{0}'
expected = {'args': ['0', '1'], 'kwargs': set()}
actual = self.snippet.get_params()
self.assertEqual(expected, actual)
def test_get_params_with_positional_args(self):
self.snippet.content = 'This is a {} {} with args'
expected = {'args': ['', ''], 'kwargs': set()}
actual = self.snippet.get_params()
self.assertEqual(expected, actual)
def test_get_params_with_args_and_kwargs(self):
self.snippet.content = 'This is a {test} {format} {0} {1} with args and kwargs'
expected = {'args': ['0', '1'], 'kwargs': set(['test', 'format'])}
actual = self.snippet.get_params()
self.assertEqual(expected, actual)
def test_use(self):
self.snippet.content = 'This is a usage {test} with {format}'
expected = 'This is a usage lorem with ipsum'
actual = self.snippet.use(list(), {'test': 'lorem', 'format': 'ipsum'})
self.assertEqual(expected, actual)
self.assertEqual(1, self.snippet.used)
def test_use_raises_value_error(self):
with self.assertRaises(ValueError):
self.snippet.content = '{0}{}'
self.snippet.use(['a', 'b'], dict())
def test_use_raises_index_error(self):
with self.assertRaises(IndexError):
self.snippet.content = '{0}'
self.snippet.use(list(), dict())
def test_use_raises_key_error(self):
with self.assertRaises(KeyError):
self.snippet.content = '{kwarg}'
self.snippet.use(list(), dict())
|
vitorfs/cmdbox
|
cmdbox/snippets/tests/test_models.py
|
Python
|
mit
| 3,560
|
# -*- coding: utf-8 -*-
import logging
import pathlib
from typing import List, Optional
import iotlabcli.auth
from enoslib.api import play_on
from enoslib.objects import Host, Networks, Roles
from enoslib.infra.provider import Provider
from enoslib.infra.enos_iotlab.iotlab_api import IotlabAPI
from enoslib.infra.enos_iotlab.objects import (
IotlabHost,
IotlabSensor,
IotlabNetwork,
ssh_enabled,
)
from enoslib.infra.utils import mk_pools, pick_things
from enoslib.infra.enos_iotlab.constants import PROD
from enoslib.infra.enos_iotlab.configuration import (
PhysNodeConfiguration,
)
logger = logging.getLogger(__name__)
class Iotlab(Provider):
"""
The provider to be used when deploying on FIT/IoT-LAB testbed
Args:
provider_conf (iotlab.Configuration): Configuration file for IoT-LAB platform
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.provider_conf = self.provider_conf.finalize()
self.client = IotlabAPI()
self.hosts: List[IotlabHost] = []
self.sensors: List[IotlabSensor] = []
self.networks: List[IotlabNetwork] = []
def init(self, force_deploy: bool = False):
"""
        Take ownership over FIT/IoT-LAB resources.
        Check if a job is already running in the testbed
        (based on the name given in the config).
        Submit a new job if necessary and wait for its initialization.
        Return the inventory of allocated resources.
        Returns:
            (roles, networks): the inventory of allocated resources.
"""
self._profiles()
self._reserve()
self._deploy()
return self._to_enoslib()
def collect_data_experiment(self, exp_dir: Optional[str] = None):
"""
Collects data about experiment from frontends
During the experiment, FIT/IoT-LAB collects and saves a lot of data
about it under the folder ~/.iot-lab/.
        This method connects to each frontend used during the experiment
        (grenoble, paris, etc.), then compresses and fetches this data.
        Two kinds of information are collected:
1. REST API, about experiment: saved as <exp_id>.tar.gz
2. .iot-lab/, from each frontend: saved as <exp_id>-<frontend>.tar.gz
Args:
exp_dir: Where to saves the tar.gz files. If none is provided
it will save in the current folder.
"""
if exp_dir is None:
dest_dir = str(pathlib.Path.cwd())
else:
dest_dir = str(pathlib.Path(exp_dir))
self.client.collect_data_experiment(dest_dir)
exp_id = self.client.get_job_id()
# getting sites used in tests
sites = set()
for sensor in self.sensors:
sites.add(sensor.site)
for host in self.hosts:
sites.add(host.site)
user, _ = iotlabcli.auth.get_user_credentials()
logger.info(
"Collecting experiment data from sites. Saving in folder: %s", dest_dir
)
with play_on(
roles=[Host(site + ".iot-lab.info", user=user) for site in sites],
on_error_continue=True,
) as p:
filename = "%d-{{ inventory_hostname }}.tar.gz" % (exp_id)
# use --ignore-command-error to avoid errors if monitoring
# files are being written
p.shell(
"cd .iot-lab/; tar --ignore-command-error -czf %s %d/"
% (filename, exp_id)
)
p.fetch(src=".iot-lab/" + filename, dest=dest_dir + "/", flat=True)
p.shell("cd .iot-lab/; rm -f %s" % filename)
def destroy(self):
"""Destroys the job and monitoring profiles"""
self.client.stop_experiment()
self.client.del_profile()
def _assert_clear_pool(self, pool_nodes):
"""Auxiliary method to verify that all nodes from the pool were used"""
for nodes in pool_nodes.values():
assert len(nodes) == 0
def _populate_from_board_nodes(self, iotlab_nodes: list):
"""Populate self.host from board nodes"""
pool_nodes = mk_pools(iotlab_nodes, lambda n: (n["site"], n["archi"]))
for config in self.provider_conf.machines:
iot_nodes = pick_things(
pool_nodes, (config.site, config.archi), config.number
)
for node in iot_nodes:
if ssh_enabled(node["network_address"]):
iotlab_host = IotlabHost(
address=node["network_address"],
roles=config.roles,
site=node["site"],
uid=node["uid"],
archi=node["archi"],
)
self.hosts.append(iotlab_host)
else:
iotlab_sensor = IotlabSensor(
address=node["network_address"],
roles=config.roles,
site=node["site"],
uid=node["uid"],
archi=node["archi"],
image=config.image,
iotlab_client=self.client,
)
self.sensors.append(iotlab_sensor)
self._assert_clear_pool(pool_nodes)
def _populate_from_phys_nodes(self, iotlab_nodes: list):
"""Populate self.host from physical nodes"""
pool_nodes = mk_pools(iotlab_nodes, lambda n: n["network_address"])
for config in self.provider_conf.machines:
for s in config.hostname:
                # exactly one node matches when selecting by hostname
iot_node = pick_things(pool_nodes, s, 1)[0]
if ssh_enabled(iot_node["network_address"]):
iotlab_host = IotlabHost(
address=iot_node["network_address"],
roles=config.roles,
site=iot_node["site"],
uid=iot_node["uid"],
archi=iot_node["archi"],
)
self.hosts.append(iotlab_host)
else:
iotlab_sensor = IotlabSensor(
address=iot_node["network_address"],
roles=config.roles,
site=iot_node["site"],
uid=iot_node["uid"],
archi=iot_node["archi"],
image=config.image,
iotlab_client=self.client,
)
self.sensors.append(iotlab_sensor)
self._assert_clear_pool(pool_nodes)
def _deploy(self):
"""
Deploy image on nodes as described in given configuration
Wait for A8 nodes to boot
"""
image_dict = {}
for sensor in self.sensors:
if sensor.image is not None:
image_dict.setdefault(sensor.image, []).append(sensor.address)
for image, sensors in image_dict.items():
self.client.flash_nodes(image, sensors)
self.client.wait_ssh([h.ssh_address for h in self.hosts])
def _reserve(self):
"""Reserve resources on platform"""
iotlab_nodes = self.client.get_resources(
self.provider_conf.job_name,
self.provider_conf.walltime,
self.provider_conf.machines,
)
if isinstance(self.provider_conf.machines[0], PhysNodeConfiguration):
self._populate_from_phys_nodes(iotlab_nodes)
else:
self._populate_from_board_nodes(iotlab_nodes)
self._get_networks()
logger.info(
"Finished reserving nodes: hosts %s, sensors %s",
str(self.hosts),
str(self.sensors),
)
def _get_networks(self):
"""
Get networks used by A8 nodes on platform
        For now, use a fixed list of addresses since the API
        doesn't provide any information about networks in the testbed.
"""
networks_info = {
"grenoble": [
"10.0.12.0/21",
"2001:660:5307:3000::/64",
],
"paris": [
"10.0.68.0/21",
"2001:660:330f:a200::/64",
],
"saclay": [
"10.0.44.0/21",
"2001:660:3207:400::/64",
],
"strasbourg": [
"10.0.36.0/21",
"2001:660:4701:f080::/64",
],
"lyon": [
"10.0.100.0/21",
],
}
sites = set()
for host in self.hosts:
sites.add(host.site)
# add networks from user
for net in self.provider_conf.networks:
self.networks.extend(
[
IotlabNetwork(roles=net.roles, address=addr)
for addr in networks_info.get(net.site.lower(), [])
]
)
sites.discard(net.site.lower())
# add default networks not in conf
for site in sites:
self.networks.extend(
[
IotlabNetwork(roles=[PROD], address=addr)
for addr in networks_info.get(site.lower(), [])
]
)
def _profiles(self):
"""Create profiles"""
if self.provider_conf.profiles is None:
return
for profile in self.provider_conf.profiles:
if profile.radio is None and profile.consumption is None:
continue
self.client.create_profile(
name=profile.name,
archi=profile.archi,
radio=profile.radio,
consumption=profile.consumption,
)
def _to_enoslib(self):
"""Transform from provider specific resources to library-level resources"""
roles = Roles()
for host in self.hosts:
for role in host.roles:
if host.ssh_address:
roles.setdefault(role, []).append(
Host(host.ssh_address, user="root")
)
        # Note: we can't pass the IotlabHost directly because the Ansible
        # inventory is keyed by address, and A8 nodes expose a different
        # ssh_address.
for sensor in self.sensors:
for role in sensor.roles:
roles.setdefault(role, []).append(sensor)
networks = Networks()
for network in self.networks:
for role in network.roles:
networks.setdefault(role, []).append(network)
return roles, networks
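# A minimal usage sketch (hedged: the builder calls are illustrative and
# depend on enoslib's Configuration API for this provider):
#
#   from enoslib.infra.enos_iotlab.configuration import Configuration
#   conf = (Configuration.from_settings(job_name="demo", walltime="02:00")
#           .add_machine(roles=["sensor"], archi="m3:at86rf231",
#                        site="grenoble", number=2))
#   provider = Iotlab(conf)
#   roles, networks = provider.init()  # reserve, flash images, wait for SSH
#   ...
#   provider.destroy()                 # stop the job and delete profiles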
|
BeyondTheClouds/enoslib
|
enoslib/infra/enos_iotlab/provider.py
|
Python
|
gpl-3.0
| 10,746
|
# Copyright 2014 Google Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import concurrent.futures
import contextlib
import json
import logging
import multiprocessing
import pkgutil
import re
try:
import urllib.request as urllib_request
except ImportError: #pragma: no cover
import urllib2 as urllib_request
import xml.parsers.expat
try:
import xmlrpc.client as xmlrpc_client
except ImportError: #pragma: no cover
import xmlrpclib as xmlrpc_client
try:
CPU_COUNT = max(2, multiprocessing.cpu_count())
except NotImplementedError: #pragma: no cover
CPU_COUNT = 2
PROJECT_NAME = re.compile(r'[\w.-]+')
def just_name(supposed_name):
"""Strip off any versioning or restrictions metadata from a project name."""
return PROJECT_NAME.match(supposed_name).group(0).lower()
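# Illustrative examples: just_name('Django>=1.7') -> 'django' and
# just_name('requests[security]') -> 'requests'.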
@contextlib.contextmanager
def pypi_client():
client = xmlrpc_client.ServerProxy('http://pypi.python.org/pypi')
try:
yield client
finally:
try:
client('close')()
except xml.parsers.expat.ExpatError: #pragma: no cover
# The close hack is not in Python 2.6.
pass
def overrides():
"""Load a set containing projects who are missing the proper Python 3 classifier.
Project names are always lowercased.
"""
raw_bytes = pkgutil.get_data(__name__, 'overrides.json')
return json.loads(raw_bytes.decode('utf-8'))
def py3_classifiers():
"""Fetch the Python 3-related trove classifiers."""
url = 'https://pypi.python.org/pypi?%3Aaction=list_classifiers'
response = urllib_request.urlopen(url)
try:
try:
status = response.status
except AttributeError: #pragma: no cover
status = response.code
if status != 200: #pragma: no cover
msg = 'PyPI responded with status {0} for {1}'.format(status, url)
raise ValueError(msg)
data = response.read()
finally:
response.close()
classifiers = data.decode('utf-8').splitlines()
base_classifier = 'Programming Language :: Python :: 3'
return (classifier for classifier in classifiers
if classifier.startswith(base_classifier))
def projects_matching_classifier(classifier):
"""Find all projects matching the specified trove classifier."""
log = logging.getLogger('ciu')
with pypi_client() as client:
log.info('Fetching project list for {0!r}'.format(classifier))
try:
return frozenset(result[0].lower()
for result in client.browse([classifier]))
except xml.parsers.expat.ExpatError: #pragma: no cover
# Python 2.6 doesn't like empty results.
logging.getLogger('ciu').info("PyPI didn't return any results")
return []
def all_py3_projects(manual_overrides=None):
"""Return the set of names of all projects ported to Python 3, lowercased."""
log = logging.getLogger('ciu')
projects = set()
thread_pool_executor = concurrent.futures.ThreadPoolExecutor(
max_workers=CPU_COUNT)
with thread_pool_executor as executor:
        # Use the executor's map so classifier queries run in the thread
        # pool; the builtin map would leave the pool unused.
        for result in executor.map(projects_matching_classifier,
                                   py3_classifiers()):
projects.update(result)
if manual_overrides is None:
manual_overrides = overrides()
stale_overrides = projects.intersection(manual_overrides)
log.info('Adding {0} overrides:'.format(len(manual_overrides)))
for override in sorted(manual_overrides):
msg = override
try:
msg += ' ({0})'.format(manual_overrides[override])
except TypeError:
# No reason a set can't be used.
pass
log.info(' ' + msg)
if stale_overrides: #pragma: no cover
log.warning('Stale overrides: {0}'.format(stale_overrides))
projects.update(manual_overrides)
return projects
def all_projects():
"""Get the set of all projects on PyPI."""
log = logging.getLogger('ciu')
with pypi_client() as client:
log.info('Fetching all project names from PyPI')
return frozenset(name.lower() for name in client.list_packages())
|
alex/caniusepython3
|
caniusepython3/pypi.py
|
Python
|
apache-2.0
| 4,699
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('predict', '0006_auto_20160902_1715'),
]
operations = [
migrations.RemoveField(
model_name='predictdatasetfile',
name='dataset',
),
migrations.DeleteModel(
name='ScriptToRun',
),
migrations.DeleteModel(
name='PredictDatasetFile',
),
]
|
IQSS/gentb-site
|
apps/predict/migrations/0007_auto_20170202_1539.py
|
Python
|
agpl-3.0
| 522
|
#coding=utf-8
'''
timer app;
used to schedule tasks;
'''
import sys
from os import getcwd
from os.path import realpath, dirname, abspath, join
sys.path.insert(
0,
realpath(join(dirname(__file__), '../'))
)
from in_trip.lib.config import Config
from in_trip.lib.utils import parse_args
from in_trip.lib.timer import Timer
def main():
'''main loop of timer'''
args = parse_args()
config_file = getattr(args, 'config')
section = getattr(args, 'section') or "timer"
if config_file:
if config_file[0] != '/':
config_file = join(getcwd(), abspath(config_file))
Config.ACTUAL_CONFIG_FILE = config_file
Config.SECTION_NAME = section
Timer(Config()).run()
if __name__ == '__main__':
main()
|
seraphlnWu/in_trip
|
in_trip/in_trip/timer.py
|
Python
|
mit
| 754
|
from django.apps import AppConfig
class DjangoSloopConfig(AppConfig):
name = "django_sloop"
verbose_name = "Sloop"
|
Hipo/django-sloop
|
django_sloop/apps.py
|
Python
|
apache-2.0
| 125
|
## @package position_weighted
# Module caffe2.python.layers.position_weighted
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import numpy as np
from caffe2.python import schema
from caffe2.python.layers.layers import (
get_categorical_limit,
ModelLayer,
)
from caffe2.python.layers.tags import Tags
logger = logging.getLogger(__name__)
class PositionWeighted(ModelLayer):
def __init__(self, model, input_record, weight_optim=None,
name="position_weights"):
super(PositionWeighted, self).__init__(model, name, input_record)
assert isinstance(input_record, schema.List), "Incorrect input type"
length_metadata = input_record.lengths.metadata
max_length = (length_metadata.categorical_limit if length_metadata is
not None else None)
if max_length is not None:
self.shape = max_length
else:
self.shape = get_categorical_limit(input_record)
logger.warning(
'{}: categorical_limit of lengths is not available, using '
'categorical_limit of the keys: {}'.format(
str(input_record.lengths()), self.shape))
self.pos_w = self.create_param(param_name='pos_w',
shape=[self.shape, ],
initializer=('ConstantFill', {'value': 1.0}),
optimizer=weight_optim)
self.output_schema = schema.Struct(
('position_weights',
schema.Scalar((np.float32, self.shape),
self.get_next_blob_reference("pos_w_gather")))
)
self.tags.update({Tags.HANDLE_AS_SPARSE_LAYER})
self.tags.update({Tags.GRADIENT_FROM_PS})
def get_memory_usage(self):
return self.shape
def add_ops(self, net):
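        # LengthsRangeFill expands each example's length L into the index
        # sequence [0, 1, ..., L-1]; Gather then looks up the learned
        # weight pos_w[i] for each position i, one weight per sparse entry.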
inc_seq = net.LengthsRangeFill(
[self.input_record.lengths()],
self.input_record.lengths() + '_pos_w_seq'
)
net.Gather(
[self.pos_w, inc_seq],
self.output_schema.position_weights.field_blobs())
|
xzturn/caffe2
|
caffe2/python/layers/position_weighted.py
|
Python
|
apache-2.0
| 2,261
|
import settings as s
import SettingReader
try:
import Image
except ImportError:
from PIL import Image
import pytesseract
import os
import glob
import csv
import re
import pandas as pd
def extract_data(datasource):
# overall image directory
working_dir = os.path.join(s.input['datalogger_image_dir'], datasource['directory'])
output_dir = s.proc['ocr_results_path']
# regex for checking values
regex = re.compile(datasource['validation_expression'])
def validate_val(text):
return regex.match(text)
# date parser function
def date_parser(date):
return pd.datetime.strptime(date.strip(), datasource['datetime_format'])
for image_dir in os.listdir(working_dir):
if not os.path.isdir(os.path.join(working_dir, image_dir)):
continue
dir_name = os.path.basename(image_dir)
print dir_name
settings_displays = SettingReader.SettingReader(os.path.join(
working_dir,
image_dir,
'_displays.ini'))
# Create a file for each display contained
for displayID in settings_displays.values:
# File to write in
file_name = settings_displays.values[displayID]['sensor'] +\
"@" + dir_name + '.' + s.proc['ocr_results_extension']
settings_displays.values[displayID]['file'] = open(
os.path.join(
output_dir,
file_name),
'wb')
# CSV writer function
settings_displays.values[displayID]['writer'] = csv.writer(
settings_displays.values[displayID]['file'],
delimiter=s.proc['ocr_results_separator'])
# Write header
settings_displays.values[displayID]['writer'].writerow(
[s.proc['ocr_results_datecol'],
s.proc['ocr_results_valcol'],
s.proc['ocr_results_sensorcol']])
# loop through pictures
# select for images
for fn in glob.glob(os.path.join(working_dir, image_dir, '*.jpg')):
if os.path.isfile(fn):
# For each text section in image, extract information
for displayID in settings_displays.values:
original = Image.open(fn)
cropped = original.crop((
int(settings_displays.values[displayID]['left']),
int(settings_displays.values[displayID]['top']),
int(settings_displays.values[displayID]['right']),
int(settings_displays.values[displayID]['bottom'])
))
rotated = cropped.rotate(float(settings_displays.values[displayID]['rotation']), expand=True)
grayscale = rotated.convert('L')
# extract value from image
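                    # note: "-psm 7" makes tesseract treat the crop as a
                    # single text line; "digital" appears to be a custom
                    # traineddata model for digital/seven-segment displays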
ocr_value = pytesseract.image_to_string(
grayscale,
lang=datasource['language'],
# config="-psm 7 -l digital")
config="-psm 7 --tessdata-dir tesseract_training/digital -l digital")
# get datetime
datetime_raw, extension = os.path.splitext(os.path.basename(fn))
# write text to csv file
if validate_val(ocr_value):
settings_displays.values[displayID]['writer'].writerow(
[date_parser(datetime_raw).strftime(s.proc['ocr_results_date_format']),
ocr_value,
settings_displays.values[displayID]['sensor']])
if s.proc['save_ocr_crops']:
grayscale.save(os.path.join(
output_dir,
'images',
datetime_raw+'_'+settings_displays.values[displayID]['sensor']+'.jpg'))
# Close all files
for displayID in settings_displays.values:
settings_displays.values[displayID]['file'].close()
|
mmmatthew/floodx_data_preprocessing
|
process_ocr.py
|
Python
|
mit
| 4,127
|
#!/usr/bin/python
import sys
from socket import *
import struct
import move_pb2
import move_direct_pb2
WATCH_PLAYER = {12884902615, 4294967949, 12884902629}
HOST='127.0.0.1'
PORT=10697
BUFSIZ=1024
ADDR=(HOST, PORT)
client=socket(AF_INET, SOCK_STREAM)
client.connect(ADDR)
last_data = ""
player_list = {}
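# Wire format (reconstructed from the unpack calls below):
#   8-byte head  '=IHH'  -> msg_len, msg_id, seq
#   protobuf payload of (msg_len - 8 - 16) bytes
#   16-byte tail '=QIHH' -> player_id plus 8 trailing bytes of padding/flags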
while True:
data=client.recv(BUFSIZ)
data = last_data + data
data_len = len(data)
if data_len == 0:
break
if data_len < 8:
last_data = data
continue
msg_head = data[0:8]
cur_pos = 8
msg_len, msg_id, seq = struct.unpack('=IHH', msg_head)
if msg_len > data_len:
last_data = data
continue
pb_len = msg_len - 8 - 16
pb_data = data[cur_pos:cur_pos+pb_len]
cur_pos = cur_pos + pb_len
extern_data = data[cur_pos:cur_pos+16]
cur_pos = cur_pos + 16
player_id,t1,t1,t1 = struct.unpack('=QIHH', extern_data)
if cur_pos > data_len:
print 'err, cur_pos[%d] > data_len[%d]' %(cur_pos, data_len)
break
if cur_pos == data_len:
last_data = ''
else:
last_data = data[cur_pos:]
# data_len = data_len - 8 - 16
# msg_format = "=IHH" + str(data_len) + 'sQIHH'
# msg_len, msg_id, seq, pb_data, player_id, t1, t1, t1 = struct.unpack(msg_format, data)
# if not player_id in WATCH_PLAYER:
# continue;
if msg_id == 10103:
req = move_pb2.sight_changed_notify()
req.ParseFromString(pb_data)
# print "len = %d, id = %d, seq = %d, player_id = %d add %d delete %d" %(msg_len, msg_id, seq, player_id, len(req.add_player), len(req.delete_player))
for t1 in req.add_player:
if len(t1.data) == 0:
print 'err add player %lu data len == 0' %t1.playerid
sys.exit(0)
print "add player %lu %s %.1f %.1f" % (t1.playerid, t1.name, t1.data[0].pos_x, t1.data[0].pos_z)
if t1.playerid in player_list:
print 'err add player %lu' %t1.playerid
sys.exit(0)
player_list[t1.playerid] = t1.name
for t1 in req.delete_player:
if not t1 in player_list:
print 'err del player %lu' %t1
sys.exit(0)
print "del player %lu %s" % (t1, player_list[t1])
del player_list[t1]
# if msg_id == 10102: #MSG_ID_MOVE_NOTIFY
# req = move_pb2.move_notify()
# req.ParseFromString(pb_data)
# print "get move notify from player %lu " % req.playerid
# if req.playerid == 0:
# print 'player id == 0'
# break;
# print "len = %d, id = %d, seq = %d, player_id = %d" %(msg_len, msg_id, seq, player_id)
if msg_id == 10100:
# print "len = %d, id = %d, seq = %d, player_id = %d" %(msg_len, msg_id, seq, player_id)
req = move_pb2.move_request()
req.ParseFromString(pb_data)
for t1 in req.data:
print "%.1f : %.1f" %(t1.pos_x,t1.pos_z)
#
# if msg_id == 10105:
# print "len = %d, id = %d, seq = %d, player_id = %d" %(msg_len, msg_id, seq, player_id)
# req = move_direct_pb2.move_start_request()
# req.ParseFromString(pb_data)
# print "%.1f : %.1f | %.1f %.1f %.1f" %(req.cur_pos.pos_x, req.cur_pos.pos_z, req.direct_x, req.direct_z, req.direct_y)
#
# if msg_id == 10108:
# print "len = %d, id = %d, seq = %d, player_id = %d" %(msg_len, msg_id, seq, player_id)
# req = move_direct_pb2.move_stop_request()
# req.ParseFromString(pb_data)
# print "%.1f : %.1f" %(req.cur_pos.pos_x, req.cur_pos.pos_z)
|
tsdfsetatata/xserver
|
Server/dump_srv/dump_srv.py
|
Python
|
gpl-3.0
| 3,704
|
#!/usr/bin/python
import smbus
# ===========================================================================
# Adafruit_I2C Class
# ===========================================================================
class Adafruit_I2C :
@staticmethod
def getPiRevision():
"Gets the version number of the Raspberry Pi board"
# Courtesy quick2wire-python-api
# https://github.com/quick2wire/quick2wire-python-api
try:
with open('/proc/cpuinfo','r') as f:
for line in f:
if line.startswith('Revision'):
return 1 if int(line.rstrip()[-4:]) < 3 else 2
except:
return 0
@staticmethod
def getPiI2CBusNumber():
# Gets the I2C bus number /dev/i2c#
return 1 if Adafruit_I2C.getPiRevision() > 1 else 0
def __init__(self, address, busnum=-1, debug=False):
self.address = address
# By default, the correct I2C bus is auto-detected using /proc/cpuinfo
# Alternatively, you can hard-code the bus version below:
# self.bus = smbus.SMBus(0); # Force I2C0 (early 256MB Pi's)
# self.bus = smbus.SMBus(1); # Force I2C1 (512MB Pi's)
self.bus = smbus.SMBus(
busnum if busnum >= 0 else Adafruit_I2C.getPiI2CBusNumber())
self.debug = debug
def reverseByteOrder(self, data):
"Reverses the byte order of an int (16-bit) or long (32-bit) value"
# Courtesy Vishal Sapre
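    # byteCount trick: hex(data)[2:] drops the '0x' prefix (and any 'L'
    # suffix on Python 2 longs); taking every other hex digit ([::2])
    # leaves one character per byte, so len() yields the byte count.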
byteCount = len(hex(data)[2:].replace('L','')[::2])
val = 0
for i in range(byteCount):
val = (val << 8) | (data & 0xff)
data >>= 8
return val
def errMsg(self):
print "Error accessing 0x%02X: Check your I2C address" % self.address
return -1
def write8(self, reg, value):
"Writes an 8-bit value to the specified register/address"
try:
self.bus.write_byte_data(self.address, reg, value)
if self.debug:
print "I2C: Wrote 0x%02X to register 0x%02X" % (value, reg)
except IOError, err:
return self.errMsg()
def write16(self, reg, value):
"Writes a 16-bit value to the specified register/address pair"
try:
self.bus.write_word_data(self.address, reg, value)
if self.debug:
print ("I2C: Wrote 0x%02X to register pair 0x%02X,0x%02X" %
(value, reg, reg+1))
except IOError, err:
return self.errMsg()
def writeList(self, reg, list):
"Writes an array of bytes using I2C format"
try:
if self.debug:
print "I2C: Writing list to register 0x%02X:" % reg
print list
self.bus.write_i2c_block_data(self.address, reg, list)
except IOError, err:
return self.errMsg()
def readList(self, reg, length):
"Read a list of bytes from the I2C device"
try:
results = self.bus.read_i2c_block_data(self.address, reg, length)
if self.debug:
print ("I2C: Device 0x%02X returned the following from reg 0x%02X" %
(self.address, reg))
print results
return results
except IOError, err:
return self.errMsg()
def readU8(self, reg):
"Read an unsigned byte from the I2C device"
try:
result = self.bus.read_byte_data(self.address, reg)
if self.debug:
print ("I2C: Device 0x%02X returned 0x%02X from reg 0x%02X" %
(self.address, result & 0xFF, reg))
return result
except IOError, err:
return self.errMsg()
def readS8(self, reg):
"Reads a signed byte from the I2C device"
try:
result = self.bus.read_byte_data(self.address, reg)
if result > 127: result -= 256
if self.debug:
print ("I2C: Device 0x%02X returned 0x%02X from reg 0x%02X" %
(self.address, result & 0xFF, reg))
return result
except IOError, err:
return self.errMsg()
def readU16(self, reg):
"Reads an unsigned 16-bit value from the I2C device"
try:
result = self.bus.read_word_data(self.address,reg)
if (self.debug):
print "I2C: Device 0x%02X returned 0x%04X from reg 0x%02X" % (self.address, result & 0xFFFF, reg)
return result
except IOError, err:
return self.errMsg()
def readS16(self, reg):
"Reads a signed 16-bit value from the I2C device"
try:
result = self.bus.read_word_data(self.address,reg)
if (self.debug):
print "I2C: Device 0x%02X returned 0x%04X from reg 0x%02X" % (self.address, result & 0xFFFF, reg)
return result
except IOError, err:
return self.errMsg()
if __name__ == '__main__':
try:
bus = Adafruit_I2C(address=0)
print "Default I2C bus is accessible"
except:
print "Error accessing default I2C bus"
|
RorschachUK/meArmPi
|
Adafruit_I2C.py
|
Python
|
mit
| 4,583
|
__author__ = 'y42sora'
import HTTPGetMethod
import UrlListRestApi
class REST(object):
auth = None
def __init__(self, auth=None):
self.auth = auth
def homeTimeline(self):
pass
def publicTimeline(self):
HTTPGetMethod.getMethod(UrlListRestApi.PUBLIC_TIMELINE)
|
y42sora/Twitter4Py3
|
Rest.py
|
Python
|
apache-2.0
| 318
|
import rules
from rules.predicates import is_superuser
from adhocracy4.organisations.predicates import is_initiator
from adhocracy4.organisations.predicates import is_org_group_member
from adhocracy4.organisations.predicates import is_org_member
from .predicates import is_live
from .predicates import is_moderator
from .predicates import is_prj_group_member
from .predicates import is_project_member
from .predicates import is_public
from .predicates import is_semipublic
rules.add_perm('a4projects.add_project',
is_superuser | is_initiator | is_org_group_member)
rules.add_perm('a4projects.change_project',
is_superuser | is_initiator | is_prj_group_member)
rules.add_perm('a4projects.view_project',
is_superuser | is_initiator | is_prj_group_member |
is_moderator | ((is_public | is_semipublic | is_org_member |
is_project_member) & is_live))
rules.add_perm('a4projects.participate_in_project',
is_superuser | is_initiator | is_prj_group_member |
is_moderator | ((is_public | is_org_member |
is_project_member) & is_live))
rules.add_perm('a4projects.delete_project',
is_superuser | is_initiator)
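# Illustrative check (hedged: `user` and `project` are assumed objects):
#   user.has_perm('a4projects.view_project', project)
# With django-rules as an authentication backend, this evaluates the
# predicate expression registered above against the (user, project) pair.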
|
liqd/adhocracy4
|
adhocracy4/projects/rules.py
|
Python
|
agpl-3.0
| 1,278
|
#
# This file is part of Mapnik (C++/Python mapping toolkit)
# Copyright (C) 2014 Artem Pavlenko
#
# Mapnik is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
"""Mapnik Python module.
Boost Python bindings to the Mapnik C++ shared library.
Several things happen when you do:
>>> import mapnik
1) Mapnik C++ objects are imported via the '__init__.py' from the '_mapnik.so' shared object
(_mapnik.pyd on win) which references libmapnik.so (linux), libmapnik.dylib (mac), or
mapnik.dll (win32).
2) The paths to the input plugins and font directories are imported from the 'paths.py'
file which was constructed and installed during SCons installation.
3) All available input plugins and TrueType fonts are automatically registered.
4) Boost Python metaclass injectors are used in the '__init__.py' to extend several
objects adding extra convenience when accessed via Python.
"""
import itertools
import os
import warnings
try:
import json
except ImportError:
import simplejson as json
def bootstrap_env():
"""
If an optional settings file exists, inherit its
environment settings before loading the mapnik library.
This feature is intended for customized packages of mapnik.
The settings file should be a python file with an 'env' variable
that declares a dictionary of key:value pairs to push into the
global process environment, if not already set, like:
env = {'ICU_DATA':'/usr/local/share/icu/'}
"""
if os.path.exists(os.path.join(
os.path.dirname(__file__), 'mapnik_settings.py')):
from .mapnik_settings import env
process_keys = os.environ.keys()
for key, value in env.items():
if key not in process_keys:
os.environ[key] = value
bootstrap_env()
from ._mapnik import *
from . import printing
printing.renderer = render
# The base Boost.Python class
BoostPythonMetaclass = Coord.__class__
class _MapnikMetaclass(BoostPythonMetaclass):
def __init__(self, name, bases, dict):
for b in bases:
if type(b) not in (self, type):
for k, v in list(dict.items()):
if hasattr(b, k):
setattr(b, '_c_' + k, getattr(b, k))
setattr(b, k, v)
return type.__init__(self, name, bases, dict)
# metaclass injector compatible with both python 2 and 3
# http://mikewatkins.ca/2008/11/29/python-2-and-3-metaclasses/
_injector = _MapnikMetaclass('_injector', (object, ), {})
def Filter(*args, **kwargs):
warnings.warn("'Filter' is deprecated and will be removed in Mapnik 3.x, use 'Expression' instead",
DeprecationWarning, 2)
return Expression(*args, **kwargs)
class Envelope(Box2d):
def __init__(self, *args, **kwargs):
warnings.warn("'Envelope' is deprecated and will be removed in Mapnik 3.x, use 'Box2d' instead",
DeprecationWarning, 2)
Box2d.__init__(self, *args, **kwargs)
class _Coord(Coord, _injector):
"""
Represents a point with two coordinates (either lon/lat or x/y).
Following operators are defined for Coord:
Addition and subtraction of Coord objects:
>>> Coord(10, 10) + Coord(20, 20)
Coord(30.0, 30.0)
>>> Coord(10, 10) - Coord(20, 20)
Coord(-10.0, -10.0)
Addition, subtraction, multiplication and division between
a Coord and a float:
>>> Coord(10, 10) + 1
Coord(11.0, 11.0)
>>> Coord(10, 10) - 1
    Coord(9.0, 9.0)
>>> Coord(10, 10) * 2
Coord(20.0, 20.0)
>>> Coord(10, 10) / 2
Coord(5.0, 5.0)
Equality of coords (as pairwise equality of components):
>>> Coord(10, 10) is Coord(10, 10)
False
>>> Coord(10, 10) == Coord(10, 10)
True
"""
def __repr__(self):
return 'Coord(%s,%s)' % (self.x, self.y)
def forward(self, projection):
"""
Projects the point from the geographic coordinate
space into the cartesian space. The x component is
considered to be longitude, the y component the
latitude.
Returns the easting (x) and northing (y) as a
coordinate pair.
Example: Project the geographic coordinates of the
city center of Stuttgart into the local
map projection (GK Zone 3/DHDN, EPSG 31467)
>>> p = Projection('+init=epsg:31467')
>>> Coord(9.1, 48.7).forward(p)
Coord(3507360.12813,5395719.2749)
"""
return forward_(self, projection)
def inverse(self, projection):
"""
Projects the point from the cartesian space
into the geographic space. The x component is
considered to be the easting, the y component
to be the northing.
Returns the longitude (x) and latitude (y) as a
coordinate pair.
Example: Project the cartesian coordinates of the
city center of Stuttgart in the local
map projection (GK Zone 3/DHDN, EPSG 31467)
into geographic coordinates:
>>> p = Projection('+init=epsg:31467')
>>> Coord(3507360.12813,5395719.2749).inverse(p)
Coord(9.1, 48.7)
"""
return inverse_(self, projection)
class _Box2d(Box2d, _injector):
"""
Represents a spatial envelope (i.e. bounding box).
Following operators are defined for Box2d:
Addition:
e1 + e2 is equivalent to e1.expand_to_include(e2) but yields
a new envelope instead of modifying e1
Subtraction:
Currently e1 - e2 returns e1.
Multiplication and division with floats:
Multiplication and division change the width and height of the envelope
    by the given factor without modifying its center.
That is, e1 * x is equivalent to:
e1.width(x * e1.width())
e1.height(x * e1.height()),
except that a new envelope is created instead of modifying e1.
e1 / x is equivalent to e1 * (1.0/x).
Equality: two envelopes are equal if their corner points are equal.
"""
def __repr__(self):
return 'Box2d(%s,%s,%s,%s)' % \
(self.minx, self.miny, self.maxx, self.maxy)
def forward(self, projection):
"""
Projects the envelope from the geographic space
into the cartesian space by projecting its corner
points.
See also:
Coord.forward(self, projection)
"""
return forward_(self, projection)
def inverse(self, projection):
"""
Projects the envelope from the cartesian space
into the geographic space by projecting its corner
points.
See also:
Coord.inverse(self, projection).
"""
return inverse_(self, projection)
class _Projection(Projection, _injector):
def __repr__(self):
return "Projection('%s')" % self.params()
def forward(self, obj):
"""
Projects the given object (Box2d or Coord)
from the geographic space into the cartesian space.
See also:
Box2d.forward(self, projection),
Coord.forward(self, projection).
"""
return forward_(obj, self)
def inverse(self, obj):
"""
Projects the given object (Box2d or Coord)
from the cartesian space into the geographic space.
See also:
Box2d.inverse(self, projection),
Coord.inverse(self, projection).
"""
return inverse_(obj, self)
class _Feature(Feature, _injector):
__geo_interface__ = property(lambda self: json.loads(self.to_geojson()))
class _Geometry(Geometry, _injector):
__geo_interface__ = property(lambda self: json.loads(self.to_geojson()))
class _Datasource(Datasource, _injector):
def all_features(self, fields=None, variables={}):
query = Query(self.envelope())
query.set_variables(variables)
attributes = fields or self.fields()
for fld in attributes:
query.add_property_name(fld)
return self.features(query).features
def featureset(self, fields=None, variables={}):
query = Query(self.envelope())
query.set_variables(variables)
attributes = fields or self.fields()
for fld in attributes:
query.add_property_name(fld)
return self.features(query)
class _Color(Color, _injector):
def __repr__(self):
return "Color(R=%d,G=%d,B=%d,A=%d)" % (self.r, self.g, self.b, self.a)
class _SymbolizerBase(SymbolizerBase, _injector):
# back compatibility
@property
def filename(self):
return self['file']
@filename.setter
def filename(self, val):
self['file'] = val
def _add_symbol_method_to_symbolizers(vars=globals()):
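    # Monkey-patch a symbol() accessor onto every *Symbolizer class in
    # `vars` via the _injector metaclass: the generic Symbolizer variant
    # dispatches on its runtime type(), while concrete subclasses return
    # themselves directly.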
def symbol_for_subcls(self):
return self
def symbol_for_cls(self):
return getattr(self, self.type())()
for name, obj in vars.items():
if name.endswith('Symbolizer') and not name.startswith('_'):
if name == 'Symbolizer':
symbol = symbol_for_cls
else:
symbol = symbol_for_subcls
type('dummy', (obj, _injector), {'symbol': symbol})
_add_symbol_method_to_symbolizers()
def Datasource(**keywords):
"""Wrapper around CreateDatasource.
Create a Mapnik Datasource using a dictionary of parameters.
Keywords must include:
type='plugin_name' # e.g. type='gdal'
See the convenience factory methods of each input plugin for
details on additional required keyword arguments.
"""
return CreateDatasource(keywords)
# convenience factory methods
def Shapefile(**keywords):
"""Create a Shapefile Datasource.
Required keyword arguments:
file -- path to shapefile without extension
Optional keyword arguments:
base -- path prefix (default None)
encoding -- file encoding (default 'utf-8')
>>> from mapnik import Shapefile, Layer
>>> shp = Shapefile(base='/home/mapnik/data',file='world_borders')
>>> lyr = Layer('Shapefile Layer')
>>> lyr.datasource = shp
"""
keywords['type'] = 'shape'
return CreateDatasource(keywords)
def CSV(**keywords):
"""Create a CSV Datasource.
Required keyword arguments:
file -- path to csv
Optional keyword arguments:
      inline -- inline CSV string (if provided, the 'file' argument is ignored and not needed)
base -- path prefix (default None)
encoding -- file encoding (default 'utf-8')
row_limit -- integer limit of rows to return (default: 0)
strict -- throw an error if an invalid row is encountered
escape -- The escape character to use for parsing data
quote -- The quote character to use for parsing data
separator -- The separator character to use for parsing data
headers -- A comma separated list of header names that can be set to add headers to data that lacks them
filesize_max -- The maximum filesize in MB that will be accepted
>>> from mapnik import CSV
>>> csv = CSV(file='test.csv')
>>> from mapnik import CSV
>>> csv = CSV(inline='''wkt,Name\n"POINT (120.15 48.47)","Winthrop, WA"''')
For more information see https://github.com/mapnik/mapnik/wiki/CSV-Plugin
"""
keywords['type'] = 'csv'
return CreateDatasource(keywords)
def GeoJSON(**keywords):
"""Create a GeoJSON Datasource.
Required keyword arguments:
file -- path to json
Optional keyword arguments:
encoding -- file encoding (default 'utf-8')
base -- path prefix (default None)
>>> from mapnik import GeoJSON
>>> geojson = GeoJSON(file='test.json')
"""
keywords['type'] = 'geojson'
return CreateDatasource(keywords)
def PostGIS(**keywords):
"""Create a PostGIS Datasource.
Required keyword arguments:
dbname -- database name to connect to
table -- table name or subselect query
*Note: if using subselects for the 'table' value consider also
passing the 'geometry_field' and 'srid' and 'extent_from_subquery'
options and/or specifying the 'geometry_table' option.
Optional db connection keyword arguments:
user -- database user to connect as (default: see postgres docs)
password -- password for database user (default: see postgres docs)
host -- postgres hostname (default: see postgres docs)
port -- postgres port (default: see postgres docs)
initial_size -- integer size of connection pool (default: 1)
max_size -- integer max of connection pool (default: 10)
persist_connection -- keep connection open (default: True)
Optional table-level keyword arguments:
extent -- manually specified data extent (comma delimited string, default: None)
estimate_extent -- boolean, direct PostGIS to use the faster, less accurate `estimate_extent` over `extent` (default: False)
extent_from_subquery -- boolean, direct Mapnik to query Postgis for the extent of the raw 'table' value (default: uses 'geometry_table')
geometry_table -- specify geometry table to use to look up metadata (default: automatically parsed from 'table' value)
geometry_field -- specify geometry field to use (default: first entry in geometry_columns)
srid -- specify srid to use (default: auto-detected from geometry_field)
row_limit -- integer limit of rows to return (default: 0)
cursor_size -- integer size of binary cursor to use (default: 0, no binary cursor is used)
>>> from mapnik import PostGIS, Layer
>>> params = dict(dbname=env['MAPNIK_NAME'],table='osm',user='postgres',password='gis')
>>> params['estimate_extent'] = False
>>> params['extent'] = '-20037508,-19929239,20037508,19929239'
>>> postgis = PostGIS(**params)
>>> lyr = Layer('PostGIS Layer')
>>> lyr.datasource = postgis
"""
keywords['type'] = 'postgis'
return CreateDatasource(keywords)
def PgRaster(**keywords):
"""Create a PgRaster Datasource.
Required keyword arguments:
dbname -- database name to connect to
table -- table name or subselect query
*Note: if using subselects for the 'table' value consider also
passing the 'raster_field' and 'srid' and 'extent_from_subquery'
options and/or specifying the 'raster_table' option.
Optional db connection keyword arguments:
user -- database user to connect as (default: see postgres docs)
password -- password for database user (default: see postgres docs)
host -- postgres hostname (default: see postgres docs)
port -- postgres port (default: see postgres docs)
initial_size -- integer size of connection pool (default: 1)
max_size -- integer max of connection pool (default: 10)
persist_connection -- keep connection open (default: True)
Optional table-level keyword arguments:
extent -- manually specified data extent (comma delimited string, default: None)
estimate_extent -- boolean, direct PostGIS to use the faster, less accurate `estimate_extent` over `extent` (default: False)
extent_from_subquery -- boolean, direct Mapnik to query Postgis for the extent of the raw 'table' value (default: uses 'geometry_table')
raster_table -- specify geometry table to use to look up metadata (default: automatically parsed from 'table' value)
raster_field -- specify geometry field to use (default: first entry in raster_columns)
srid -- specify srid to use (default: auto-detected from geometry_field)
row_limit -- integer limit of rows to return (default: 0)
cursor_size -- integer size of binary cursor to use (default: 0, no binary cursor is used)
use_overviews -- boolean, use overviews when available (default: false)
prescale_rasters -- boolean, scale rasters on the db side (default: false)
clip_rasters -- boolean, clip rasters on the db side (default: false)
band -- integer, if non-zero interprets the given band (1-based offset) as a data raster (default: 0)
>>> from mapnik import PgRaster, Layer
>>> params = dict(dbname='mapnik',table='osm',user='postgres',password='gis')
>>> params['estimate_extent'] = False
>>> params['extent'] = '-20037508,-19929239,20037508,19929239'
>>> pgraster = PgRaster(**params)
>>> lyr = Layer('PgRaster Layer')
>>> lyr.datasource = pgraster
"""
keywords['type'] = 'pgraster'
return CreateDatasource(keywords)
def Raster(**keywords):
"""Create a Raster (Tiff) Datasource.
Required keyword arguments:
file -- path to stripped or tiled tiff
lox -- lowest (min) x/longitude of tiff extent
loy -- lowest (min) y/latitude of tiff extent
hix -- highest (max) x/longitude of tiff extent
hiy -- highest (max) y/latitude of tiff extent
Hint: lox,loy,hix,hiy make a Mapnik Box2d
Optional keyword arguments:
base -- path prefix (default None)
multi -- whether the image is in tiles on disk (default False)
Multi-tiled keyword arguments:
x_width -- virtual image number of tiles in X direction (required)
y_width -- virtual image number of tiles in Y direction (required)
tile_size -- if an image is in tiles, how large are the tiles (default 256)
tile_stride -- if an image is in tiles, what's the increment between rows/cols (default 1)
>>> from mapnik import Raster, Layer
>>> raster = Raster(base='/home/mapnik/data',file='elevation.tif',lox=-122.8,loy=48.5,hix=-122.7,hiy=48.6)
>>> lyr = Layer('Tiff Layer')
>>> lyr.datasource = raster
"""
keywords['type'] = 'raster'
return CreateDatasource(keywords)
def Gdal(**keywords):
"""Create a GDAL Raster Datasource.
Required keyword arguments:
file -- path to GDAL supported dataset
Optional keyword arguments:
base -- path prefix (default None)
shared -- boolean, open GdalDataset in shared mode (default: False)
bbox -- tuple (minx, miny, maxx, maxy). If specified, overrides the bbox detected by GDAL.
>>> from mapnik import Gdal, Layer
>>> dataset = Gdal(base='/home/mapnik/data',file='elevation.tif')
>>> lyr = Layer('GDAL Layer from TIFF file')
>>> lyr.datasource = dataset
"""
keywords['type'] = 'gdal'
if 'bbox' in keywords:
if isinstance(keywords['bbox'], (tuple, list)):
keywords['bbox'] = ','.join([str(item)
for item in keywords['bbox']])
return CreateDatasource(keywords)
def Occi(**keywords):
"""Create a Oracle Spatial (10g) Vector Datasource.
Required keyword arguments:
user -- database user to connect as
password -- password for database user
      host -- oracle host to connect to (does not refer to SID in tnsnames.ora)
table -- table name or subselect query
Optional keyword arguments:
initial_size -- integer size of connection pool (default 1)
max_size -- integer max of connection pool (default 10)
extent -- manually specified data extent (comma delimited string, default None)
estimate_extent -- boolean, direct Oracle to use the faster, less accurate estimate_extent() over extent() (default False)
encoding -- file encoding (default 'utf-8')
geometry_field -- specify geometry field (default 'GEOLOC')
use_spatial_index -- boolean, force the use of the spatial index (default True)
>>> from mapnik import Occi, Layer
>>> params = dict(host='myoracle',user='scott',password='tiger',table='test')
>>> params['estimate_extent'] = False
>>> params['extent'] = '-20037508,-19929239,20037508,19929239'
>>> oracle = Occi(**params)
>>> lyr = Layer('Oracle Spatial Layer')
>>> lyr.datasource = oracle
"""
keywords['type'] = 'occi'
return CreateDatasource(keywords)
def Ogr(**keywords):
"""Create a OGR Vector Datasource.
Required keyword arguments:
file -- path to OGR supported dataset
layer -- name of layer to use within datasource (optional if layer_by_index or layer_by_sql is used)
Optional keyword arguments:
layer_by_index -- choose layer by index number instead of by layer name or sql.
layer_by_sql -- choose layer by sql query number instead of by layer name or index.
base -- path prefix (default None)
encoding -- file encoding (default 'utf-8')
>>> from mapnik import Ogr, Layer
>>> datasource = Ogr(base='/home/mapnik/data',file='rivers.geojson',layer='OGRGeoJSON')
>>> lyr = Layer('OGR Layer from GeoJSON file')
>>> lyr.datasource = datasource
"""
keywords['type'] = 'ogr'
return CreateDatasource(keywords)
def SQLite(**keywords):
"""Create a SQLite Datasource.
Required keyword arguments:
file -- path to SQLite database file
table -- table name or subselect query
Optional keyword arguments:
base -- path prefix (default None)
encoding -- file encoding (default 'utf-8')
extent -- manually specified data extent (comma delimited string, default None)
metadata -- name of auxiliary table containing record for table with xmin, ymin, xmax, ymax, and f_table_name
geometry_field -- name of geometry field (default 'the_geom')
key_field -- name of primary key field (default 'OGC_FID')
row_offset -- specify a custom integer row offset (default 0)
row_limit -- specify a custom integer row limit (default 0)
wkb_format -- specify a wkb type of 'spatialite' (default None)
use_spatial_index -- boolean, instruct sqlite plugin to use Rtree spatial index (default True)
>>> from mapnik import SQLite, Layer
>>> sqlite = SQLite(base='/home/mapnik/data',file='osm.db',table='osm',extent='-20037508,-19929239,20037508,19929239')
>>> lyr = Layer('SQLite Layer')
>>> lyr.datasource = sqlite
"""
keywords['type'] = 'sqlite'
return CreateDatasource(keywords)
def Rasterlite(**keywords):
"""Create a Rasterlite Datasource.
Required keyword arguments:
file -- path to Rasterlite database file
table -- table name or subselect query
Optional keyword arguments:
base -- path prefix (default None)
extent -- manually specified data extent (comma delimited string, default None)
>>> from mapnik import Rasterlite, Layer
>>> rasterlite = Rasterlite(base='/home/mapnik/data',file='osm.db',table='osm',extent='-20037508,-19929239,20037508,19929239')
>>> lyr = Layer('Rasterlite Layer')
>>> lyr.datasource = rasterlite
"""
keywords['type'] = 'rasterlite'
return CreateDatasource(keywords)
def Osm(**keywords):
"""Create a Osm Datasource.
Required keyword arguments:
file -- path to OSM file
Optional keyword arguments:
encoding -- file encoding (default 'utf-8')
url -- url to fetch data (default None)
bbox -- data bounding box for fetching data (default None)
>>> from mapnik import Osm, Layer
>>> datasource = Osm(file='test.osm')
>>> lyr = Layer('Osm Layer')
>>> lyr.datasource = datasource
"""
# note: parser only supports libxml2 so not exposing option
# parser -- xml parser to use (default libxml2)
keywords['type'] = 'osm'
return CreateDatasource(keywords)
def Python(**keywords):
"""Create a Python Datasource.
>>> from mapnik import Python, PythonDatasource
>>> datasource = Python('PythonDataSource')
>>> lyr = Layer('Python datasource')
>>> lyr.datasource = datasource
"""
keywords['type'] = 'python'
return CreateDatasource(keywords)
def MemoryDatasource(**keywords):
"""Create a Memory Datasource.
Optional keyword arguments:
(TODO)
"""
params = Parameters()
params.append(Parameter('type', 'memory'))
return MemoryDatasourceBase(params)
class PythonDatasource(object):
"""A base class for a Python data source.
Optional arguments:
envelope -- a mapnik.Box2d (minx, miny, maxx, maxy) envelope of the data source, default (-180,-90,180,90)
geometry_type -- one of the DataGeometryType enumeration values, default Point
data_type -- one of the DataType enumerations, default Vector
"""
def __init__(self, envelope=None, geometry_type=None, data_type=None):
self.envelope = envelope or Box2d(-180, -90, 180, 90)
self.geometry_type = geometry_type or DataGeometryType.Point
self.data_type = data_type or DataType.Vector
def features(self, query):
"""Return an iterable which yields instances of Feature for features within the passed query.
Required arguments:
query -- a Query instance specifying the region for which features should be returned
"""
return None
def features_at_point(self, point):
"""Rarely used. Return an iterable which yields instances of Feature for the specified point."""
return None
@classmethod
def wkb_features(cls, keys, features):
"""A convenience function to wrap an iterator yielding pairs of WKB format geometry and dictionaries of
key-value pairs into mapnik features. Return this from PythonDatasource.features() passing it a sequence of keys
to appear in the output and an iterator yielding features.
        For example, one might have a features() method in a derived class like the following:
def features(self, query):
# ... create WKB features feat1 and feat2
return mapnik.PythonDatasource.wkb_features(
keys = ( 'name', 'author' ),
features = [
(feat1, { 'name': 'feat1', 'author': 'alice' }),
(feat2, { 'name': 'feat2', 'author': 'bob' }),
]
)
"""
ctx = Context()
[ctx.push(x) for x in keys]
def make_it(feat, idx):
f = Feature(ctx, idx)
geom, attrs = feat
f.add_geometries_from_wkb(geom)
for k, v in attrs.iteritems():
f[k] = v
return f
return itertools.imap(make_it, features, itertools.count(1))
@classmethod
def wkt_features(cls, keys, features):
"""A convenience function to wrap an iterator yielding pairs of WKT format geometry and dictionaries of
key-value pairs into mapnik features. Return this from PythonDatasource.features() passing it a sequence of keys
to appear in the output and an iterator yielding features.
        For example, one might have a features() method in a derived class like the following:
def features(self, query):
# ... create WKT features feat1 and feat2
return mapnik.PythonDatasource.wkt_features(
keys = ( 'name', 'author' ),
features = [
(feat1, { 'name': 'feat1', 'author': 'alice' }),
(feat2, { 'name': 'feat2', 'author': 'bob' }),
]
)
"""
ctx = Context()
[ctx.push(x) for x in keys]
def make_it(feat, idx):
f = Feature(ctx, idx)
geom, attrs = feat
f.add_geometries_from_wkt(geom)
for k, v in attrs.iteritems():
f[k] = v
return f
return itertools.imap(make_it, features, itertools.count(1))
class _TextSymbolizer(TextSymbolizer, _injector):
@property
def name(self):
if isinstance(self.properties.format_tree, FormattingText):
return self.properties.format_tree.text
else:
# There is no single expression which could be returned as name
raise RuntimeError(
"TextSymbolizer uses complex formatting features, but old compatibility interface is used to access it. Use self.properties.format_tree instead.")
@name.setter
def name(self, name):
self.properties.format_tree = FormattingText(name)
@property
def text_size(self):
return self.format.text_size
@text_size.setter
def text_size(self, text_size):
self.format.text_size = text_size
@property
def face_name(self):
return self.format.face_name
@face_name.setter
def face_name(self, face_name):
self.format.face_name = face_name
@property
def fontset(self):
return self.format.fontset
@fontset.setter
def fontset(self, fontset):
self.format.fontset = fontset
@property
def character_spacing(self):
return self.format.character_spacing
@character_spacing.setter
def character_spacing(self, character_spacing):
self.format.character_spacing = character_spacing
@property
def line_spacing(self):
return self.format.line_spacing
@line_spacing.setter
def line_spacing(self, line_spacing):
self.format.line_spacing = line_spacing
@property
def text_opacity(self):
return self.format.text_opacity
@text_opacity.setter
def text_opacity(self, text_opacity):
self.format.text_opacity = text_opacity
@property
def wrap_before(self):
return self.format.wrap_before
@wrap_before.setter
def wrap_before(self, wrap_before):
self.format.wrap_before = wrap_before
@property
def text_transform(self):
return self.format.text_transform
@text_transform.setter
def text_transform(self, text_transform):
self.format.text_transform = text_transform
@property
def fill(self):
return self.format.fill
@fill.setter
def fill(self, fill):
self.format.fill = fill
@property
def halo_fill(self):
return self.format.halo_fill
@halo_fill.setter
def halo_fill(self, halo_fill):
self.format.halo_fill = halo_fill
@property
def halo_radius(self):
return self.format.halo_radius
@halo_radius.setter
def halo_radius(self, halo_radius):
self.format.halo_radius = halo_radius
@property
def label_placement(self):
return self.properties.label_placement
@label_placement.setter
def label_placement(self, label_placement):
self.properties.label_placement = label_placement
@property
def horizontal_alignment(self):
return self.properties.horizontal_alignment
@horizontal_alignment.setter
def horizontal_alignment(self, horizontal_alignment):
self.properties.horizontal_alignment = horizontal_alignment
@property
def justify_alignment(self):
return self.properties.justify_alignment
@justify_alignment.setter
def justify_alignment(self, justify_alignment):
self.properties.justify_alignment = justify_alignment
@property
def vertical_alignment(self):
return self.properties.vertical_alignment
@vertical_alignment.setter
def vertical_alignment(self, vertical_alignment):
self.properties.vertical_alignment = vertical_alignment
@property
def orientation(self):
return self.properties.orientation
@orientation.setter
def orientation(self, orientation):
self.properties.orientation = orientation
@property
def displacement(self):
return self.properties.displacement
@displacement.setter
def displacement(self, displacement):
self.properties.displacement = displacement
@property
def label_spacing(self):
return self.properties.label_spacing
@label_spacing.setter
def label_spacing(self, label_spacing):
self.properties.label_spacing = label_spacing
@property
def label_position_tolerance(self):
return self.properties.label_position_tolerance
@label_position_tolerance.setter
def label_position_tolerance(self, label_position_tolerance):
self.properties.label_position_tolerance = label_position_tolerance
@property
def avoid_edges(self):
return self.properties.avoid_edges
@avoid_edges.setter
def avoid_edges(self, avoid_edges):
self.properties.avoid_edges = avoid_edges
@property
def minimum_distance(self):
return self.properties.minimum_distance
@minimum_distance.setter
def minimum_distance(self, minimum_distance):
self.properties.minimum_distance = minimum_distance
@property
def minimum_padding(self):
return self.properties.minimum_padding
@minimum_padding.setter
def minimum_padding(self, minimum_padding):
self.properties.minimum_padding = minimum_padding
@property
def minimum_path_length(self):
return self.properties.minimum_path_length
@minimum_path_length.setter
def minimum_path_length(self, minimum_path_length):
self.properties.minimum_path_length = minimum_path_length
@property
def maximum_angle_char_delta(self):
return self.properties.maximum_angle_char_delta
@maximum_angle_char_delta.setter
def maximum_angle_char_delta(self, maximum_angle_char_delta):
self.properties.maximum_angle_char_delta = maximum_angle_char_delta
@property
def allow_overlap(self):
return self.properties.allow_overlap
@allow_overlap.setter
def allow_overlap(self, allow_overlap):
self.properties.allow_overlap = allow_overlap
@property
def text_ratio(self):
return self.properties.text_ratio
@text_ratio.setter
def text_ratio(self, text_ratio):
self.properties.text_ratio = text_ratio
@property
def wrap_width(self):
return self.properties.wrap_width
@wrap_width.setter
def wrap_width(self, wrap_width):
self.properties.wrap_width = wrap_width
def mapnik_version_from_string(version_string):
"""Return the Mapnik version from a string."""
n = version_string.split('.')
return (int(n[0]) * 100000) + (int(n[1]) * 100) + (int(n[2]))
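# For example (illustrative): mapnik_version_from_string('2.2.0') yields
# 2 * 100000 + 2 * 100 + 0 == 200200.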
def register_plugins(path=None):
"""Register plugins located by specified path"""
if not path:
if 'MAPNIK_INPUT_PLUGINS_DIRECTORY' in os.environ:
path = os.environ.get('MAPNIK_INPUT_PLUGINS_DIRECTORY')
else:
from .paths import inputpluginspath
path = inputpluginspath
DatasourceCache.register_datasources(path)
def register_fonts(path=None, valid_extensions=[
        '.ttf', '.otf', '.ttc', '.pfa', '.pfb', '.dfont', '.woff']):
"""Recursively register fonts using path argument as base directory"""
if not path:
if 'MAPNIK_FONT_DIRECTORY' in os.environ:
path = os.environ.get('MAPNIK_FONT_DIRECTORY')
else:
from .paths import fontscollectionpath
path = fontscollectionpath
for dirpath, _, filenames in os.walk(path):
for filename in filenames:
if os.path.splitext(filename.lower())[1] in valid_extensions:
FontEngine.instance().register_font(os.path.join(dirpath, filename))
# auto-register known plugins and fonts
register_plugins()
register_fonts()
|
garnertb/python-mapnik
|
mapnik/__init__.py
|
Python
|
lgpl-2.1
| 35,734
|
from django.http import HttpResponseNotAllowed, JsonResponse
from django.shortcuts import resolve_url
from django.conf import settings
import mock
from wristband.authentication.views import login_view, logout_view
@mock.patch('wristband.authentication.views.login')
@mock.patch('wristband.authentication.views.authenticate')
def test_login_view(mocked_authenticate, mocked_login, client, django_user_model):
mock_user = django_user_model(username='test_user')
mocked_authenticate.return_value = mock_user
url = resolve_url('login')
response = client.post(url, {'username': 'test_user', 'password': 'password'})
mocked_authenticate.assert_called_with(username='test_user', password='password')
assert isinstance(response, JsonResponse)
assert response.status_code == 200
assert 'session_key' in response.content
@mock.patch('wristband.authentication.views.authenticate')
def test_login_view_bad_credential(mocked_authenticate, rf):
mocked_authenticate.return_value = None
url = resolve_url('login')
request = rf.post(url, {'username': 'test_user', 'password': 'password'})
response = login_view(request)
mocked_authenticate.assert_called_with(username='test_user', password='password')
assert 'Please ensure you are using your test login' in response.content
assert isinstance(response, JsonResponse)
assert response.status_code == 401
def test_login_view_wrong_method(client):
url = resolve_url('login')
response = client.get(url)
assert isinstance(response, HttpResponseNotAllowed)
@mock.patch('wristband.authentication.views.logout')
def test_logout_view(mocked_logout, rf):
url = resolve_url('logout')
request = rf.get(url)
response = logout_view(request)
assert isinstance(response, JsonResponse)
mocked_logout.assert_called_with(request)
def test_logout_view_wrong_method(client):
url = resolve_url('logout')
response = client.post(url)
assert isinstance(response, HttpResponseNotAllowed)
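# Illustrative sketch (not part of this test module): a login_view consistent
# with the contract the tests above exercise. The real implementation lives in
# wristband.authentication.views and may differ; the control flow below is an
# assumption inferred from the assertions.
#
# def login_view(request):
#     if request.method != 'POST':
#         return HttpResponseNotAllowed(['POST'])
#     user = authenticate(username=request.POST.get('username'),
#                         password=request.POST.get('password'))
#     if user is None:
#         return JsonResponse({'error': 'Please ensure you are using your '
#                                       'test login'}, status=401)
#     login(request, user)
#     return JsonResponse({'session_key': request.session.session_key})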
|
hmrc/wristband
|
wristband/authentication/tests/test_views.py
|
Python
|
apache-2.0
| 2,014
|
# -*- coding: utf-8 -*-
"""
@author: Jeff Cavner
@contact: jcavner@ku.edu
@license: gpl2
@copyright: Copyright (C) 2014, University of Kansas Center for Research
Lifemapper Project, lifemapper [at] ku [dot] edu,
Biodiversity Institute,
1345 Jayhawk Boulevard, Lawrence, Kansas, 66045, USA
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
"""
import os
import types
import zipfile
import numpy as np
from collections import namedtuple
from PyQt4.QtGui import *
from PyQt4.QtCore import QSettings, Qt, SIGNAL, QUrl
from qgis.core import *
from qgis.gui import *
from lifemapperTools.tools.ui_newExperimentDialog import Ui_Dialog
from lifemapperTools.tools.listPALayers import ListPALayersDialog
from lifemapperTools.tools.constructGrid import ConstructGridDialog
from lifemapperTools.tools.uploadLayers import UploadDialog
from lifemapperTools.tools.listBuckets import ListBucketsDialog
from lifemapperTools.tools.addSDMLayer import UploadSDMDialog
from lifemapperTools.common.pluginconstants import ListExperiments, GENERIC_REQUEST
from lifemapperTools.common.pluginconstants import QGISProject
from lifemapperTools.common.workspace import Workspace
from lifemapperTools.tools.radTable import RADTable
from lifemapperTools.tools.uploadTreeOTL import UploadTreeDialog
from lifemapperTools.common.communicate import Communicate
class NewExperimentDialog(QDialog, Ui_Dialog):
# .............................................................................
# Constructor
# .............................................................................
def __init__(self, iface, RADids=None, inputs=None, client=None, email=None):
QDialog.__init__(self)
#self.setWindowFlags(self.windowFlags() & Qt.WindowMinimizeButtonHint)
self.interface = iface
self.workspace = Workspace(self.interface,client)
self.checkExperiments()
self.setupUi()
self.client = client
#cc = self.rejectBut
#bok = self.acceptBut
self.expId = None
self.mapunits = None
self.keyvalues = {}
if email is not None:
self.keyvalues['email'] = email
#_Controller.__init__(self, iface, BASE_URL=ListExperiments.BASE_URL,
# STATUS_URL=ListExperiments.STATUS_URL,
# REST_URL=ListExperiments.REST_URL,
# cancel_close=cc, okayButton=bok, ids=RADids,
# initializeWithData=False, client=client)
# ..............................................................................
def _checkQgisProjForKey(self):
project = QgsProject.instance()
filename = str(project.fileName())
found = False
s = QSettings()
for key in s.allKeys():
if 'RADExpProj' in key:
value = str(s.value(key))
if value == filename:
found = True
expId = key.split('_')[1]
s.setValue("currentExpID", int(expId))
return found
# ..............................................................................
def checkExperiments(self):
"""
        @summary: Gets the current expId. If there is one, it looks up the
        project path associated with that id: if a path exists it triggers a
        project save, otherwise it prompts a save-as and sets the project
        path for the id. Finally it opens a new QGIS project.
"""
s = QSettings()
currentExpId = s.value("currentExpID",QGISProject.NOEXPID,type=int)
if currentExpId != QGISProject.NOEXPID:
currentpath = str(s.value("RADExpProj_"+str(currentExpId),
QGISProject.NOPROJECT))
if currentpath != QGISProject.NOPROJECT and currentpath != '':
self.interface.actionSaveProject().trigger()
else:
if len(QgsMapLayerRegistry.instance().mapLayers().items()) > 0:
#self.interface.actionSaveProjectAs().trigger()
self.workspace.saveQgsProjectAs(currentExpId)
# now actionNewProject
self.interface.actionNewProject().trigger()
s.setValue("currentExpID",QGISProject.NOEXPID)
        else:  # no experiment id
            # A QGIS project can be open with no current id (e.g. after a
            # sign-out) even though that project belongs to an id; in that
            # case a new project needs to be started.
if len(QgsMapLayerRegistry.instance().mapLayers().items()) == 0 or self._checkQgisProjForKey():
self.interface.actionNewProject().trigger()
# ..............................................................................
#def accept(self):
#
#
# valid = self.validate()
# if self.expId is not None:
# self.openNewDialog()
# elif valid and self.expId is None:
# self.startThread(GENERIC_REQUEST,outputfunc = self.newExperimentCallBack,
# requestfunc=self.client.rad.postExperiment, client=self.client,
# inputs=self.keyvalues)
# elif not valid and self.expId is None:
# pass
# ..............................................................................
def postNewOpen(self,buttonValue):
valid = self.validate()
if self.expId is not None:
self.openNewDialog(buttonValue)
elif valid and self.expId is None:
try:
print self.keyvalues
exp = self.client.rad.postExperiment(**self.keyvalues)
except Exception, e:
message = "Error posting new experiment "+str(e)
msgBox = QMessageBox.information(self,
"Problem...",
message,
QMessageBox.Ok)
else:
self.newExperimentCallBack(exp,buttonValue)
elif not valid and self.expId is None:
pass
# ..............................................................................
def validate(self):
valid = True
message = ""
self.keyvalues['epsgCode'] = self.epsgEdit.text()
self.keyvalues['name'] = self.expNameEdit.text()
self.keyvalues['description'] = self.description.toPlainText()
epsg = self.epsgEdit.text()
#self.setMapUnitsFromEPSG(epsg=epsg)
experimentname = self.expNameEdit.text()
if len(experimentname) <= 0:
message = "Please supply a experiment name"
valid = False
elif len(epsg) <= 0:
message = "Please supply an EPSG code"
valid = False
else:
self.setMapUnitsFromEPSG(epsg=epsg)
if self.mapunits is None or self.mapunits == 'UnknownUnit':
message = "Invalid EPSG Code"
valid = False
if not valid:
msgBox = QMessageBox.information(self,
"Problem...",
message,
QMessageBox.Ok)
return valid
# ..............................................................................
def openProjSelectorSetEPSG(self):
"""
@summary: opens the stock qgis projection selector
and sets epsg edit field and set map units attribute
"""
projSelector = QgsGenericProjectionSelector(self)
dialog = projSelector.exec_()
EpsgCode = projSelector.selectedAuthId().replace('EPSG:','')
        # some projections don't have EPSG codes
if dialog != 0:
if EpsgCode != 0: # will be zero if projection doesn't have an epsg
crs = QgsCoordinateReferenceSystem()
crs.createFromOgcWmsCrs( projSelector.selectedAuthId() )
mapunitscode = crs.mapUnits()
if mapunitscode == 0:
self.mapunits = 'meters'
elif mapunitscode == 1:
self.mapunits = 'feet'
elif mapunitscode == 2:
self.mapunits = 'dd'
self.epsgEdit.setText(str(EpsgCode))
else:
                # error message saying that the user's chosen projection doesn't have an EPSG code
                self.mapunits = None
                message = "The projection you have chosen does not have an EPSG code"
msgBox = QMessageBox.information(self,
"Problem...",
message,
QMessageBox.Ok)
else:
self.mapunits = None
# ..............................................................................
def verifyEmail(self,email):
valid = True
if '@' in email:
atIndex = email.index('@')
domainsubstring = email[atIndex+1:]
if '.' in domainsubstring:
if domainsubstring.index('.') == 0:
valid = False
else:
valid = False
else:
valid = False
return valid
# ..............................................................................
def cleanInputGridLayout(self):
"""@summary: cleans out the input grid layout"""
if not(self.gridLayout_input.isEmpty()):
for childindex in range(0,self.gridLayout_input.count()):
item = self.gridLayout_input.takeAt(0)
                if item is not None:
item.widget().deleteLater()
self.gridLayout_input.update()
# ..............................................................................
def setMapUnitsFromEPSG(self,epsg=None):
crs = QgsCoordinateReferenceSystem()
if epsg:
crs.createFromOgcWmsCrs("EPSG:%s" % (str(epsg)))
else:
crs.createFromOgcWmsCrs("EPSG:%s" % (str(self.expEPSG)))
mapunitscode = crs.mapUnits()
if mapunitscode == 0:
self.mapunits = 'meters'
elif mapunitscode == 1:
self.mapunits = 'feet'
elif mapunitscode == 2:
self.mapunits = 'dd'
elif mapunitscode == 3:
self.mapunits = 'UnknownUnit'
# ..............................................................................
# ..............................................................................
def newExperimentCallBack(self, item, buttonValue):
"""
@summary: when a new expid comes back it gets saved to settings as
currentExpID, then calls openNewDialog
"""
self.epsgEdit.setEnabled(False)
self.expNameEdit.setEnabled(False)
self.description.setEnabled(False)
self.emptyRadio.setEnabled(False)
self.expId = item.id
self.expEPSG = item.epsgcode
if self.mapunits is None:
self.setMapUnitsFromEPSG()
self.setNewExperiment()
Communicate.instance().activateRADExp.emit(int(self.expId),self.expEPSG,self.mapunits)
self.openNewDialog(buttonValue)
# ..............................................................................
def setNewExperiment(self):
"""
@summary: sets the currentExpID key in settings and creates a project folder in workspace
and quietly save the new QGIS project to it
"""
try:
s = QSettings()
s.setValue("currentExpID", int(self.expId))
self.workspace.saveQgsProjectAs(self.expId)
except:
QMessageBox.warning(self,"status: ",
"Could not save expId to settings")
# ..............................................................................
def openNewDialog(self,buttonValue):
inputs = {'expId':self.expId}
experimentname = self.keyvalues['name']
if buttonValue == "Grid":
self.constructGridDialog = ConstructGridDialog( self.interface,
inputs = inputs,
client = self.client,
epsg=self.expEPSG,
mapunits=self.mapunits)
self.setModal(False)
self.constructGridDialog.show()
self.listBucketsRadio.setEnabled(True)
elif buttonValue == "SDM":
SDMDialog = UploadSDMDialog(self.interface,
inputs = inputs,
client = self.client,
epsg=self.expEPSG,
experimentname = experimentname,
mapunits=self.mapunits)
self.setModal(False) # has to be closed to continue
SDMDialog.exec_()
self.listPALayersRadio.setEnabled(True)
elif buttonValue == "Tree":
try:
items = self.client.rad.getPALayers(self.expId)
except:
items = None
message = "There is a problem with the layer listing service"
msgBox = QMessageBox.information(self,
"Problem...",
message,
QMessageBox.Ok)
else:
if len(items) != 0:
message = "You already have layers in this experiment. You must begin an experiment with trees and their layers to use a tree."
msgBox = QMessageBox.information(self,
"Problem...",
message,
QMessageBox.Ok)
elif len(items) == 0:
treeDialog = UploadTreeDialog(self.interface,
inputs = inputs,
client = self.client,
epsg = self.expEPSG,
experimentname=experimentname,
mapunits=self.mapunits)
self.setModal(False)
treeDialog.exec_()
self.listPALayersRadio.setEnabled(True)
elif buttonValue == "Local":
d = UploadDialog(self.interface,
inputs = inputs,
client = self.client,
epsg=self.expEPSG,
experimentname=experimentname,
mapunits=self.mapunits)
d.exec_()
self.listPALayersRadio.setEnabled(True)
elif buttonValue == "Empty":
pass
elif buttonValue == "ListBuckets":
d = ListBucketsDialog(self.interface, inputs=inputs,
client= self.client, epsg=self.expEPSG,
mapunits=self.mapunits)
d.exec_()
elif buttonValue == "ListLayers":
d = ListPALayersDialog(self.interface, inputs=inputs,
client= self.client, epsg=self.expEPSG,
mapunits=self.mapunits)
d.exec_()
#self.acceptBut.setEnabled( True )
# ..............................................................................
def help(self):
self.help = QWidget()
self.help.setWindowTitle('Lifemapper Help')
self.help.resize(600, 400)
self.help.setMinimumSize(600,400)
self.help.setMaximumSize(1000,1000)
layout = QVBoxLayout()
helpDialog = QTextBrowser()
helpDialog.setOpenExternalLinks(True)
#helpDialog.setSearchPaths(['documents'])
helppath = os.path.dirname(os.path.realpath(__file__))+'/documents/help.html'
helpDialog.setSource(QUrl.fromLocalFile(helppath))
helpDialog.scrollToAnchor('newRADExperiment')
layout.addWidget(helpDialog)
self.help.setLayout(layout)
if self.isModal():
self.setModal(False)
self.help.show()
if __name__ == "__main__":
#
import sys
#import_path = "/home/jcavner/workspace/lm3/components/LmClient/LmQGIS/V2/lifemapperTools/"
#sys.path.append(os.path.join(import_path, 'LmShared'))
###
#configPath = os.path.join(import_path, 'config', 'config.ini')
###
#os.environ["LIFEMAPPER_CONFIG_FILE"] = configPath
#from LmClient.lmClientLib import LMClient
#client = LMClient(userId='blank', pwd='blank')
qApp = QApplication(sys.argv)
d = NewExperimentDialog(None)#,experimentId=596106
d.show()
sys.exit(qApp.exec_())
|
lifemapper/LmQGIS
|
lifemapperTools/tools/newExperiment.py
|
Python
|
gpl-2.0
| 17,776
|
# Copyright (c) 2015 Stefano Guglielmetti - jeko@jeko.net
# https://github.com/amicojeko
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# this script is based upon the official tweepy streamin example
# https://github.com/tweepy/tweepy/blob/master/examples/streaming.py
from __future__ import absolute_import, print_function
import os
from subprocess import call
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
script_path = os.path.dirname(os.path.realpath(__file__))
# Go to http://apps.twitter.com and create an app.
# The consumer key and secret will be generated for you after
consumer_key=""
consumer_secret=""
# After the step above, you will be redirected to your app's page.
# Create an access token under the "Your access token" section
access_token=""
access_token_secret=""
# This is the string to search in the twitter feed
# May be a word, a #hashtag or a @username
search_string = '#jekotest'
class StdOutListener(StreamListener):
"""
    A listener handles tweets that are received from the stream.
This is a basic listener that just prints received tweets to stdout.
"""
def on_data(self, data):
# I call an external script because the Arduino's Bridge library
        # conflicts with the tweepy library
call(["/usr/bin/python", script_path + "/go.py"])
return True
def on_error(self, status):
# TODO: Put some error handling here
return False
if __name__ == '__main__':
l = StdOutListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, l)
stream.filter(track=[search_string])
|
amicojeko/TwitterBlink
|
streaming.py
|
Python
|
mit
| 2,742
|
# Copyright (c) 2010, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# A module for parsing results.html files generated by old-run-webkit-tests
# This class is one big hack and only needs to exist until we transition to new-run-webkit-tests.
from webkitpy.common.system.deprecated_logging import log
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup, SoupStrainer
from webkitpy.layout_tests.layout_package import test_results
from webkitpy.layout_tests.layout_package import test_failures
# FIXME: This should be unified with all the layout test results code in the layout_tests package
# This doesn't belong in common.net, but we don't have a better place for it yet.
def path_for_layout_test(test_name):
return "LayoutTests/%s" % test_name
# FIXME: This should be unified with all the layout test results code in the layout_tests package
# This doesn't belong in common.net, but we don't have a better place for it yet.
class LayoutTestResults(object):
"""This class knows how to parse old-run-webkit-tests results.html files."""
stderr_key = u'Tests that had stderr output:'
fail_key = u'Tests where results did not match expected results:'
timeout_key = u'Tests that timed out:'
crash_key = u'Tests that caused the DumpRenderTree tool to crash:'
missing_key = u'Tests that had no expected results (probably new):'
webprocess_crash_key = u'Tests that caused the Web process to crash:'
expected_keys = [
stderr_key,
fail_key,
crash_key,
webprocess_crash_key,
timeout_key,
missing_key,
]
@classmethod
def _failures_from_fail_row(self, row):
# Look at all anchors in this row, and guess what type
# of new-run-webkit-test failures they equate to.
failures = set()
test_name = None
for anchor in row.findAll("a"):
anchor_text = unicode(anchor.string)
if not test_name:
test_name = anchor_text
continue
if anchor_text in ["expected image", "image diffs"] or '%' in anchor_text:
failures.add(test_failures.FailureImageHashMismatch())
elif anchor_text in ["expected", "actual", "diff", "pretty diff"]:
failures.add(test_failures.FailureTextMismatch())
else:
log("Unhandled link text in results.html parsing: %s. Please file a bug against webkitpy." % anchor_text)
        # FIXME: It's possible the row contained no links due to ORWT brokenness.
# We should probably assume some type of failure anyway.
return failures
@classmethod
def _failures_from_row(cls, row, table_title):
if table_title == cls.fail_key:
return cls._failures_from_fail_row(row)
if table_title == cls.crash_key:
return [test_failures.FailureCrash()]
if table_title == cls.webprocess_crash_key:
return [test_failures.FailureCrash()]
if table_title == cls.timeout_key:
return [test_failures.FailureTimeout()]
if table_title == cls.missing_key:
return [test_failures.FailureMissingResult(), test_failures.FailureMissingImageHash(), test_failures.FailureMissingImage()]
return None
@classmethod
def _test_result_from_row(cls, row, table_title):
test_name = unicode(row.find("a").string)
failures = cls._failures_from_row(row, table_title)
# TestResult is a class designed to work with new-run-webkit-tests.
# old-run-webkit-tests does not save quite enough information in results.html for us to parse.
# FIXME: It's unclear if test_name should include LayoutTests or not.
return test_results.TestResult(test_name, failures)
@classmethod
def _parse_results_table(cls, table):
table_title = unicode(table.findPreviousSibling("p").string)
if table_title not in cls.expected_keys:
# This Exception should only ever be hit if run-webkit-tests changes its results.html format.
raise Exception("Unhandled title: %s" % table_title)
# Ignore stderr failures. Everyone ignores them anyway.
if table_title == cls.stderr_key:
return []
# FIXME: We might end with two TestResults object for the same test if it appears in more than one row.
return [cls._test_result_from_row(row, table_title) for row in table.findAll("tr")]
@classmethod
def _parse_results_html(cls, page):
tables = BeautifulSoup(page).findAll("table")
return sum([cls._parse_results_table(table) for table in tables], [])
@classmethod
def results_from_string(cls, string):
if not string:
return None
test_results = cls._parse_results_html(string)
if not test_results:
return None
return cls(test_results)
def __init__(self, test_results):
self._test_results = test_results
self._failure_limit_count = None
# FIXME: run-webkit-tests should store the --exit-after-N-failures value
# (or some indication of early exit) somewhere in the results.html/results.json
# file. Until it does, callers should set the limit to
# --exit-after-N-failures value used in that run. Consumers of LayoutTestResults
# may use that value to know if absence from the failure list means PASS.
# https://bugs.webkit.org/show_bug.cgi?id=58481
def set_failure_limit_count(self, limit):
self._failure_limit_count = limit
def failure_limit_count(self):
return self._failure_limit_count
def test_results(self):
return self._test_results
def results_matching_failure_types(self, failure_types):
return [result for result in self._test_results if result.has_failure_matching_types(failure_types)]
def tests_matching_failure_types(self, failure_types):
return [result.filename for result in self.results_matching_failure_types(failure_types)]
def failing_test_results(self):
# These should match the "fail", "crash", and "timeout" keys.
failure_types = [test_failures.FailureTextMismatch, test_failures.FailureImageHashMismatch, test_failures.FailureCrash, test_failures.FailureTimeout]
return self.results_matching_failure_types(failure_types)
def failing_tests(self):
return [result.filename for result in self.failing_test_results()]
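# Illustrative usage sketch (not part of the original module): parse a
# results.html string and list the failing tests. `page_html` is a
# placeholder for content fetched from a build bot.
#
# results = LayoutTestResults.results_from_string(page_html)
# if results:
#     results.set_failure_limit_count(30)
#     for test in results.failing_tests():
#         log(test)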
|
danialbehzadi/Nokia-RM-1013-2.0.0.11
|
webkit/Tools/Scripts/webkitpy/common/net/layouttestresults.py
|
Python
|
gpl-3.0
| 7,908
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-04 21:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='FiftyDay',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ticker', models.CharField(max_length=10)),
('avg_price', models.DecimalField(decimal_places=2, max_digits=10)),
],
),
migrations.CreateModel(
name='FiveDay',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ticker', models.CharField(max_length=10)),
('avg_price', models.DecimalField(decimal_places=2, max_digits=10)),
],
),
migrations.CreateModel(
name='Quote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ticker', models.CharField(max_length=10)),
('quote_price', models.DecimalField(decimal_places=2, max_digits=10)),
],
),
migrations.CreateModel(
name='TwoHundredDay',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ticker', models.CharField(max_length=10)),
('avg_price', models.DecimalField(decimal_places=2, max_digits=10)),
],
),
]
|
colinmcglone/window-time
|
marketgrab/migrations/0001_initial.py
|
Python
|
mit
| 1,748
|
#!/usr/bin/env python3
# https://www.hackerrank.com/challenges/sherlock-and-array
import os
import unittest
from typing import List
def balanced_sums(arr: List[int]) -> str:
left = [0] * len(arr)
right = [0] * len(arr)
for i in range(1, len(arr)):
left[i] = left[i - 1] + arr[i - 1]
right[len(arr) - 1 - i] = right[len(arr) - i] + arr[len(arr) - i]
for i in range(len(arr)):
if left[i] == right[i]:
return 'YES'
return 'NO'
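# Example (illustrative): for arr = [1, 2, 3, 3] the prefix sums are
# [0, 1, 3, 6] and the suffix sums are [8, 6, 3, 0]; at index 2 both
# sides equal 3, so balanced_sums(arr) returns 'YES'.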
class TestCode(unittest.TestCase):
def runner(self, name):
io_lines = [[[]]] * 2
for index, template in enumerate(['input%s.txt', 'output%s.txt']):
path = os.path.join(os.path.split(__file__)[0], template % name)
with open(path, 'r') as handle:
lines = handle.readlines()
io_lines[index] = [line.strip().split(' ') for line in lines]
count = int(io_lines[0][0][0])
for i in range(count):
arguments = io_lines[0][2 + i * 2]
arguments = [int(item) for item in arguments]
result = balanced_sums(arguments)
self.assertEqual(io_lines[1][i][0], result)
def test_example(self):
self.runner('_example')
def test_00(self):
self.runner('00')
|
altermarkive/Coding-Interviews
|
algorithm-design/hackerrank/sherlock_and_array/sherlockand_array.py
|
Python
|
mit
| 1,280
|
def _nucleotidesToNumerals(nucleotides):
nums = []
for nuc in nucleotides:
nums.append(('a','t','c','g').index(nuc.lower()))
return nums
def _baseFourToChar(nums):
val = 0
nums.reverse()
for i in range(4):
val += int(nums[i]) * ( 4 ** i)
return chr(val)
def _charToBaseFour(char):
val = ord(char)
nums = []
    for i in reversed(range(4)):
divides = val // (4 ** i)
val -= divides * (4 ** i)
nums.append(divides)
return nums
def _convertDNA(dna):
nums = _nucleotidesToNumerals(dna)
rna = ""
frame = []
FRAME_WIDTH = 4
for num in nums:
frame.append(num)
if len(frame) == FRAME_WIDTH:
rna += _baseFourToChar(frame)
frame = []
return rna
def _convertRNA(rna):
dna = ""
for char in rna:
for num in _charToBaseFour(char):
dna += "ATCG"[num]
return dna
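# Illustrative round-trip (not part of the original module): the four
# nucleotides 'atcg' map to base-4 digits [0, 1, 2, 3], which pack into the
# single character chr(0*64 + 1*16 + 2*4 + 3) == chr(27), and
# _convertRNA(_convertDNA('atcg')) recovers 'ATCG'.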
|
omegachysis/biohex
|
biohex/bitLibrary/functions.py
|
Python
|
apache-2.0
| 939
|
##
# Copyright (c) 2013-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Common directory service interfaces
"""
from zope.interface.interface import Interface, Attribute
__all__ = [
"IStoreDirectoryService",
"IStoreDirectoryRecord",
]
class IStoreDirectoryError(Exception):
"""
Base class for directory related errors.
"""
class DirectoryRecordNotFoundError(Exception):
"""
Directory record not found.
"""
class IStoreDirectoryService(Interface):
"""
Directory Service for looking up users.
"""
def recordWithUID(uid): #@NoSelf
"""
Return the record for the specified store uid.
@return: the record.
@rtype: L{IStoreDirectoryRecord}
"""
def recordWithGUID(guid): #@NoSelf
"""
Return the record for the specified store guid.
@return: the record.
@rtype: L{IStoreDirectoryRecord}
"""
class IStoreDirectoryRecord(Interface):
"""
Directory record object
A record identifies a "user" in the system.
"""
uid = Attribute("The record UID: C{str}")
shortNames = Attribute("Short names of the record: C{tuple}")
fullName = Attribute("Full name for the entity associated with the record: C{str}")
displayName = Attribute("Display name for entity associated with the record: C{str}")
def serverURI(): #@NoSelf
"""
Return the URI for the record's server "pod".
@return: a URI.
@rtype: C{str}
"""
def server(): #@NoSelf
"""
Return the L{txdav.caldav.datastore.scheduling.localservers.Server} for the record's server "pod".
@return: a pod server record.
@rtype: L{txdav.caldav.datastore.scheduling.localservers.Server}
"""
def thisServer(): #@NoSelf
"""
Indicates whether the record is hosted on this server "pod".
@return: C{True} if hosted by this service.
@rtype: C{bool}
"""
|
trevor/calendarserver
|
txdav/common/idirectoryservice.py
|
Python
|
apache-2.0
| 2,527
|
#!/usr/bin/env python2
# Copyright (C) 2011-2012 by Imperial College London
# Copyright (C) 2013 University of Oxford
# Copyright (C) 2014 University of Edinburgh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3 of the License
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import dolfin
import ufl
from exceptions import *
from fenics_overrides import *
__all__ = \
[
"StaticConstant",
"StaticDirichletBC",
"StaticFunction",
"extract_non_static_coefficients",
"is_static_coefficient",
"is_static_bc",
"is_static_form",
"n_non_static_bcs",
"n_non_static_coefficients"
]
def StaticConstant(*args, **kwargs):
"""
Return a Constant which is marked as "static". Arguments are identical to the
Constant function.
"""
c = dolfin.Constant(*args, **kwargs)
if isinstance(c, ufl.tensors.ListTensor):
for c_c in c:
assert(isinstance(c_c, dolfin.Constant))
c_c._time_static = True
else:
assert(isinstance(c, dolfin.Constant))
c._time_static = True
return c
def StaticFunction(*args, **kwargs):
"""
Return a Function which is marked as "static". Arguments are identical to the
Function function.
"""
fn = dolfin.Function(*args, **kwargs)
fn._time_static = True
return fn
class StaticDirichletBC(DirichletBC):
"""
A DirichletBC which is marked as "static". Constructor arguments are identical
to the DOLFIN DirichletBC constructor.
"""
def __init__(self, *args, **kwargs):
DirichletBC.__init__(self, *args, **kwargs)
self._time_static = True
return
def is_static_coefficient(c):
"""
Return whether the supplied argument is a static Coefficient.
"""
return isinstance(c, ufl.constantvalue.ConstantValue) or (hasattr(c, "_time_static") and c._time_static)
def extract_non_static_coefficients(form):
"""
Return all non-static Coefficient s associated with the supplied form.
"""
non_static = []
for c in ufl.algorithms.extract_coefficients(form):
if not is_static_coefficient(c):
non_static.append(c)
return non_static
def n_non_static_coefficients(form):
"""
Return the number of non-static Coefficient s associated with the supplied
form.
"""
non_static = 0
for c in ufl.algorithms.extract_coefficients(form):
if not is_static_coefficient(c):
non_static += 1
return non_static
def is_static_form(form):
"""
Return whether the supplied form is "static".
"""
if not isinstance(form, ufl.form.Form):
raise InvalidArgumentException("form must be a Form")
for dep in ufl.algorithms.extract_coefficients(form):
if not is_static_coefficient(dep):
return False
return True
def is_static_bc(bc):
"""
Return whether the supplied DirichletBC is "static".
"""
if not isinstance(bc, dolfin.cpp.DirichletBC):
raise InvalidArgumentException("bc must be a DirichletBC")
return hasattr(bc, "_time_static") and bc._time_static
def n_non_static_bcs(bcs):
"""
    Given a list of DirichletBC s, return the number of non-static DirichletBC s.
"""
if not isinstance(bcs, list):
raise InvalidArgumentException("bcs must be a list of DirichletBC s")
n = 0
for bc in bcs:
if not is_static_bc(bc):
n += 1
return n
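# Illustrative usage sketch (not part of the original module), assuming a
# working DOLFIN installation:
#
# c = StaticConstant(1.0)           # marked static
# d = dolfin.Constant(1.0)          # not marked static
# assert is_static_coefficient(c)
# assert not is_static_coefficient(d)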
|
pf4d/dolfin-adjoint
|
timestepping/python/timestepping/statics.py
|
Python
|
lgpl-3.0
| 3,929
|
"""
Copyright 2015 Zalando SE
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific
language governing permissions and limitations under the License.
"""
# Authentication and authorization related decorators
from flask import request
import functools
import logging
import requests
from ..problem import problem
logger = logging.getLogger('connexion.api.security')
# use connection pool for OAuth tokeninfo
adapter = requests.adapters.HTTPAdapter(pool_connections=100, pool_maxsize=100)
session = requests.Session()
session.mount('http://', adapter)
session.mount('https://', adapter)
def security_passthrough(function):
"""
:type function: types.FunctionType
:rtype: types.FunctionType
"""
return function
def verify_oauth(token_info_url, allowed_scopes, function):
"""
Decorator to verify oauth
:param token_info_url: Url to get information about the token
:type token_info_url: str
:param allowed_scopes: Set with scopes that are allowed to access the endpoint
:type allowed_scopes: set
:type function: types.FunctionType
:rtype: types.FunctionType
"""
@functools.wraps(function)
def wrapper(*args, **kwargs):
logger.debug("%s Oauth verification...", request.url)
authorization = request.headers.get('Authorization') # type: str
if not authorization:
logger.info("... No auth provided. Aborting with 401.")
return problem(401, 'Unauthorized', "No authorization token provided")
else:
try:
_, token = authorization.split() # type: str, str
except ValueError:
return problem(401, 'Unauthorized', 'Invalid authorization header')
logger.debug("... Getting token '%s' from %s", token, token_info_url)
token_request = session.get(token_info_url, params={'access_token': token}, timeout=5)
logger.debug("... Token info (%d): %s", token_request.status_code, token_request.text)
if not token_request.ok:
return problem(401, 'Unauthorized', "Provided oauth token is not valid")
token_info = token_request.json() # type: dict
user_scopes = set(token_info['scope'])
scopes_intersection = user_scopes & allowed_scopes
logger.debug("... Scope intersection: %s", scopes_intersection)
if not scopes_intersection:
logger.info("... User scopes (%s) don't include one of the allowed scopes (%s). Aborting with 401.",
user_scopes, allowed_scopes)
return problem(403, 'Forbidden', "Provided token doesn't have the required scope")
logger.info("... Token authenticated.")
request.user = token_info.get('uid')
request.token_info = token_info
return function(*args, **kwargs)
return wrapper
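# Illustrative usage sketch (not part of the original module): wrapping a
# handler so it requires the 'uid' scope. The tokeninfo URL and the scope
# set are invented for the example.
#
# def get_secret():
#     return 'secret data'
#
# secured_get_secret = verify_oauth('https://auth.example.org/oauth2/tokeninfo',
#                                   set(['uid']), get_secret)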
|
iomedhealth/connexion
|
connexion/decorators/security.py
|
Python
|
apache-2.0
| 3,294
|
import argparse
import os
import pytest
from tests.test_config import load_tests_params, clean_dirs
from data_engine.prepare_data import build_dataset
from nmt_keras.training import train_model
from nmt_keras.apply_model import sample_ensemble, score_corpus
def test_transformer():
params = load_tests_params()
# Current test params: Transformer
params['MODEL_TYPE'] = 'Transformer'
params['TIED_EMBEDDINGS'] = True
params['N_LAYERS_ENCODER'] = 2
params['N_LAYERS_DECODER'] = 2
params['MULTIHEAD_ATTENTION_ACTIVATION'] = 'relu'
params['MODEL_SIZE'] = 8
params['FF_SIZE'] = params['MODEL_SIZE'] * 4
params['N_HEADS'] = 2
params['REBUILD_DATASET'] = True
params['OPTIMIZED_SEARCH'] = True
params['POS_UNK'] = False
dataset = build_dataset(params)
params['INPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[params['INPUTS_IDS_DATASET'][0]]
params['OUTPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[params['OUTPUTS_IDS_DATASET'][0]]
params['MODEL_NAME'] = \
params['TASK_NAME'] + '_' + params['SRC_LAN'] + params['TRG_LAN'] + '_' + params['MODEL_TYPE'] + \
'_model_size_' + str(params['MODEL_SIZE']) + \
'_ff_size_' + str(params['FF_SIZE']) + \
'_num_heads_' + str(params['N_HEADS']) + \
'_encoder_blocks_' + str(params['N_LAYERS_ENCODER']) + \
'_decoder_blocks_' + str(params['N_LAYERS_DECODER']) + \
'_deepout_' + '_'.join([layer[0] for layer in params['DEEP_OUTPUT_LAYERS']]) + \
'_' + params['OPTIMIZER'] + '_' + str(params['LR'])
# Test several NMT-Keras utilities: train, sample, sample_ensemble, score_corpus...
print("Training model")
train_model(params)
params['RELOAD'] = 1
print("Done")
parser = argparse.ArgumentParser('Parser for unit testing')
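    # NOTE: attributes are assigned directly onto the ArgumentParser object
    # below, which sample_ensemble/score_corpus then read like a plain
    # namespace; no command line is actually parsed in this test.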
parser.dataset = os.path.join(
params['DATASET_STORE_PATH'],
'Dataset_' + params['DATASET_NAME'] + '_' + params['SRC_LAN'] + params['TRG_LAN'] + '.pkl')
parser.text = os.path.join(params['DATA_ROOT_PATH'], params['TEXT_FILES']['val'] + params['SRC_LAN'])
parser.splits = ['val']
parser.config = params['STORE_PATH'] + '/config.pkl'
parser.models = [params['STORE_PATH'] + '/epoch_' + str(1)]
parser.verbose = 0
parser.dest = None
parser.source = os.path.join(params['DATA_ROOT_PATH'], params['TEXT_FILES']['val'] + params['SRC_LAN'])
parser.target = os.path.join(params['DATA_ROOT_PATH'], params['TEXT_FILES']['val'] + params['TRG_LAN'])
parser.weights = []
parser.glossary = None
for n_best in [True, False]:
parser.n_best = n_best
print("Sampling with n_best = %s " % str(n_best))
sample_ensemble(parser, params)
print("Done")
print("Scoring corpus")
score_corpus(parser, params)
print("Done")
clean_dirs(params)
if __name__ == '__main__':
pytest.main([__file__])
|
lvapeab/nmt-keras
|
tests/NMT_architectures/unidir_deep_transformer_tied_embeddings.py
|
Python
|
mit
| 2,901
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, absolute_import, unicode_literals
import json
import logging
from os import uname
import sys
import argparse
from osbs import set_logging
from osbs.api import OSBS
from osbs.conf import Configuration
from osbs.constants import DEFAULT_CONFIGURATION_FILE, DEFAULT_CONFIGURATION_SECTION
from osbs.exceptions import OsbsNetworkException, OsbsException
logger = logging.getLogger('osbs')
def print_json_nicely(decoded_json):
print(json.dumps(decoded_json, indent=2))
def cmd_list_builds(args, osbs):
builds = osbs.list_builds(namespace=args.namespace)
if args.output == 'json':
json_output = []
for build in builds:
json_output.append(build.json)
print_json_nicely(json_output)
elif args.output == 'text':
format_str = "{name:48} {status:16} {image:64}"
print(format_str.format(**{"name": "BUILD NAME", "status": "STATUS", "image": "IMAGE NAME"}), file=sys.stderr)
for build in builds:
image = build.get_image_tag()
if args.USER:
if not image.startswith(args.USER + "/"):
continue
b = {
"name": build.get_build_name(),
"status": build.status,
"image": image
}
print(format_str.format(**b))
def cmd_get_build(args, osbs):
build = osbs.get_build(args.BUILD_ID[0], namespace=args.namespace)
build_json = build.json
if args.output == 'json':
print_json_nicely(build_json)
elif args.output == 'text':
metadata = build_json.get("metadata", {})
dockerfile = build.get_dockerfile()
packages = build.get_rpm_packages()
logs = build.get_logs()
repositories_json = build.get_repositories()
repositories_str = None
if repositories_json is not None:
repositories = json.loads(repositories_json)
repositories_template = """\
Primary
{primary}
Unique
{unique}"""
repositories_context = {
"primary": "\n".join(repositories["primary"]),
"unique": "\n".join(repositories["unique"]),
}
repositories_str = repositories_template.format(**repositories_context)
template = """\
BUILD ID: {build_id}
STATUS: {status}
IMAGE: {image}
DATE: {date}
DOCKERFILE
{dockerfile}
BUILD LOGS
{logs}
PACKAGES
{packages}
REPOSITORIES
{repositories}"""
context = {
"build_id": build.get_build_name(),
"status": build.status,
"image": build.get_image_tag(),
"date": build_json['metadata']['creationTimestamp'],
"dockerfile": dockerfile,
"logs": logs,
"packages": packages,
"repositories": repositories_str,
}
print(template.format(**context))
def cmd_build(args, osbs):
build = osbs.create_build(
git_uri=osbs.build_conf.get_git_uri(),
git_ref=osbs.build_conf.get_git_ref(),
user=osbs.build_conf.get_user(),
component=osbs.build_conf.get_component(),
target=osbs.build_conf.get_koji_target(),
architecture=osbs.build_conf.get_architecture(),
yum_repourls=osbs.build_conf.get_yum_repourls(),
namespace=osbs.build_conf.get_namespace(),
)
build_id = build.build_id
if not args.no_logs:
print("Build submitted (%s), watching logs (feel free to interrupt)" % build_id)
for line in osbs.get_build_logs(build_id, follow=True):
print(line)
else:
if args.output == 'json':
print_json_nicely(build.json)
elif args.output == 'text':
print(build_id)
def cmd_build_logs(args, osbs):
build_id = args.BUILD_ID[0]
follow = args.follow
if follow:
for line in osbs.get_build_logs(build_id, follow=True, namespace=args.namespace):
print(line)
else:
logs = osbs.get_build_logs(build_id, follow=False, namespace=args.namespace)
print(logs, end="")
def cmd_watch_build(args, osbs):
build_response = osbs.wait_for_build_to_finish(args.BUILD_ID[0], namespace=args.namespace)
if args.output == 'text':
pass
elif args.output == 'json':
print_json_nicely(build_response.json)
def cmd_get_token(args, osbs): # pylint: disable=W0613
token = osbs.get_token()
print(token)
def cmd_get_user(args, osbs):
args_username = args.USERNAME
if args_username is None:
user_json = osbs.get_user()
else:
args_username = args_username[0]
user_json = osbs.get_user(args_username)
if args.output == 'json':
print_json_nicely(user_json)
elif args.output == 'text':
name = ""
full_name = ""
try:
name = user_json["metadata"]["name"]
except KeyError:
logger.error("\"name\" is not in response")
try:
full_name = user_json["fullName"]
except KeyError:
logger.error("\"full name\" is not in response")
print("Name: \"%s\"\nFull Name: \"%s\"" % (name, full_name))
def cli():
parser = argparse.ArgumentParser(
description="OpenShift Build Service client"
)
exclusive_group = parser.add_mutually_exclusive_group()
exclusive_group.add_argument("--verbose", action="store_true", default=None)
exclusive_group.add_argument("-q", "--quiet", action="store_true")
subparsers = parser.add_subparsers(help='commands')
list_builds_parser = subparsers.add_parser('list-builds', help='list builds in OSBS',
description="list all builds in specified namespace "
"(to list all builds in all namespaces, use --namespace=\"\")")
list_builds_parser.add_argument("USER", help="list builds only for specified username",
nargs="?")
list_builds_parser.set_defaults(func=cmd_list_builds)
watch_build_parser = subparsers.add_parser('watch-build', help='wait till build finishes')
watch_build_parser.add_argument("BUILD_ID", help="build ID", nargs=1)
watch_build_parser.set_defaults(func=cmd_watch_build)
get_build_parser = subparsers.add_parser('get-build', help='get info about build')
get_build_parser.add_argument("BUILD_ID", help="build ID", nargs=1)
get_build_parser.set_defaults(func=cmd_get_build)
get_token_parser = subparsers.add_parser('get-token', help='get authentication token')
get_token_parser.set_defaults(func=cmd_get_token)
get_user_parser = subparsers.add_parser('get-user', help='get info about user')
get_user_parser.add_argument("USERNAME", nargs="?", default=None)
get_user_parser.set_defaults(func=cmd_get_user)
build_logs_parser = subparsers.add_parser('build-logs', help='get or follow build logs')
build_logs_parser.add_argument("BUILD_ID", help="build ID", nargs=1)
build_logs_parser.add_argument("-f", "--follow", help="follow logs as they come", action="store_true",
default=False)
build_logs_parser.set_defaults(func=cmd_build_logs)
build_parser = subparsers.add_parser('build', help='build an image in OSBS')
build_parser.add_argument("--build-type", "-T", action="store", metavar="BUILD_TYPE",
help="build type (prod, simple)")
build_parser.add_argument("--build-json-dir", action="store", metavar="PATH",
help="directory with build jsons")
build_parser.add_argument("-g", "--git-url", action='store', metavar="URL",
required=True, help="URL to git repo")
build_parser.add_argument("--git-commit", action='store', default="master",
help="checkout this commit")
build_parser.add_argument("-t", "--target", action='store',
help="koji target name")
build_parser.add_argument("-a", "--arch", action='store', default=uname()[4],
help="build architecture")
build_parser.add_argument("-u", "--user", action='store', required=True,
help="username (will be image prefix)")
build_parser.add_argument("-c", "--component", action='store', required=True,
help="name of component")
build_parser.add_argument("--no-logs", action='store_true', required=False, default=False,
help="don't print logs after submitting build")
build_parser.add_argument("--add-yum-repo", action='append', metavar="URL",
help="URL of yum repo file")
build_parser.set_defaults(func=cmd_build)
parser.add_argument("--openshift-uri", action='store', metavar="URL",
help="openshift URL to remote API")
parser.add_argument("--registry-uri", action='store', metavar="URL",
help="registry where images should be pushed")
parser.add_argument("--config", action='store', metavar="PATH",
help="path to configuration file", default=DEFAULT_CONFIGURATION_FILE)
parser.add_argument("--instance", "-i", action='store', metavar="SECTION_NAME",
help="section within config for requested instance", default=DEFAULT_CONFIGURATION_SECTION)
parser.add_argument("--username", action='store',
help="username within OSBS")
parser.add_argument("--password", action='store',
help="password within OSBS")
parser.add_argument("--use-kerberos", action='store_true', default=None,
help="use kerberos for authentication")
parser.add_argument("--verify-ssl", action='store_true', default=None,
help="verify CA on secure connections")
parser.add_argument("--with-auth", action="store_true", dest="use_auth", default=None,
help="get and supply oauth token with every request")
parser.add_argument("--without-auth", action="store_false", dest="use_auth", default=None,
help="don't supply oauth tokens to requests")
parser.add_argument("--output", choices=["json", "text"], default="text",
help="pick output type (default=text)")
parser.add_argument("--namespace", help="name of namespace to query against "
"(you may require blank namespace with --namespace=\"\")",
metavar="NAMESPACE", action="store", default="default")
args = parser.parse_args()
return parser, args
def main():
parser, args = cli()
try:
os_conf = Configuration(conf_file=args.config,
conf_section=args.instance,
cli_args=args)
build_conf = Configuration(conf_file=args.config,
conf_section=args.instance,
cli_args=args)
except OsbsException as ex:
logger.error("Configuration error: %s", ex.message)
return -1
is_verbose = os_conf.get_verbosity()
if is_verbose:
set_logging(level=logging.DEBUG)
logger.debug("Logging level set to debug")
elif args.quiet:
set_logging(level=logging.WARNING)
else:
set_logging(level=logging.INFO)
osbs = OSBS(os_conf, build_conf)
try:
args.func(args, osbs)
except AttributeError as ex:
if hasattr(args, 'func'):
raise
else:
parser.print_help()
except KeyboardInterrupt:
print("Quitting on user request.")
return -1
except OsbsNetworkException as ex:
if is_verbose:
raise
else:
logger.error("Network error at %s (%d): %s",
ex.url, ex.status_code, ex.message)
return -1
except Exception as ex: # pylint: disable=broad-except
if is_verbose:
raise
else:
logger.error("Exception caught: %s", repr(ex))
return -1
if __name__ == '__main__':
sys.exit(main())
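# Illustrative invocations (derived from the argparse definitions above;
# the executable name, URLs and values are placeholders):
#
#   osbs --openshift-uri https://openshift.example.com list-builds
#   osbs build -g https://example.com/repo.git -u someuser -c somecomponent
#   osbs build-logs -f some-build-id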
|
TomasTomecek/osbs
|
osbs/cli/main.py
|
Python
|
bsd-3-clause
| 12,432
|
#!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error, software_manager
sm = software_manager.SoftwareManager()
class xorg_x11_fonts(test.test):
"""
Autotest module for testing basic functionality
of xorg_x11_fonts
@author Shoji Sugiyama (shoji@jp.ibm.com)
"""
version = 1
nfail = 0
path = ''
def initialize(self, test_path=''):
"""
Sets the overall failure counter for the test.
"""
self.nfail = 0
for package in ['gcc', 'libX11-devel']:
if not sm.check_installed(package):
logging.debug("%s missing - trying to install", package)
sm.install(package)
ret_val = subprocess.Popen(['make', 'all'], cwd="%s/xorg_x11_fonts" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
logging.info('\n Test initialize successfully')
def run_once(self, test_path=''):
"""
Trigger test run
"""
try:
os.environ["LTPBIN"] = "%s/shared" %(test_path)
ret_val = subprocess.Popen(['./fontcheck.sh', '-tasf', '75dpi'], cwd="%s/xorg_x11_fonts" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
ret_val = subprocess.Popen(['./fontcheck.sh', '-tsf', 'Type1'], cwd="%s/xorg_x11_fonts" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
ret_val = subprocess.Popen(['./fontcheck.sh', '-tf', 'TTF'], cwd="%s/xorg_x11_fonts" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
except error.CmdError, e:
self.nfail += 1
logging.error("Test Failed: %s", e)
def postprocess(self):
if self.nfail != 0:
logging.info('\n nfails is non-zero')
raise error.TestError('\nTest failed')
else:
logging.info('\n Test completed successfully ')
|
rajashreer7/autotest-client-tests
|
linux-tools/xorg_x11_fonts/xorg_x11_fonts.py
|
Python
|
gpl-2.0
| 2,125
|