# Dataset columns: code (string, 3 to 1.05M chars); repo_name (string, 5 to 104); path (string, 4 to 251); language (1 class); license (15 values); size (int64, 3 to 1.05M)
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all activities.
To create activities, run create_activities.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
  # Initialize appropriate service.
  activity_service = client.GetService('ActivityService', version='v201505')

  # Create a statement to select all activities.
  statement = dfp.FilterStatement()

  # Get activities by statement.
  while True:
    response = activity_service.getActivitiesByStatement(
        statement.ToStatement())
    if 'results' in response:
      # Display results.
      for activity in response['results']:
        print ('Activity with ID \'%s\', name \'%s\', and type \'%s\' was '
               'found.' % (activity['id'], activity['name'], activity['type']))
      statement.offset += dfp.SUGGESTED_PAGE_LIMIT
    else:
      break

  print '\nNumber of results found: %s' % response['totalResultSetSize']


if __name__ == '__main__':
  # Initialize client object.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client)
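
# --- Editor's sketch (not part of the original sample) ---
# FilterStatement also accepts a PQL WHERE clause with bind variables,
# the pattern the other DFP examples use to narrow results. A minimal
# sketch; filtering activities by an ACTIVE status is an assumption here:
def _make_filtered_statement():
  values = [{
      'key': 'status',
      'value': {
          'xsi_type': 'TextValue',
          'value': 'ACTIVE'  # assumed status value, for illustration only
      }
  }]
  return dfp.FilterStatement('WHERE status = :status', values)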
# === wubr2000/googleads-python-lib :: examples/dfp/v201505/activity_service/get_all_activities.py (Python, apache-2.0, 1,934 bytes) ===
"""
Material Demo
This demonstrates the various materials the pipeline supports.
It is also a reference scene, for testing BRDF changes.
"""
from __future__ import print_function
import os
import sys
from panda3d.core import Vec3, load_prc_file_data
from direct.showbase.ShowBase import ShowBase
# Change to the current directory
os.chdir(os.path.dirname(os.path.realpath(__file__)))
# Insert the pipeline path to the system path, this is required to be
# able to import the pipeline classes
pipeline_path = "../../"
# Just a special case for my development setup, so I don't accidentally
# commit a wrong path. You can remove this in your own programs.
if not os.path.isfile(os.path.join(pipeline_path, "setup.py")):
    pipeline_path = "../../RenderPipeline/"
sys.path.insert(0, pipeline_path)
# Import the render pipeline class
from rpcore import RenderPipeline
# This is a helper class for better camera movement - see below.
from rpcore.util.movement_controller import MovementController
class Application(ShowBase):

    def __init__(self):

        # Setup window size and title
        load_prc_file_data("", """
            # win-size 1600 900
            window-title Render Pipeline - Material Sample
        """)

        # Construct the render pipeline
        self.render_pipeline = RenderPipeline()
        self.render_pipeline.create(self)
        self.render_pipeline.daytime_mgr.time = "19:17"
        # self.render_pipeline.daytime_mgr.time = "12:00"

        # Load the scene
        model = self.loader.load_model("scene/TestScene.bam")
        model.reparent_to(self.render)
        self.render_pipeline.prepare_scene(model)

        # Enable parallax mapping on the floor
        # self.render_pipeline.set_effect(
        #     model.find("**/FloorPlane"),
        #     "effects/default.yaml", {"parallax_mapping": True}, 100)

        # Initialize the movement controller; this is a convenience class
        # providing improved camera controls compared to Panda3D's default
        # mouse controller.
        self.controller = MovementController(self)
        self.controller.set_initial_position_hpr(
            Vec3(-17.2912578583, -13.290019989, 6.88211250305),
            Vec3(-39.7285499573, -14.6770210266, 0.0))
        self.controller.setup()

Application().run()
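
# --- Editor's sketch (not part of the original sample) ---
# The commented-out set_effect() call above shows how to enable parallax
# mapping; as a self-contained helper using the same arguments (the node
# name "**/FloorPlane" is taken from that comment and is otherwise an
# assumption about TestScene.bam):
def _enable_floor_parallax(app):
    """Apply the parallax-mapping variant of the default effect to the floor."""
    floor = app.render.find("**/FloorPlane")
    if not floor.is_empty():
        app.render_pipeline.set_effect(
            floor, "effects/default.yaml", {"parallax_mapping": True}, 100)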
# === tobspr/RenderPipeline-Samples :: 01-Material-Demo/main.py (Python, mit, 2,313 bytes) ===
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.boot import get_allowed_pages
@frappe.whitelist()
def get(module):
	"""Returns data (sections, list of reports, counts) to render module view in desk:
	`/desk/#Module/[name]`."""
	data = get_data(module)

	out = {
		"data": data
	}

	return out

def get_data(module):
	"""Get module data for the module view `desk/#Module/[name]`"""
	doctype_info = get_doctype_info(module)
	data = build_config_from_file(module)

	if not data:
		data = build_standard_config(module, doctype_info)
	else:
		add_custom_doctypes(data, doctype_info)

	add_section(data, _("Custom Reports"), "icon-list-alt",
		get_report_list(module))

	data = combine_common_sections(data)
	data = apply_permissions(data)
	set_last_modified(data)

	return data
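
# An illustrative sketch (editor's note) of the structure get_data() returns,
# derived from add_section() below; the DocType name is hypothetical:
#
#	[{"label": "Documents", "icon": "icon-star",
#	  "items": [{"type": "doctype", "name": "Some DocType", ...}]},
#	 {"label": "Setup", "icon": "icon-cog", "items": [...]}, ...]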
def build_config_from_file(module):
	"""Build module info from the `[app]/config/[module].py` files."""
	data = []
	module = frappe.scrub(module)

	for app in frappe.get_installed_apps():
		try:
			data += get_config(app, module)
		except ImportError:
			pass

	return data

def build_standard_config(module, doctype_info):
	"""Build standard module data from DocTypes."""
	if not frappe.db.get_value("Module Def", module):
		frappe.throw(_("Module Not Found"))

	data = []

	add_section(data, _("Documents"), "icon-star",
		[d for d in doctype_info if in_document_section(d)])

	add_section(data, _("Setup"), "icon-cog",
		[d for d in doctype_info if not in_document_section(d)])

	add_section(data, _("Standard Reports"), "icon-list",
		get_report_list(module, is_standard="Yes"))

	return data

def add_section(data, label, icon, items):
	"""Adds a section to the module data."""
	if not items: return
	data.append({
		"label": label,
		"icon": icon,
		"items": items
	})

def add_custom_doctypes(data, doctype_info):
	"""Adds Custom DocTypes to modules set up via `config/desktop.py`."""
	add_section(data, _("Documents"), "icon-star",
		[d for d in doctype_info if (d.custom and in_document_section(d))])

	add_section(data, _("Setup"), "icon-cog",
		[d for d in doctype_info if (d.custom and not in_document_section(d))])

def in_document_section(d):
	"""Returns True if the `document_type` property is one of `Master`, `Transaction` or not set."""
	return d.document_type in ("Transaction", "Master", "")

def get_doctype_info(module):
	"""Returns list of non-child DocTypes for the given module."""
	doctype_info = frappe.db.sql("""select "doctype" as type, name, description,
		ifnull(document_type, "") as document_type, ifnull(custom, 0) as custom,
		ifnull(issingle, 0) as issingle
		from `tabDocType` where module=%s and ifnull(istable, 0)=0
		order by ifnull(custom, 0) asc, document_type desc, name asc""", module, as_dict=True)

	for d in doctype_info:
		d.description = _(d.description or "")

	return doctype_info

def combine_common_sections(data):
	"""Combine sections declared in separate apps."""
	sections = []
	sections_dict = {}
	for each in data:
		if each["label"] not in sections_dict:
			sections_dict[each["label"]] = each
			sections.append(each)
		else:
			sections_dict[each["label"]]["items"] += each["items"]

	return sections

def apply_permissions(data):
	default_country = frappe.db.get_default("country")

	user = frappe.get_user()
	user.build_permissions()

	allowed_pages = get_allowed_pages()

	new_data = []
	for section in data:
		new_items = []

		for item in (section.get("items") or []):
			item = frappe._dict(item)

			if item.country and item.country!=default_country:
				continue

			if ((item.type=="doctype" and item.name in user.can_read)
				or (item.type=="page" and item.name in allowed_pages)
				or (item.type=="report" and item.doctype in user.can_get_report)
				or item.type=="help"):
				new_items.append(item)

		if new_items:
			new_section = section.copy()
			new_section["items"] = new_items
			new_data.append(new_section)

	return new_data

def get_config(app, module):
	"""Load module info from `[app].config.[module]`."""
	config = frappe.get_module("{app}.config.{module}".format(app=app, module=module))
	config = config.get_data()
	for section in config:
		for item in section["items"]:
			if "label" not in item:
				item["label"] = _(item["name"])
	return config
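
# A minimal sketch (editor's note) of a config module that get_config() could
# load; the app "myapp", module "library" and DocType names are hypothetical:
#
#	# myapp/config/library.py
#	from frappe import _
#
#	def get_data():
#		return [{
#			"label": _("Documents"),
#			"icon": "icon-star",
#			"items": [
#				{"type": "doctype", "name": "Book"},
#				{"type": "report", "name": "Overdue Books", "doctype": "Book"}
#			]
#		}]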
def add_setup_section(config, app, module, label, icon):
	"""Add common sections to `/desk#Module/Setup`"""
	try:
		setup_section = get_setup_section(app, module, label, icon)
		if setup_section:
			config.append(setup_section)
	except ImportError:
		pass

def get_setup_section(app, module, label, icon):
	"""Get the setup section from each module (for the global Setup page)."""
	config = get_config(app, module)
	for section in config:
		if section.get("label")==_("Setup"):
			return {
				"label": label,
				"icon": icon,
				"items": section["items"]
			}

def set_last_modified(data):
	for section in data:
		for item in section["items"]:
			if item["type"] == "doctype":
				item["last_modified"] = get_last_modified(item["name"])

def get_last_modified(doctype):
	def _get():
		try:
			last_modified = frappe.get_all(doctype, fields=["max(modified)"],
				as_list=True, limit_page_length=1)[0][0]
		except Exception, e:
			if e.args[0]==1146:
				last_modified = None
			else:
				raise

		# hack: save as -1 so that it is cached
		if last_modified is None:
			last_modified = -1

		return last_modified

	last_modified = frappe.cache().hget("last_modified", doctype, _get)
	if last_modified==-1:
		last_modified = None

	return last_modified
def get_report_list(module, is_standard="No"):
"""Returns list on new style reports for modules."""
reports = frappe.get_list("Report", fields=["name", "ref_doctype", "report_type"], filters=
{"is_standard": is_standard, "disabled": ("in", ("0", "NULL", "")), "module": module},
order_by="name")
out = []
for r in reports:
out.append({
"type": "report",
"doctype": r.ref_doctype,
"is_query_report": 1 if r.report_type in ("Query Report", "Script Report") else 0,
"label": _(r.name),
"name": r.name
})
return out
# === mbauskar/tele-frappe :: frappe/desk/moduleview.py (Python, mit, 6,132 bytes) ===
# -*- coding: utf-8 -*-

""" S3 Query Construction

    @copyright: 2009-2015 (c) Sahana Software Foundation
    @license: MIT

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation
    files (the "Software"), to deal in the Software without
    restriction, including without limitation the rights to use,
    copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the
    Software is furnished to do so, subject to the following
    conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
    OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
    HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
    WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
    OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("FS",
"S3FieldSelector",
"S3Joins",
"S3ResourceField",
"S3ResourceQuery",
"S3URLQuery",
"S3URLQueryParser",
)
import datetime
import re
import sys
from gluon import current
from gluon.storage import Storage
from s3dal import Field, Row
from s3fields import S3RepresentLazy
from s3utils import s3_get_foreign_key, s3_unicode, S3TypeConverter
ogetattr = object.__getattribute__
TEXTTYPES = ("string", "text")
# =============================================================================
class S3FieldSelector(object):
    """ Helper class to construct a resource query """

    LOWER = "lower"
    UPPER = "upper"

    OPERATORS = [LOWER, UPPER]

    def __init__(self, name, type=None):
        """ Constructor """

        if not isinstance(name, basestring) or not name:
            raise SyntaxError("name required")
        self.name = str(name)
        self.type = type

        self.op = None

    # -------------------------------------------------------------------------
    def __lt__(self, value):
        return S3ResourceQuery(S3ResourceQuery.LT, self, value)

    # -------------------------------------------------------------------------
    def __le__(self, value):
        return S3ResourceQuery(S3ResourceQuery.LE, self, value)

    # -------------------------------------------------------------------------
    def __eq__(self, value):
        return S3ResourceQuery(S3ResourceQuery.EQ, self, value)

    # -------------------------------------------------------------------------
    def __ne__(self, value):
        return S3ResourceQuery(S3ResourceQuery.NE, self, value)

    # -------------------------------------------------------------------------
    def __ge__(self, value):
        return S3ResourceQuery(S3ResourceQuery.GE, self, value)

    # -------------------------------------------------------------------------
    def __gt__(self, value):
        return S3ResourceQuery(S3ResourceQuery.GT, self, value)

    # -------------------------------------------------------------------------
    def like(self, value):
        return S3ResourceQuery(S3ResourceQuery.LIKE, self, value)

    # -------------------------------------------------------------------------
    def belongs(self, value):
        return S3ResourceQuery(S3ResourceQuery.BELONGS, self, value)

    # -------------------------------------------------------------------------
    def contains(self, value):
        return S3ResourceQuery(S3ResourceQuery.CONTAINS, self, value)

    # -------------------------------------------------------------------------
    def anyof(self, value):
        return S3ResourceQuery(S3ResourceQuery.ANYOF, self, value)

    # -------------------------------------------------------------------------
    def typeof(self, value):
        return S3ResourceQuery(S3ResourceQuery.TYPEOF, self, value)

    # -------------------------------------------------------------------------
    def lower(self):
        self.op = self.LOWER
        return self

    # -------------------------------------------------------------------------
    def upper(self):
        self.op = self.UPPER
        return self

    # -------------------------------------------------------------------------
    def expr(self, val):

        if self.op and val is not None:
            if self.op == self.LOWER and \
               hasattr(val, "lower") and callable(val.lower) and \
               (not isinstance(val, Field) or val.type in TEXTTYPES):
                return val.lower()
            elif self.op == self.UPPER and \
                 hasattr(val, "upper") and callable(val.upper) and \
                 (not isinstance(val, Field) or val.type in TEXTTYPES):
                return val.upper()
        return val

    # -------------------------------------------------------------------------
    def represent(self, resource):

        try:
            rfield = S3ResourceField(resource, self.name)
        except:
            colname = None
        else:
            colname = rfield.colname
        if colname:
            if self.op is not None:
                return "%s.%s()" % (colname, self.op)
            else:
                return colname
        else:
            return "(%s?)" % self.name

    # -------------------------------------------------------------------------
    @classmethod
    def extract(cls, resource, row, field):
        """
            Extract a value from a Row

            @param resource: the resource
            @param row: the Row
            @param field: the field

            @return: field if field is not a Field/S3FieldSelector instance,
                     the value from the row otherwise
        """

        error = lambda fn: KeyError("Field not found: %s" % fn)

        t = type(field)

        if isinstance(field, Field):
            colname = str(field)
            tname, fname = colname.split(".", 1)

        elif t is S3FieldSelector:
            rfield = S3ResourceField(resource, field.name)
            colname = rfield.colname
            if not colname:
                # unresolvable selector
                raise error(field.name)
            fname = rfield.fname
            tname = rfield.tname

        elif t is S3ResourceField:
            colname = field.colname
            if not colname:
                # unresolved selector
                return None
            fname = field.fname
            tname = field.tname

        else:
            return field

        if type(row) is Row:
            try:
                if tname in row.__dict__:
                    value = ogetattr(ogetattr(row, tname), fname)
                else:
                    value = ogetattr(row, fname)
            except:
                try:
                    value = row[colname]
                except (KeyError, AttributeError):
                    raise error(colname)
        elif fname in row:
            value = row[fname]
        elif colname in row:
            value = row[colname]
        elif tname is not None and \
             tname in row and fname in row[tname]:
            value = row[tname][fname]
        else:
            raise error(colname)

        if callable(value):
            # Lazy virtual field
            try:
                value = value()
            except:
                current.log.error(sys.exc_info()[1])
                value = None

        if hasattr(field, "expr"):
            return field.expr(value)
        return value

    # -------------------------------------------------------------------------
    def resolve(self, resource):
        """
            Resolve this field against a resource

            @param resource: the resource
        """
        return S3ResourceField(resource, self.name)
# =============================================================================
# Short name for the S3FieldSelector class
#
FS = S3FieldSelector
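
# A short usage sketch (editor's note; the field names "name" and "obsolete"
# are hypothetical):
#
#   query = (FS("name").lower().like("%red cross%")) & (FS("obsolete") == False)
#
# Queries built this way combine with &, | and ~, and resolve against a
# resource via S3ResourceQuery.query() / __call__() below.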
# =============================================================================
class S3FieldPath(object):
    """ Helper class to parse field selectors """

    # -------------------------------------------------------------------------
    @classmethod
    def resolve(cls, resource, selector, tail=None):
        """
            Resolve a selector (=field path) against a resource

            @param resource: the S3Resource to resolve against
            @param selector: the field selector string
            @param tail: tokens to append to the selector

            The general syntax for a selector is:

                selector = {[alias].}{[key]$}[field|selector]

            (Parts in {} are optional, | indicates alternatives)

            * Alias can be:

                ~           refers to the resource addressed by the
                            preceding parts of the selector (=last resource)
                component   alias of a component of the last resource
                linktable   alias of a link table of the last resource
                table       name of a table that has a foreign key for
                            the last resource (auto-detect the key)
                key:table   same as above, but specifying the foreign key

            * Key can be:

                key         the name of a foreign key in the last resource
                context     a context expression

            * Field can be:

                fieldname   the name of a field or virtual field of the
                            last resource
                context     a context expression

            A "context expression" is a name enclosed in parentheses:

                (context)

            During parsing, context expressions get replaced by the
            string which has been configured for this name for the
            last resource with:

                s3db.configure(tablename, context = dict(name = "string"))

            With context expressions, the same selector can be used
            for different resources, each time resolving into the
            specific field path. However, the field addressed must
            be of the same type in all resources to form valid queries.

            If a context name can not be resolved, resolve() will
            still succeed - but the S3FieldPath returned will have
            colname=None and ftype="context" (=unresolvable context).
        """
        if not selector:
            raise SyntaxError("Invalid selector: %s" % selector)
        tokens = re.split("(\.|\$)", selector)
        if tail:
            tokens.extend(tail)
        parser = cls(resource, None, tokens)
        parser.original = selector
        return parser

    # -------------------------------------------------------------------------
    def __init__(self, resource, table, tokens):
        """
            Constructor - not to be called directly, use resolve() instead

            @param resource: the S3Resource
            @param table: the table
            @param tokens: the tokens as list
        """

        s3db = current.s3db

        if table is None:
            table = resource.table

        # Initialize
        self.original = None
        self.tname = table._tablename
        self.fname = None
        self.field = None
        self.ftype = None
        self.virtual = False
        self.colname = None
        self.joins = {}

        self.distinct = False
        self.multiple = True

        head = tokens.pop(0)
        tail = None

        if head and head[0] == "(" and head[-1] == ")":
            # Context expression
            head = head.strip("()")
            self.fname = head
            self.ftype = "context"

            if not resource:
                resource = s3db.resource(table, components=[])
            context = resource.get_config("context")
            if context and head in context:
                tail = self.resolve(resource, context[head], tail=tokens)
            else:
                # unresolvable
                pass

        elif tokens:
            # Resolve the tail
            op = tokens.pop(0)
            if tokens:
                if op == ".":
                    # head is a component or linktable alias, and tokens is
                    # a field expression in the component/linked table
                    if not resource:
                        resource = s3db.resource(table, components=[])
                    ktable, join, m, d = self._resolve_alias(resource, head)
                    self.multiple = m
                    self.distinct = d
                else:
                    # head is a foreign key in the current table and tokens is
                    # a field expression in the referenced table
                    ktable, join = self._resolve_key(table, head)
                    self.distinct = True
                if join is not None:
                    self.joins[ktable._tablename] = join
                tail = S3FieldPath(None, ktable, tokens)
            else:
                raise SyntaxError("trailing operator")

        if tail is None:
            # End of the expression
            if self.ftype != "context":
                # Expression is resolved, head is a field name:
                self.field = self._resolve_field(table, head)
                if not self.field:
                    self.virtual = True
                    self.ftype = "virtual"
                else:
                    self.virtual = False
                    self.ftype = str(self.field.type)
                self.fname = head
                self.colname = "%s.%s" % (self.tname, self.fname)
        else:
            # Read field data from tail
            self.tname = tail.tname
            self.fname = tail.fname
            self.field = tail.field
            self.ftype = tail.ftype
            self.virtual = tail.virtual
            self.colname = tail.colname

            self.distinct |= tail.distinct
            self.multiple |= tail.multiple

            self.joins.update(tail.joins)
    # -------------------------------------------------------------------------
    @staticmethod
    def _resolve_field(table, fieldname):
        """
            Resolve a field name against the table, recognizes "id" as
            table._id.name, and "uid" as current.xml.UID.

            @param table: the Table
            @param fieldname: the field name

            @return: the Field
        """

        if fieldname == "uid":
            fieldname = current.xml.UID

        if fieldname == "id":
            field = table._id
        elif fieldname in table.fields:
            field = ogetattr(table, fieldname)
        else:
            field = None
        return field

    # -------------------------------------------------------------------------
    @staticmethod
    def _resolve_key(table, fieldname):
        """
            Resolve a foreign key into the referenced table and the
            join and left join between the current table and the
            referenced table

            @param table: the current Table
            @param fieldname: the fieldname of the foreign key

            @return: tuple of (referenced table, join, left join)

            @raise: AttributeError if either the field or the
                    referenced table is not found
            @raise: SyntaxError if the field is not a foreign key
        """

        if fieldname in table.fields:
            f = table[fieldname]
        else:
            raise AttributeError("key not found: %s" % fieldname)

        ktablename, pkey, multiple = s3_get_foreign_key(f, m2m=False)

        if not ktablename:
            raise SyntaxError("%s is not a foreign key" % f)

        ktable = current.s3db.table(ktablename,
                                    AttributeError("undefined table %s" % ktablename),
                                    db_only=True)

        pkey = ktable[pkey] if pkey else ktable._id
        join = [ktable.on(f == pkey)]

        return ktable, join

    # -------------------------------------------------------------------------
    @staticmethod
    def _resolve_alias(resource, alias):
        """
            Resolve a table alias into the linked table (component, linktable
            or free join), and the joins and left joins between the current
            resource and the linked table.

            @param resource: the current S3Resource
            @param alias: the alias

            @return: tuple of (linked table, joins, left joins, multiple,
                     distinct), the two latter being flags to indicate
                     possible ambiguous query results (needed by the query
                     builder)

            @raise: AttributeError if one of the key fields or tables
                    can not be found
            @raise: SyntaxError if the alias can not be resolved (e.g.
                    because one of the keys isn't a foreign key, points
                    to the wrong table or is ambiguous)
        """

        # Alias for this resource?
        if alias in ("~", resource.alias):
            return resource.table, None, False, False

        multiple = True

        linked = resource.linked
        if linked and linked.alias == alias:
            # It's the linked table
            linktable = resource.table
            ktable = linked.table
            join = [ktable.on(ktable[linked.fkey] == linktable[linked.rkey])]
            return ktable, join, multiple, True

        s3db = current.s3db
        tablename = resource.tablename

        # Try to attach the component
        if alias not in resource.components and \
           alias not in resource.links:
            _alias = alias
            hook = s3db.get_component(tablename, alias)
            if not hook:
                _alias = s3db.get_alias(tablename, alias)
                if _alias:
                    hook = s3db.get_component(tablename, _alias)
            if hook:
                resource._attach(_alias, hook)

        components = resource.components
        links = resource.links

        if alias in components:
            # Is a component
            component = components[alias]
            ktable = component.table
            join = component._join()
            multiple = component.multiple

        elif alias in links:
            # Is a linktable
            link = links[alias]
            ktable = link.table
            join = link._join()

        elif "_" in alias:
            # Is a free join
            DELETED = current.xml.DELETED

            table = resource.table
            tablename = resource.tablename

            pkey = fkey = None

            # Find the table
            fkey, kname = (alias.split(":") + [None])[:2]
            if not kname:
                fkey, kname = kname, fkey
            ktable = s3db.table(kname,
                                AttributeError("table not found: %s" % kname),
                                db_only=True)

            if fkey is None:
                # Autodetect left key
                for fname in ktable.fields:
                    tn, key, m = s3_get_foreign_key(ktable[fname], m2m=False)
                    if not tn:
                        continue
                    if tn == tablename:
                        if fkey is not None:
                            raise SyntaxError("ambiguous foreign key in %s" %
                                              alias)
                        else:
                            fkey = fname
                            if key:
                                pkey = key
                if fkey is None:
                    raise SyntaxError("no foreign key for %s in %s" %
                                      (tablename, kname))
            else:
                # Check left key
                if fkey not in ktable.fields:
                    raise AttributeError("no field %s in %s" % (fkey, kname))

                tn, pkey, m = s3_get_foreign_key(ktable[fkey], m2m=False)
                if tn and tn != tablename:
                    raise SyntaxError("%s.%s is not a foreign key for %s" %
                                      (kname, fkey, tablename))
                elif not tn:
                    raise SyntaxError("%s.%s is not a foreign key" %
                                      (kname, fkey))

            # Default primary key
            if pkey is None:
                pkey = table._id.name

            # Build join
            query = (table[pkey] == ktable[fkey])
            if DELETED in ktable.fields:
                query &= ktable[DELETED] != True
            join = [ktable.on(query)]

        else:
            raise SyntaxError("Invalid tablename: %s" % alias)

        return ktable, join, multiple, True
# =============================================================================
class S3ResourceField(object):
    """ Helper class to resolve a field selector against a resource """

    # -------------------------------------------------------------------------
    def __init__(self, resource, selector, label=None):
        """
            Constructor

            @param resource: the resource
            @param selector: the field selector (string)
        """

        self.resource = resource
        self.selector = selector

        lf = S3FieldPath.resolve(resource, selector)

        self.tname = lf.tname
        self.fname = lf.fname
        self.colname = lf.colname

        self._joins = lf.joins

        self.distinct = lf.distinct
        self.multiple = lf.multiple

        self._join = None

        self.field = lf.field

        self.virtual = False
        self.represent = s3_unicode
        self.requires = None

        if self.field is not None:
            field = self.field
            self.ftype = str(field.type)
            if resource.linked is not None and self.ftype == "id":
                # Always represent the link-table's ID as the
                # linked record's ID => needed for data tables
                self.represent = lambda i, resource=resource: \
                                 resource.component_id(None, i)
            else:
                self.represent = field.represent
            self.requires = field.requires
        elif self.colname:
            self.virtual = True
            self.ftype = "virtual"
        else:
            self.ftype = "context"

        # Fall back to the field label
        if label is None:
            fname = self.fname
            if fname in ["L1", "L2", "L3", "L4", "L5"]:
                try:
                    label = current.gis.get_location_hierarchy(fname)
                except:
                    label = None
            elif fname == "L0":
                label = current.messages.COUNTRY
            if label is None:
                f = self.field
                if f:
                    label = f.label
                elif fname:
                    label = " ".join([s.strip().capitalize()
                                      for s in fname.split("_") if s])
                else:
                    label = None

        self.label = label
        self.show = True

    # -------------------------------------------------------------------------
    def __repr__(self):
        """ String representation of this instance """

        return "<S3ResourceField " \
               "selector='%s' " \
               "label='%s' " \
               "table='%s' " \
               "field='%s' " \
               "type='%s'>" % \
               (self.selector, self.label, self.tname, self.fname, self.ftype)

    # -------------------------------------------------------------------------
    @property
    def join(self):
        """
            Implicit join (Query) for this field, for backwards-compatibility
        """

        if self._join is not None:
            return self._join

        join = self._join = {}
        for tablename, joins in self._joins.items():
            query = None
            for expression in joins:
                if query is None:
                    query = expression.second
                else:
                    query &= expression.second
            if query:
                join[tablename] = query
        return join

    # -------------------------------------------------------------------------
    @property
    def left(self):
        """
            The left joins for this field, for backwards-compatibility
        """

        return self._joins

    # -------------------------------------------------------------------------
    def extract(self, row, represent=False, lazy=False):
        """
            Extract the value for this field from a row

            @param row: the Row
            @param represent: render a text representation for the value
            @param lazy: return a lazy representation handle if available
        """

        tname = self.tname
        fname = self.fname
        colname = self.colname
        error = "Field not found in Row: %s" % colname

        if type(row) is Row:
            try:
                if tname in row.__dict__:
                    value = ogetattr(ogetattr(row, tname), fname)
                else:
                    value = ogetattr(row, fname)
            except:
                try:
                    value = row[colname]
                except (KeyError, AttributeError):
                    raise KeyError(error)
        elif fname in row:
            value = row[fname]
        elif colname in row:
            value = row[colname]
        elif tname is not None and \
             tname in row and fname in row[tname]:
            value = row[tname][fname]
        else:
            raise KeyError(error)

        if callable(value):
            # Lazy virtual field
            try:
                value = value()
            except:
                current.log.error(sys.exc_info()[1])
                value = None

        if represent:
            renderer = self.represent
            if callable(renderer):
                if lazy and hasattr(renderer, "bulk"):
                    return S3RepresentLazy(value, renderer)
                else:
                    return renderer(value)
            else:
                return s3_unicode(value)
        else:
            return value
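
# A brief usage sketch (editor's note; the selector and row are hypothetical):
#
#   rfield = S3ResourceField(resource, "organisation_id$name")
#   raw = rfield.extract(row)
#   text = rfield.extract(row, represent=True)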
# =============================================================================
class S3Joins(object):
    """ A collection of joins """

    def __init__(self, tablename, joins=None):
        """
            Constructor

            @param tablename: the name of the master table
            @param joins: list of joins
        """

        self.tablename = tablename
        self.joins = {}
        self.tables = set()

        self.add(joins)

    # -------------------------------------------------------------------------
    def __iter__(self):
        """
            Iterate over the names of all joined tables in the collection
        """

        return self.joins.__iter__()

    # -------------------------------------------------------------------------
    def __getitem__(self, tablename):
        """
            Get the list of joins for a table

            @param tablename: the tablename
        """

        return self.joins.__getitem__(tablename)

    # -------------------------------------------------------------------------
    def __setitem__(self, tablename, joins):
        """
            Update the joins for a table

            @param tablename: the tablename
            @param joins: the list of joins for this table
        """

        master = self.tablename
        joins_dict = self.joins

        tables = current.db._adapter.tables

        joins_dict[tablename] = joins
        if len(joins) > 1:
            for join in joins:
                try:
                    tname = join.first._tablename
                except AttributeError:
                    tname = str(join.first)
                if tname not in joins_dict and \
                   master in tables(join.second):
                    joins_dict[tname] = [join]
        self.tables.add(tablename)
        return

    # -------------------------------------------------------------------------
    def keys(self):
        """
            Get a list of names of all joined tables
        """

        return self.joins.keys()

    # -------------------------------------------------------------------------
    def items(self):
        """
            Get a list of tuples (tablename, [joins]) for all joined tables
        """

        return self.joins.items()

    # -------------------------------------------------------------------------
    def values(self):
        """
            Get a list of joins for all joined tables

            @return: a nested list like [[join, join, ...], ...]
        """

        return self.joins.values()

    # -------------------------------------------------------------------------
    def add(self, joins):
        """
            Add joins to this collection

            @param joins: a join or a list/tuple of joins

            @return: the list of names of all tables for which joins have
                     been added to the collection
        """

        tablenames = set()
        if joins:
            if not isinstance(joins, (list, tuple)):
                joins = [joins]
            for join in joins:
                tablename = join.first._tablename
                self[tablename] = [join]
                tablenames.add(tablename)
        return list(tablenames)

    # -------------------------------------------------------------------------
    def extend(self, other):
        """
            Extend this collection with the joins from another collection

            @param other: the other collection (S3Joins), or a dict like
                          {tablename: [join, join]}

            @return: the list of names of all tables for which joins have
                     been added to the collection
        """

        if type(other) is S3Joins:
            add = self.tables.add
        else:
            add = None
        joins = self.joins if type(other) is S3Joins else self
        for tablename in other:
            if tablename not in self.joins:
                joins[tablename] = other[tablename]
                if add:
                    add(tablename)
        return other.keys()

    # -------------------------------------------------------------------------
    def __repr__(self):
        """
            String representation of this collection
        """

        return "<S3Joins %s>" % str([str(j) for j in self.as_list()])

    # -------------------------------------------------------------------------
    def as_list(self, tablenames=None, aqueries=None, prefer=None):
        """
            Return joins from this collection as list

            @param tablenames: the names of the tables for which joins
                               shall be returned, defaults to all tables
                               in the collection. Dependencies will be
                               included automatically (if available)
            @param aqueries: dict of accessible-queries {tablename: query}
                             to include in the joins; if there is no entry
                             for a particular table, then it will be looked
                             up from current.auth and added to the dict.
                             To prevent differential authorization of a
                             particular joined table, set {<tablename>: None}
                             in the dict
            @param prefer: If any table or any of its dependencies would be
                           joined by this S3Joins collection, then skip this
                           table here (and enforce it to be joined by the
                           preferred collection), to prevent duplication of
                           left joins as inner joins:
                               join = inner_joins.as_list(prefer=left_joins)
                               left = left_joins.as_list()

            @return: a list of joins, ordered by their interdependency, which
                     can be used as join/left parameter of Set.select()
        """

        accessible_query = current.auth.s3_accessible_query

        if tablenames is None:
            tablenames = self.tables
        else:
            tablenames = set(tablenames)

        skip = set()
        if prefer:
            preferred_joins = prefer.as_list(tablenames=tablenames)
            for join in preferred_joins:
                try:
                    tname = join.first._tablename
                except AttributeError:
                    tname = str(join.first)
                skip.add(tname)
        tablenames -= skip

        joins = self.joins

        # Resolve dependencies
        required_tables = set()
        get_tables = current.db._adapter.tables
        for tablename in tablenames:
            if tablename not in joins or \
               tablename == self.tablename or \
               tablename in skip:
                continue

            join_list = joins[tablename]
            preferred = False
            dependencies = set()
            for join in join_list:
                join_tables = set(get_tables(join.second))
                if join_tables:
                    if any((tname in skip for tname in join_tables)):
                        preferred = True
                    dependencies |= join_tables
            if preferred:
                skip.add(tablename)
                skip |= dependencies
                prefer.extend({tablename: join_list})
            else:
                required_tables.add(tablename)
                required_tables |= dependencies

        # Collect joins
        joins_dict = {}
        for tablename in required_tables:
            if tablename not in joins or tablename == self.tablename:
                continue
            for join in joins[tablename]:
                j = join
                table = j.first
                tname = table._tablename
                if aqueries is not None and tname in tablenames:
                    if tname not in aqueries:
                        aquery = accessible_query("read", table)
                        aqueries[tname] = aquery
                    else:
                        aquery = aqueries[tname]
                    if aquery is not None:
                        j = join.first.on(join.second & aquery)
                joins_dict[tname] = j

        # Sort joins (if possible)
        try:
            return self.sort(joins_dict.values())
        except RuntimeError:
            return joins_dict.values()

    # -------------------------------------------------------------------------
    @classmethod
    def sort(cls, joins):
        """
            Sort a list of left-joins by their interdependency

            @param joins: the list of joins
        """

        if len(joins) <= 1:
            return joins
        r = list(joins)

        tables = current.db._adapter.tables

        append = r.append
        head = None
        for i in xrange(len(joins)):
            join = r.pop(0)
            head = join
            tablenames = tables(join.second)
            for j in r:
                try:
                    tn = j.first._tablename
                except AttributeError:
                    tn = str(j.first)
                if tn in tablenames:
                    head = None
                    break
            if head is not None:
                break
            else:
                append(join)

        if head is not None:
            return [head] + cls.sort(r)
        else:
            raise RuntimeError("circular join dependency")
# =============================================================================
class S3ResourceQuery(object):
    """
        Helper class representing a resource query
        - unlike DAL Query objects, these can be converted to/from URL filters
    """

    # Supported operators
    NOT = "not"
    AND = "and"
    OR = "or"

    LT = "lt"
    LE = "le"
    EQ = "eq"
    NE = "ne"
    GE = "ge"
    GT = "gt"
    LIKE = "like"
    BELONGS = "belongs"
    CONTAINS = "contains"
    ANYOF = "anyof"
    TYPEOF = "typeof"

    COMPARISON = [LT, LE, EQ, NE, GE, GT,
                  LIKE, BELONGS, CONTAINS, ANYOF, TYPEOF]

    OPERATORS = [NOT, AND, OR] + COMPARISON
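
    # Editor's note: in URL filters, the comparison operators appear as
    # double-underscore suffixes of the field selector (see S3URLQuery
    # below; the selector names are hypothetical):
    #   ?~.name__like=*red*         -> LIKE
    #   ?task.status__belongs=1,2   -> BELONGS
    #   ?~.age__gt=5                -> GT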
    # -------------------------------------------------------------------------
    def __init__(self, op, left=None, right=None):
        """ Constructor """

        if op not in self.OPERATORS:
            raise SyntaxError("Invalid operator: %s" % op)

        self.op = op

        self.left = left
        self.right = right

    # -------------------------------------------------------------------------
    def __and__(self, other):
        """ AND """

        return S3ResourceQuery(self.AND, self, other)

    # -------------------------------------------------------------------------
    def __or__(self, other):
        """ OR """

        return S3ResourceQuery(self.OR, self, other)

    # -------------------------------------------------------------------------
    def __invert__(self):
        """ NOT """

        if self.op == self.NOT:
            return self.left
        else:
            return S3ResourceQuery(self.NOT, self)

    # -------------------------------------------------------------------------
    def _joins(self, resource, left=False):

        op = self.op
        l = self.left
        r = self.right

        if op in (self.AND, self.OR):
            if isinstance(l, S3ResourceQuery):
                ljoins, ld = l._joins(resource, left=left)
            else:
                ljoins, ld = {}, False
            if isinstance(r, S3ResourceQuery):
                rjoins, rd = r._joins(resource, left=left)
            else:
                rjoins, rd = {}, False

            ljoins = dict(ljoins)
            ljoins.update(rjoins)

            return (ljoins, ld or rd)

        elif op == self.NOT:
            if isinstance(l, S3ResourceQuery):
                return l._joins(resource, left=left)
            else:
                return {}, False

        joins, distinct = {}, False

        if isinstance(l, S3FieldSelector):
            try:
                rfield = l.resolve(resource)
            except (SyntaxError, AttributeError):
                pass
            else:
                distinct = rfield.distinct
                if distinct and left or not distinct and not left:
                    joins = rfield._joins

        return (joins, distinct)

    # -------------------------------------------------------------------------
    def fields(self):
        """ Get all field selectors involved with this query """

        op = self.op
        l = self.left
        r = self.right

        if op in (self.AND, self.OR):
            lf = l.fields()
            rf = r.fields()
            return lf + rf
        elif op == self.NOT:
            return l.fields()
        elif isinstance(l, S3FieldSelector):
            return [l.name]
        else:
            return []

    # -------------------------------------------------------------------------
    def split(self, resource):
        """
            Split this query into a real query and a virtual one (AND)

            @param resource: the S3Resource
            @return: tuple (DAL-translatable sub-query, virtual filter),
                     both S3ResourceQuery instances
        """

        op = self.op
        l = self.left
        r = self.right

        if op == self.AND:
            lq, lf = l.split(resource) \
                     if isinstance(l, S3ResourceQuery) else (l, None)
            rq, rf = r.split(resource) \
                     if isinstance(r, S3ResourceQuery) else (r, None)
            q = lq
            if rq is not None:
                if q is not None:
                    q &= rq
                else:
                    q = rq
            f = lf
            if rf is not None:
                if f is not None:
                    f &= rf
                else:
                    f = rf
            return q, f
        elif op == self.OR:
            lq, lf = l.split(resource) \
                     if isinstance(l, S3ResourceQuery) else (l, None)
            rq, rf = r.split(resource) \
                     if isinstance(r, S3ResourceQuery) else (r, None)
            if lf is not None or rf is not None:
                return None, self
            else:
                q = lq
                if rq is not None:
                    if q is not None:
                        q |= rq
                    else:
                        q = rq
                return q, None
        elif op == self.NOT:
            if isinstance(l, S3ResourceQuery):
                if l.op == self.OR:
                    i = (~(l.left)) & (~(l.right))
                    return i.split(resource)
                else:
                    q, f = l.split(resource)
                    if q is not None and f is not None:
                        return None, self
                    elif q is not None:
                        return ~q, None
                    elif f is not None:
                        return None, ~f
            else:
                return ~l, None

        l = self.left
        try:
            if isinstance(l, S3FieldSelector):
                lfield = l.resolve(resource)
            else:
                lfield = S3ResourceField(resource, l)
        except:
            lfield = None
        if not lfield or lfield.field is None:
            return None, self
        else:
            return self, None

    # -------------------------------------------------------------------------
    def transform(self, resource):
        """
            Placeholder for transformation method

            @param resource: the S3Resource
        """

        # @todo: implement
        return self
    # -------------------------------------------------------------------------
    def query(self, resource):
        """
            Convert this S3ResourceQuery into a DAL query, ignoring virtual
            fields (the necessary joins for this query can be constructed
            with the joins() method)

            @param resource: the resource to resolve the query against
        """

        op = self.op
        l = self.left
        r = self.right

        # Resolve query components
        if op == self.AND:
            l = l.query(resource) if isinstance(l, S3ResourceQuery) else l
            r = r.query(resource) if isinstance(r, S3ResourceQuery) else r
            if l is None or r is None:
                return None
            elif l is False or r is False:
                return l if r is False else r if l is False else False
            else:
                return l & r
        elif op == self.OR:
            l = l.query(resource) if isinstance(l, S3ResourceQuery) else l
            r = r.query(resource) if isinstance(r, S3ResourceQuery) else r
            if l is None or r is None:
                return None
            elif l is False or r is False:
                return l if r is False else r if l is False else False
            else:
                return l | r
        elif op == self.NOT:
            l = l.query(resource) if isinstance(l, S3ResourceQuery) else l
            if l is None:
                return None
            elif l is False:
                return False
            else:
                return ~l

        # Resolve the fields
        if isinstance(l, S3FieldSelector):
            try:
                rfield = S3ResourceField(resource, l.name)
            except:
                return None
            if rfield.virtual:
                return None
            elif not rfield.field:
                return False
            lfield = l.expr(rfield.field)
        elif isinstance(l, Field):
            lfield = l
        else:
            return None # not a field at all
        if isinstance(r, S3FieldSelector):
            try:
                rfield = S3ResourceField(resource, r.name)
            except:
                return None
            if rfield.virtual:
                return None
            elif not rfield.field:
                return False
            rfield = r.expr(rfield.field)
        else:
            rfield = r

        # Resolve the operator
        invert = False
        query_bare = self._query_bare
        ftype = str(lfield.type)
        if isinstance(rfield, (list, tuple)) and ftype[:4] != "list":
            if op == self.EQ:
                op = self.BELONGS
            elif op == self.NE:
                op = self.BELONGS
                invert = True
            elif op not in (self.BELONGS, self.TYPEOF):
                query = None
                for v in rfield:
                    q = query_bare(op, lfield, v)
                    if q is not None:
                        if query is None:
                            query = q
                        else:
                            query |= q
                return query

        # Convert date(time) strings
        if ftype == "datetime" and \
           isinstance(rfield, basestring):
            rfield = S3TypeConverter.convert(datetime.datetime, rfield)
        elif ftype == "date" and \
             isinstance(rfield, basestring):
            rfield = S3TypeConverter.convert(datetime.date, rfield)

        query = query_bare(op, lfield, rfield)
        if invert:
            query = ~(query)
        return query
    # -------------------------------------------------------------------------
    def _query_bare(self, op, l, r):
        """
            Translate a filter expression into a DAL query

            @param op: the operator
            @param l: the left operand
            @param r: the right operand
        """

        if op == self.CONTAINS:
            q = l.contains(r, all=True)
        elif op == self.ANYOF:
            # NB str/int doesn't matter here
            q = l.contains(r, all=False)
        elif op == self.BELONGS:
            q = self._query_belongs(l, r)
        elif op == self.TYPEOF:
            q = self._query_typeof(l, r)
        elif op == self.LIKE:
            q = l.like(s3_unicode(r))
        elif op == self.LT:
            q = l < r
        elif op == self.LE:
            q = l <= r
        elif op == self.EQ:
            q = l == r
        elif op == self.NE:
            q = l != r
        elif op == self.GE:
            q = l >= r
        elif op == self.GT:
            q = l > r
        else:
            q = None
        return q

    # -------------------------------------------------------------------------
    def _query_typeof(self, l, r):
        """
            Translate TYPEOF into a DAL expression

            @param l: the left operand
            @param r: the right operand
        """

        hierarchy, field, nodeset, none = self._resolve_hierarchy(l, r)
        if not hierarchy:
            # Not a hierarchical query => use simple belongs
            return self._query_belongs(l, r)
        if not field:
            # Field does not exist (=>skip subquery)
            return None

        # Construct the subquery
        list_type = str(field.type)[:5] == "list:"
        if nodeset:
            if list_type:
                q = (field.contains(list(nodeset)))
            elif len(nodeset) > 1:
                q = (field.belongs(nodeset))
            else:
                q = (field == tuple(nodeset)[0])
        else:
            q = None

        if none:
            # None needs special handling with older DAL versions
            if not list_type:
                if q is None:
                    q = (field == None)
                else:
                    q |= (field == None)
        if q is None:
            # Values not resolvable (=subquery always fails)
            q = field.belongs(set())

        return q
    # -------------------------------------------------------------------------
    @classmethod
    def _resolve_hierarchy(cls, l, r):
        """
            Resolve the hierarchical lookup in a typeof-query

            @param l: the left operand
            @param r: the right operand
        """

        from s3hierarchy import S3Hierarchy

        tablename = l.tablename

        # Connect to the hierarchy
        hierarchy = S3Hierarchy(tablename)
        if hierarchy.config is None:
            # Reference to a hierarchical table?
            ktablename, key = s3_get_foreign_key(l)[:2]
            if ktablename:
                hierarchy = S3Hierarchy(ktablename)
        else:
            key = None

        list_type = str(l.type)[:5] == "list:"
        if hierarchy.config is None and not list_type:
            # No hierarchy configured and no list:reference
            return False, None, None, None

        field, keys = l, r

        if not key:

            s3db = current.s3db

            table = s3db[tablename]
            if l.name != table._id.name:
                # Lookup-field rather than primary key => resolve it

                # Build a filter expression for the lookup table
                fs = S3FieldSelector(l.name)
                if list_type:
                    expr = fs.contains(r)
                else:
                    expr = cls._query_belongs(l, r, field = fs)

                # Resolve filter expression into subquery
                resource = s3db.resource(tablename)
                if expr is not None:
                    subquery = expr.query(resource)
                else:
                    subquery = None
                if not subquery:
                    # Field doesn't exist
                    return True, None, None, None

                # Execute query and retrieve the lookup table IDs
                DELETED = current.xml.DELETED
                if DELETED in table.fields:
                    subquery &= table[DELETED] != True
                rows = current.db(subquery).select(table._id)

                # Override field/keys
                field = table[hierarchy.pkey.name]
                keys = set([row[table._id.name] for row in rows])

        nodeset, none = None, False
        if keys:
            # Lookup all descendant types from the hierarchy
            none = False
            if not isinstance(keys, (list, tuple, set)):
                keys = set([keys])
            nodes = set()
            for node in keys:
                if node is None:
                    none = True
                else:
                    try:
                        node_id = long(node)
                    except ValueError:
                        continue
                    nodes.add(node_id)
            if hierarchy.config is not None:
                nodeset = hierarchy.findall(nodes, inclusive=True)
            else:
                nodeset = nodes
        elif keys is None:
            none = True

        return True, field, nodeset, none
    # -------------------------------------------------------------------------
    @staticmethod
    def _query_belongs(l, r, field=None):
        """
            Resolve BELONGS into a DAL expression (or S3ResourceQuery if
            field is an S3FieldSelector)

            @param l: the left operand
            @param r: the right operand
            @param field: alternative left operand
        """

        if field is None:
            field = l

        expr = None
        none = False

        if not isinstance(r, (list, tuple, set)):
            items = [r]
        else:
            items = r
        if None in items:
            none = True
            items = [item for item in items if item is not None]

        wildcard = False

        if str(l.type) in ("string", "text"):
            for item in items:
                if isinstance(item, basestring):
                    if "*" in item and "%" not in item:
                        s = item.replace("*", "%")
                    else:
                        s = item
                else:
                    try:
                        s = str(item)
                    except:
                        continue
                if "%" in s:
                    wildcard = True
                    _expr = (field.like(s))
                else:
                    _expr = (field == s)

                if expr is None:
                    expr = _expr
                else:
                    expr |= _expr

        if not wildcard:
            if len(items) == 1:
                # Don't use belongs() for a single value
                expr = (field == tuple(items)[0])
            elif items:
                expr = (field.belongs(items))

        if none:
            # None needs special handling with older DAL versions
            if expr is None:
                expr = (field == None)
            else:
                expr |= (field == None)
        elif expr is None:
            expr = field.belongs(set())

        return expr
    # -------------------------------------------------------------------------
    def __call__(self, resource, row, virtual=True):
        """
            Probe whether the row matches the query

            @param resource: the resource to resolve the query against
            @param row: the DB row
            @param virtual: execute only virtual queries
        """

        if self.op == self.AND:
            l = self.left(resource, row, virtual=False)
            r = self.right(resource, row, virtual=False)
            if l is None:
                return r
            if r is None:
                return l
            return l and r
        elif self.op == self.OR:
            l = self.left(resource, row, virtual=False)
            r = self.right(resource, row, virtual=False)
            if l is None:
                return r
            if r is None:
                return l
            return l or r
        elif self.op == self.NOT:
            l = self.left(resource, row)
            if l is None:
                return None
            else:
                return not l

        real = False
        left = self.left
        if isinstance(left, S3FieldSelector):
            try:
                lfield = left.resolve(resource)
            except (AttributeError, KeyError, SyntaxError):
                return None
            if lfield.field is not None:
                real = True
            elif not lfield.virtual:
                # Unresolvable expression => skip
                return None
        else:
            lfield = left
            if isinstance(left, Field):
                real = True
        right = self.right
        if isinstance(right, S3FieldSelector):
            try:
                rfield = right.resolve(resource)
            except (AttributeError, KeyError, SyntaxError):
                return None
            if rfield.virtual:
                real = False
            elif rfield.field is None:
                # Unresolvable expression => skip
                return None
        else:
            rfield = right
        if virtual and real:
            return None

        extract = lambda f: S3FieldSelector.extract(resource, row, f)
        try:
            l = extract(lfield)
            r = extract(rfield)
        except (KeyError, SyntaxError):
            current.log.error(sys.exc_info()[1])
            return None

        if isinstance(left, S3FieldSelector):
            l = left.expr(l)
        if isinstance(right, S3FieldSelector):
            r = right.expr(r)

        op = self.op
        invert = False
        probe = self._probe
        if isinstance(rfield, (list, tuple)) and \
           not isinstance(lfield, (list, tuple)):
            if op == self.EQ:
                op = self.BELONGS
            elif op == self.NE:
                op = self.BELONGS
                invert = True
            elif op != self.BELONGS:
                for v in r:
                    try:
                        r = probe(op, l, v)
                    except (TypeError, ValueError):
                        r = False
                    if r:
                        return True
                return False
        try:
            r = probe(op, l, r)
        except (TypeError, ValueError):
            return False

        if invert and r is not None:
            return not r
        else:
            return r
    # -------------------------------------------------------------------------
    def _probe(self, op, l, r):
        """
            Probe whether the value pair matches the query

            @param op: the operator
            @param l: the left value
            @param r: the right value
        """

        result = False

        convert = S3TypeConverter.convert

        # Fallbacks for TYPEOF
        if op == self.TYPEOF:
            if isinstance(l, (list, tuple, set)):
                op = self.ANYOF
            elif isinstance(r, (list, tuple, set)):
                op = self.BELONGS
            else:
                op = self.EQ

        if op == self.CONTAINS:
            r = convert(l, r)
            result = self._probe_contains(l, r)
        elif op == self.ANYOF:
            if not isinstance(r, (list, tuple, set)):
                r = [r]
            for v in r:
                if isinstance(l, (list, tuple, set, basestring)):
                    if self._probe_contains(l, v):
                        return True
                elif l == v:
                    return True
            return False
        elif op == self.BELONGS:
            if not isinstance(r, (list, tuple, set)):
                r = [r]
            r = convert(l, r)
            result = self._probe_contains(r, l)
        elif op == self.LIKE:
            pattern = re.escape(str(r)).replace("\\%", ".*").replace(".*.*", "\\%")
            return re.match(pattern, str(l)) is not None
        else:
            r = convert(l, r)
            if op == self.LT:
                result = l < r
            elif op == self.LE:
                result = l <= r
            elif op == self.EQ:
                result = l == r
            elif op == self.NE:
                result = l != r
            elif op == self.GE:
                result = l >= r
            elif op == self.GT:
                result = l > r
        return result

    # -------------------------------------------------------------------------
    @staticmethod
    def _probe_contains(a, b):
        """
            Probe whether a contains b
        """

        if a is None:
            return False

        try:
            if isinstance(a, basestring):
                return str(b) in a
            elif isinstance(a, (list, tuple, set)):
                if isinstance(b, (list, tuple, set)):
                    convert = S3TypeConverter.convert
                    found = True
                    for _b in b:
                        if _b not in a:
                            found = False
                            for _a in a:
                                try:
                                    if convert(_a, _b) == _a:
                                        found = True
                                        break
                                except (TypeError, ValueError):
                                    continue
                        if not found:
                            break
                    return found
                else:
                    return b in a
            else:
                return str(b) in str(a)
        except:
            return False
    # -------------------------------------------------------------------------
    def represent(self, resource):
        """
            Represent this query as a human-readable string.

            @param resource: the resource to resolve the query against
        """

        op = self.op
        l = self.left
        r = self.right
        if op == self.AND:
            l = l.represent(resource) \
                if isinstance(l, S3ResourceQuery) else str(l)
            r = r.represent(resource) \
                if isinstance(r, S3ResourceQuery) else str(r)
            return "(%s and %s)" % (l, r)
        elif op == self.OR:
            l = l.represent(resource) \
                if isinstance(l, S3ResourceQuery) else str(l)
            r = r.represent(resource) \
                if isinstance(r, S3ResourceQuery) else str(r)
            return "(%s or %s)" % (l, r)
        elif op == self.NOT:
            l = l.represent(resource) \
                if isinstance(l, S3ResourceQuery) else str(l)
            return "(not %s)" % l
        else:
            if isinstance(l, S3FieldSelector):
                l = l.represent(resource)
            elif isinstance(l, basestring):
                l = '"%s"' % l
            if isinstance(r, S3FieldSelector):
                r = r.represent(resource)
            elif isinstance(r, basestring):
                r = '"%s"' % r
            if op == self.CONTAINS:
                return "(%s in %s)" % (r, l)
            elif op == self.BELONGS:
                return "(%s in %s)" % (l, r)
            elif op == self.ANYOF:
                return "(%s contains any of %s)" % (l, r)
            elif op == self.TYPEOF:
                return "(%s is a type of %s)" % (l, r)
            elif op == self.LIKE:
                return "(%s like %s)" % (l, r)
            elif op == self.LT:
                return "(%s < %s)" % (l, r)
            elif op == self.LE:
                return "(%s <= %s)" % (l, r)
            elif op == self.EQ:
                return "(%s == %s)" % (l, r)
            elif op == self.NE:
                return "(%s != %s)" % (l, r)
            elif op == self.GE:
                return "(%s >= %s)" % (l, r)
            elif op == self.GT:
                return "(%s > %s)" % (l, r)
            else:
                return "(%s ?%s? %s)" % (l, op, r)
    # -------------------------------------------------------------------------
    def serialize_url(self, resource=None):
        """
            Serialize this query as URL query

            @param resource: the resource to resolve field selectors against

            @return: a Storage of URL variables
        """

        op = self.op
        l = self.left
        r = self.right

        url_query = Storage()

        def _serialize(n, o, v, invert):
            try:
                quote = lambda s: s if "," not in s else '"%s"' % s
                if isinstance(v, list):
                    v = ",".join([quote(S3TypeConverter.convert(str, val))
                                  for val in v])
                else:
                    v = quote(S3TypeConverter.convert(str, v))
            except:
                return
            if "." not in n:
                if resource is not None:
                    n = "~.%s" % n
                else:
                    return url_query
            if o == self.LIKE:
                v = v.replace("%", "*")
            if o == self.EQ:
                operator = ""
            else:
                operator = "__%s" % o
            if invert:
                operator = "%s!" % operator
            key = "%s%s" % (n, operator)
            if key in url_query:
                url_query[key] = "%s,%s" % (url_query[key], v)
            else:
                url_query[key] = v
            return url_query

        if op == self.AND:
            lu = l.serialize_url(resource=resource)
            url_query.update(lu)
            ru = r.serialize_url(resource=resource)
            url_query.update(ru)
        elif op == self.OR:
            sub = self._or()
            if sub is None:
                # This OR-subtree is not serializable
                return url_query
            n, o, v, invert = sub
            _serialize(n, o, v, invert)
        elif op == self.NOT:
            lu = l.serialize_url(resource=resource)
            for k in lu:
                url_query["%s!" % k] = lu[k]
        elif isinstance(l, S3FieldSelector):
            _serialize(l.name, op, r, False)

        return url_query

    # -------------------------------------------------------------------------
    def _or(self):
        """
            Helper method to URL-serialize an OR-subtree in a query in
            alternative field selector syntax if all of its sub-queries
            use the same operator and value (this is needed to URL-serialize
            an S3SearchSimpleWidget query).
        """

        op = self.op
        l = self.left
        r = self.right

        if op == self.AND:
            return None
        elif op == self.NOT:
            lname, lop, lval, linv = l._or()
            return (lname, lop, lval, not linv)
        elif op == self.OR:
            lvars = l._or()
            rvars = r._or()
            if lvars is None or rvars is None:
                return None
            lname, lop, lval, linv = lvars
            rname, rop, rval, rinv = rvars

            if lop != rop or linv != rinv:
                return None
            if lname == rname:
                return (lname, lop, [lval, rval], linv)
            elif lval == rval:
                return ("%s|%s" % (lname, rname), lop, lval, linv)
            else:
                return None
        else:
            return (l.name, op, r, False)
# =============================================================================
class S3URLQuery(object):
""" URL Query Parser """
# -------------------------------------------------------------------------
@classmethod
def parse(cls, resource, vars):
"""
Construct a Storage of S3ResourceQuery from a Storage of get_vars
@param resource: the S3Resource
@param vars: the get_vars
@return: Storage of S3ResourceQuery like {alias: query}, where
alias is the alias of the component the query concerns
"""
query = Storage()
if resource is None:
return query
if not vars:
return query
subquery = cls._subquery
allof = lambda l, r: l if r is None else r if l is None else r & l
for key, value in vars.iteritems():
if key == "$filter":
# Instantiate the advanced filter parser
parser = S3URLQueryParser()
if parser.parser is None:
# not available
continue
# Multiple $filter expressions?
expressions = value if type(value) is list else [value]
# Default alias (=master)
default_alias = resource.alias
# Parse all expressions
for expression in expressions:
parsed = parser.parse(expression)
for alias in parsed:
q = parsed[alias]
qalias = alias if alias is not None else default_alias
if qalias not in query:
query[qalias] = [q]
else:
query[qalias].append(q)
# Stop here
continue
elif not("." in key or key[0] == "(" and ")" in key):
# Not a filter expression
continue
# Process old-style filters
selectors, op, invert = cls.parse_expression(key)
if type(value) is list:
# Multiple queries with the same selector (AND)
q = reduce(allof,
[subquery(selectors, op, invert, v) for v in value],
None)
else:
q = subquery(selectors, op, invert, value)
if q is None:
continue
# Append to query
if len(selectors) > 1:
aliases = [s.split(".", 1)[0] for s in selectors]
if len(set(aliases)) == 1:
alias = aliases[0]
else:
alias = resource.alias
else:
alias = selectors[0].split(".", 1)[0]
if alias == "~":
alias = resource.alias
if alias not in query:
query[alias] = [q]
else:
query[alias].append(q)
return query
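    # Illustrative sketch (hypothetical get_vars, for orientation only):
    #
    #   get_vars = {"~.name__like": "*pattern*", "org.acronym": "XYZ"}
    #   S3URLQuery.parse(resource, get_vars)
    #   #=> {resource.alias: [<name LIKE query>], "org": [<acronym EQ query>]}
    #
    # i.e. sub-queries are grouped by the component alias they refer to.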
# -------------------------------------------------------------------------
@staticmethod
def parse_url(url):
"""
Parse a URL query into get_vars
            @param url: the URL query string
@return: the get_vars (Storage)
"""
if not url:
return Storage()
elif "?" in url:
query = url.split("?", 1)[1]
elif "=" in url:
query = url
else:
return Storage()
import cgi
dget = cgi.parse_qsl(query, keep_blank_values=1)
get_vars = Storage()
for (key, value) in dget:
if key in get_vars:
if type(get_vars[key]) is list:
get_vars[key].append(value)
else:
get_vars[key] = [get_vars[key], value]
else:
get_vars[key] = value
return get_vars
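    # Illustrative sketch (hypothetical URL, for orientation only):
    #
    #   S3URLQuery.parse_url("index?a=1&a=2&b=3")
    #   #=> Storage({"a": ["1", "2"], "b": "3"})
    #
    # Repeated keys are collected into lists, single keys stay scalar.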
# -------------------------------------------------------------------------
@staticmethod
def parse_expression(key):
"""
Parse a URL expression
@param key: the key for the URL variable
@return: tuple (selectors, operator, invert)
"""
if key[-1] == "!":
invert = True
else:
invert = False
fs = key.rstrip("!")
op = None
if "__" in fs:
fs, op = fs.split("__", 1)
op = op.strip("_")
if not op:
op = "eq"
if "|" in fs:
selectors = [s for s in fs.split("|") if s]
else:
selectors = [fs]
return selectors, op, invert
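    # Illustrative sketch of parse_expression() (hypothetical keys):
    #
    #   parse_expression("~.name__like!") #=> (["~.name"], "like", True)
    #   parse_expression("first|last")    #=> (["first", "last"], "eq", False)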
# -------------------------------------------------------------------------
@staticmethod
def parse_value(value):
"""
Parse a URL query value
@param value: the value
@return: the parsed value
"""
uquote = lambda w: w.replace('\\"', '\\"\\') \
.strip('"') \
.replace('\\"\\', '"')
NONE = ("NONE", "None")
if type(value) is not list:
value = [value]
vlist = []
for item in value:
w = ""
quote = False
ignore_quote = False
for c in s3_unicode(item):
if c == '"' and not ignore_quote:
w += c
quote = not quote
elif c == "," and not quote:
if w in NONE:
w = None
else:
w = uquote(w).encode("utf-8")
vlist.append(w)
w = ""
else:
w += c
if c == "\\":
ignore_quote = True
else:
ignore_quote = False
if w in NONE:
w = None
else:
w = uquote(w).encode("utf-8")
vlist.append(w)
if len(vlist) == 1:
return vlist[0]
return vlist
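    # Illustrative sketch of parse_value() (hypothetical values):
    #
    #   parse_value("1,2")          #=> ["1", "2"]
    #   parse_value('"a,b",NONE')   #=> ["a,b", None]
    #
    # Quoted commas do not split, and NONE/None parse to None.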
# -------------------------------------------------------------------------
@classmethod
def _subquery(cls, selectors, op, invert, value):
"""
Construct a sub-query from URL selectors, operator and value
@param selectors: the selector(s)
@param op: the operator
@param invert: invert the query
@param value: the value
"""
v = cls.parse_value(value)
q = None
for fs in selectors:
if op == S3ResourceQuery.LIKE:
# Auto-lowercase and replace wildcard
f = S3FieldSelector(fs).lower()
if isinstance(v, basestring):
v = v.replace("*", "%").lower()
elif isinstance(v, list):
v = [x.replace("*", "%").lower() for x in v if x is not None]
else:
f = S3FieldSelector(fs)
rquery = None
try:
rquery = S3ResourceQuery(op, f, v)
except SyntaxError:
current.log.error("Invalid URL query operator: %s (sub-query ignored)" % op)
q = None
break
# Invert operation
if invert:
rquery = ~rquery
# Add to subquery
if q is None:
q = rquery
elif invert:
q &= rquery
else:
q |= rquery
return q
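    # Illustrative sketch (hypothetical call): a URL variable like
    # "first|last__like=*jo*" would arrive here as
    #   _subquery(["first", "last"], "like", False, "*jo*")
    # and produce roughly (first LIKE "%jo%") | (last LIKE "%jo%"),
    # with the selectors lowercased via the "lower" operator.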
# =============================================================================
# Helper to combine multiple queries using AND
#
combine = lambda x, y: x & y if x is not None else y
# =============================================================================
class S3URLQueryParser(object):
""" New-style URL Filter Parser """
def __init__(self):
""" Constructor """
self.parser = None
self.ParseResults = None
self.ParseException = None
self._parser()
# -------------------------------------------------------------------------
def _parser(self):
""" Import PyParsing and define the syntax for filter expressions """
# PyParsing available?
try:
import pyparsing as pp
except ImportError:
current.log.error("Advanced filter syntax requires pyparsing, $filter ignored")
return False
# Selector Syntax
context = lambda s, l, t: t[0].replace("[", "(").replace("]", ")")
selector = pp.Word(pp.alphas + "[]~", pp.alphanums + "_.$:[]")
selector.setParseAction(context)
keyword = lambda x, y: x | pp.Keyword(y) if x else pp.Keyword(y)
# Expression Syntax
function = reduce(keyword, S3FieldSelector.OPERATORS)
expression = function + \
pp.Literal("(").suppress() + \
selector + \
pp.Literal(")").suppress()
# Comparison Syntax
comparison = reduce(keyword, S3ResourceQuery.COMPARISON)
# Value Syntax
        number = pp.Regex(r"[+-]?\d+(?:\.\d*)?(?:[eE][+-]?\d+)?")
value = number | \
pp.Keyword("NONE") | \
pp.quotedString | \
pp.Word(pp.alphanums + pp.printables)
qe = pp.Group(pp.Group(expression | selector) +
comparison +
pp.originalTextFor(pp.delimitedList(value, combine=True)))
parser = pp.operatorPrecedence(qe, [("not", 1, pp.opAssoc.RIGHT, ),
("and", 2, pp.opAssoc.LEFT, ),
("or", 2, pp.opAssoc.LEFT, ),
])
self.parser = parser
self.ParseResults = pp.ParseResults
self.ParseException = pp.ParseException
return True
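    # The resulting grammar accepts expressions like (hypothetical):
    #
    #   ~.name eq "test" and not other.field gt 3
    #   lower(~.name) like "ada*" or ~.id belongs 1,2,3
    #
    # provided "belongs"/"like"/"eq"/"gt" appear in S3ResourceQuery.COMPARISON
    # and "lower" in S3FieldSelector.OPERATORS.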
# -------------------------------------------------------------------------
def parse(self, expression):
"""
Parse a string expression and convert it into a dict
of filters (S3ResourceQueries).
            @param expression: the filter expression as string
@return: a dict of {component_alias: filter_query}
"""
query = {}
parser = self.parser
if not expression or parser is None:
return query
try:
parsed = parser.parseString(expression)
except self.ParseException:
current.log.error("Invalid URL Filter Expression: '%s'" %
expression)
else:
if parsed:
query = self.convert_expression(parsed[0])
return query
# -------------------------------------------------------------------------
def convert_expression(self, expression):
"""
Convert a parsed filter expression into a dict of
filters (S3ResourceQueries)
@param expression: the parsed filter expression (ParseResults)
            @return: a dict of {component_alias: filter_query}
"""
ParseResults = self.ParseResults
convert = self.convert_expression
if isinstance(expression, ParseResults):
first, op, second = ([None, None, None] + list(expression))[-3:]
if isinstance(first, ParseResults):
first = convert(first)
if isinstance(second, ParseResults):
second = convert(second)
if op == "not":
return self._not(second)
elif op == "and":
return self._and(first, second)
elif op == "or":
return self._or(first, second)
elif op in S3ResourceQuery.COMPARISON:
return self._query(op, first, second)
elif op in S3FieldSelector.OPERATORS and second:
selector = S3FieldSelector(second)
selector.op = op
return selector
elif op is None and second:
return S3FieldSelector(second)
else:
return None
# -------------------------------------------------------------------------
def _and(self, first, second):
"""
            Conjunction of two query dicts {component_alias: filter_query} (AND)
@param first: the first dict
@param second: the second dict
@return: the combined dict
"""
if not first:
return second
if not second:
return first
result = dict(first)
for alias, subquery in second.items():
if alias not in result:
result[alias] = subquery
else:
result[alias] &= subquery
return result
# -------------------------------------------------------------------------
def _or(self, first, second):
"""
Disjunction of two query dicts {component_alias: filter_query} (OR)
@param first: the first query dict
@param second: the second query dict
@return: the combined dict
"""
if not first:
return second
if not second:
return first
if len(first) > 1:
first = {None: reduce(combine, first.values())}
if len(second) > 1:
second = {None: reduce(combine, second.values())}
falias = first.keys()[0]
salias = second.keys()[0]
alias = falias if falias == salias else None
return {alias: first[falias] | second[salias]}
# -------------------------------------------------------------------------
def _not(self, query):
"""
Negation of a query dict
@param query: the query dict {component_alias: filter_query}
"""
if query is None:
return None
if len(query) == 1:
alias, sub = query.items()[0]
if sub.op == S3ResourceQuery.OR and alias is None:
                l = sub.left
                r = sub.right
                lalias = self._alias(l.left)
                ralias = self._alias(r.left)
                if lalias == ralias:
                    return {alias: ~sub}
                else:
                    # not(A or B) => not(A) and not(B)
                    return {lalias: ~l, ralias: ~r}
else:
if sub.op == S3ResourceQuery.NOT:
return {alias: sub.left}
else:
return {alias: ~sub}
else:
return {None: ~reduce(combine, query.values())}
# -------------------------------------------------------------------------
def _query(self, op, first, second):
"""
Create an S3ResourceQuery
@param op: the operator
@param first: the first operand (=S3FieldSelector)
@param second: the second operand (=value)
"""
if not isinstance(first, S3FieldSelector):
return {}
selector = first
alias = self._alias(selector)
value = S3URLQuery.parse_value(second.strip())
if op == S3ResourceQuery.LIKE:
if isinstance(value, basestring):
value = value.replace("*", "%").lower()
elif isinstance(value, list):
value = [x.replace("*", "%").lower() for x in value if x is not None]
return {alias: S3ResourceQuery(op, selector, value)}
# -------------------------------------------------------------------------
@staticmethod
def _alias(selector):
"""
Get the component alias from an S3FieldSelector (DRY Helper)
@param selector: the S3FieldSelector
@return: the alias as string or None for the master resource
"""
alias = None
if selector and isinstance(selector, S3FieldSelector):
prefix = selector.name.split("$", 1)[0]
if "." in prefix:
alias = prefix.split(".", 1)[0]
if alias in ("~", ""):
alias = None
return alias
# END =========================================================================
|
ScottBuchanan/eden
|
modules/s3/s3query.py
|
Python
|
mit
| 82,174
|
import copy
import unittest
from pyrake.utils.datatypes import CaselessDict
__doctests__ = ['pyrake.utils.datatypes']
class CaselessDictTest(unittest.TestCase):
def test_init(self):
seq = {'red': 1, 'black': 3}
d = CaselessDict(seq)
self.assertEqual(d['red'], 1)
self.assertEqual(d['black'], 3)
seq = (('red', 1), ('black', 3))
d = CaselessDict(seq)
self.assertEqual(d['red'], 1)
self.assertEqual(d['black'], 3)
def test_caseless(self):
d = CaselessDict()
d['key_Lower'] = 1
self.assertEqual(d['KEy_loWer'], 1)
self.assertEqual(d.get('KEy_loWer'), 1)
d['KEY_LOWER'] = 3
self.assertEqual(d['key_Lower'], 3)
self.assertEqual(d.get('key_Lower'), 3)
def test_delete(self):
d = CaselessDict({'key_lower': 1})
del d['key_LOWER']
self.assertRaises(KeyError, d.__getitem__, 'key_LOWER')
self.assertRaises(KeyError, d.__getitem__, 'key_lower')
def test_getdefault(self):
d = CaselessDict()
self.assertEqual(d.get('c', 5), 5)
d['c'] = 10
self.assertEqual(d.get('c', 5), 10)
def test_setdefault(self):
d = CaselessDict({'a': 1, 'b': 2})
r = d.setdefault('A', 5)
self.assertEqual(r, 1)
self.assertEqual(d['A'], 1)
r = d.setdefault('c', 5)
self.assertEqual(r, 5)
self.assertEqual(d['C'], 5)
def test_fromkeys(self):
keys = ('a', 'b')
d = CaselessDict.fromkeys(keys)
self.assertEqual(d['A'], None)
self.assertEqual(d['B'], None)
d = CaselessDict.fromkeys(keys, 1)
self.assertEqual(d['A'], 1)
self.assertEqual(d['B'], 1)
instance = CaselessDict()
d = instance.fromkeys(keys)
self.assertEqual(d['A'], None)
self.assertEqual(d['B'], None)
d = instance.fromkeys(keys, 1)
self.assertEqual(d['A'], 1)
self.assertEqual(d['B'], 1)
def test_contains(self):
d = CaselessDict()
d['a'] = 1
assert 'a' in d
def test_pop(self):
d = CaselessDict()
d['a'] = 1
self.assertEqual(d.pop('A'), 1)
self.assertRaises(KeyError, d.pop, 'A')
def test_normkey(self):
class MyDict(CaselessDict):
def normkey(self, key):
return key.title()
d = MyDict()
d['key-one'] = 2
self.assertEqual(list(d.keys()), ['Key-One'])
def test_normvalue(self):
class MyDict(CaselessDict):
def normvalue(self, value):
if value is not None:
return value + 1
d = MyDict({'key': 1})
self.assertEqual(d['key'], 2)
self.assertEqual(d.get('key'), 2)
d = MyDict()
d['key'] = 1
self.assertEqual(d['key'], 2)
self.assertEqual(d.get('key'), 2)
d = MyDict()
d.setdefault('key', 1)
self.assertEqual(d['key'], 2)
self.assertEqual(d.get('key'), 2)
d = MyDict()
d.update({'key': 1})
self.assertEqual(d['key'], 2)
self.assertEqual(d.get('key'), 2)
d = MyDict.fromkeys(('key',), 1)
self.assertEqual(d['key'], 2)
self.assertEqual(d.get('key'), 2)
def test_copy(self):
h1 = CaselessDict({'header1': 'value'})
h2 = copy.copy(h1)
self.assertEqual(h1, h2)
self.assertEqual(h1.get('header1'), h2.get('header1'))
assert isinstance(h2, CaselessDict)
if __name__ == "__main__":
unittest.main()
|
elkingtowa/pyrake
|
tests/test_utils_datatypes.py
|
Python
|
mit
| 3,592
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('mountains', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Climb',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
('mountain', models.CharField(max_length=255)),
('start_date', models.DateField()),
('start_time', models.TimeField()),
('summit_date', models.DateField()),
('summit_time', models.TimeField()),
('finish_date', models.DateField()),
('finish_time', models.TimeField()),
('total_distance', models.PositiveIntegerField()),
('notes', models.TextField()),
('climber_id', models.ForeignKey(default=None, to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
]
|
tiradoe/fourteeners-updated
|
fourteeners/apps/mountains/migrations/0002_climb.py
|
Python
|
mit
| 1,220
|
"""Proof of Possession Identifier Validation Challenge."""
import logging
import os
from cryptography import x509
from cryptography.hazmat.backends import default_backend
import zope.component
from acme import challenges
from acme import jose
from acme import other
from letsencrypt import interfaces
from letsencrypt.display import util as display_util
logger = logging.getLogger(__name__)
class ProofOfPossession(object): # pylint: disable=too-few-public-methods
"""Proof of Possession Identifier Validation Challenge.
Based on draft-barnes-acme, section 6.5.
:ivar installer: Installer object
:type installer: :class:`~letsencrypt.interfaces.IInstaller`
"""
def __init__(self, installer):
self.installer = installer
def perform(self, achall):
"""Perform the Proof of Possession Challenge.
:param achall: Proof of Possession Challenge
:type achall: :class:`letsencrypt.achallenges.ProofOfPossession`
:returns: Response or None/False if the challenge cannot be completed
:rtype: :class:`acme.challenges.ProofOfPossessionResponse`
or False
"""
if (achall.alg in [jose.HS256, jose.HS384, jose.HS512] or
not isinstance(achall.hints.jwk, achall.alg.kty)):
return None
for cert, key, _ in self.installer.get_all_certs_keys():
with open(cert) as cert_file:
cert_data = cert_file.read()
try:
cert_obj = x509.load_pem_x509_certificate(
cert_data, default_backend())
except ValueError:
try:
cert_obj = x509.load_der_x509_certificate(
cert_data, default_backend())
except ValueError:
logger.warn("Certificate is neither PER nor DER: %s", cert)
cert_key = achall.alg.kty(key=cert_obj.public_key())
if cert_key == achall.hints.jwk:
return self._gen_response(achall, key)
        # Is there a different prompt we should give the user?
code, key = zope.component.getUtility(
interfaces.IDisplay).input(
"Path to private key for identifier: %s " % achall.domain)
if code != display_util.CANCEL:
return self._gen_response(achall, key)
# If we get here, the key wasn't found
return False
def _gen_response(self, achall, key_path): # pylint: disable=no-self-use
"""Create the response to the Proof of Possession Challenge.
:param achall: Proof of Possession Challenge
:type achall: :class:`letsencrypt.achallenges.ProofOfPossession`
:param str key_path: Path to the key corresponding to the hinted to
public key.
:returns: Response or False if the challenge cannot be completed
:rtype: :class:`acme.challenges.ProofOfPossessionResponse`
or False
"""
if os.path.isfile(key_path):
with open(key_path, 'rb') as key:
try:
# Needs to be changed if JWKES doesn't have a key attribute
jwk = achall.alg.kty.load(key.read())
sig = other.Signature.from_msg(achall.nonce, jwk.key,
alg=achall.alg)
except (IndexError, ValueError, TypeError, jose.errors.Error):
return False
return challenges.ProofOfPossessionResponse(nonce=achall.nonce,
signature=sig)
return False
|
vcavallo/letsencrypt
|
letsencrypt/proof_of_possession.py
|
Python
|
apache-2.0
| 3,641
|
from app import server
import asyncore
import os
server.basedir = os.path.dirname(os.path.realpath(__file__))
if __name__ == "__main__":
try:
server = server.Server()
server.settings['basedir'] = os.path.dirname(os.path.realpath(__file__))
server.serve_forever()
    except:
        os.system("kill %s" % str(os.getpid()))
        raise
|
barneygale/mcocr
|
app.py
|
Python
|
bsd-3-clause
| 348
|
# Generated by Django 2.1.4 on 2018-12-21 14:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0016_auto_20181202_2205'),
('mainapp', '0016_auto_20181221_1432'),
]
operations = [
]
|
meine-stadt-transparent/meine-stadt-transparent
|
mainapp/migrations/0017_merge_20181221_1508.py
|
Python
|
mit
| 272
|
# Example of unwrapping a decorator
from functools import wraps
def decorator1(func):
@wraps(func)
def wrapper(*args, **kwargs):
print('Decorator 1')
return func(*args, **kwargs)
return wrapper
def decorator2(func):
@wraps(func)
def wrapper(*args, **kwargs):
print('Decorator 2')
return func(*args, **kwargs)
return wrapper
@decorator1
@decorator2
def add(x, y):
return x + y
# Calling wrapped function
print(add(2,3))
# Calling original function
print(add.__wrapped__(2,3))
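# Note: with functools.wraps, __wrapped__ typically refers one level down
# the decorator chain, so the second call would still print "Decorator 2"
# before returning 5 (exact unwrapping behavior has varied across Python
# versions).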
|
tuanavu/python-cookbook-3rd
|
src/9/unwrapping_a_decorator/example.py
|
Python
|
mit
| 541
|
# -*- coding: utf-8 -*-
# Copyright © 2016 Cask Data, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Simple, inelegant Sphinx extension which adds a directive for a
tabbed parsed-literals that may be switched between in HTML.
version: 0.4
The directive adds these parameters, both optional:
:languages: comma-separated list of pygments languages; default "console"
:tabs: comma-separated list of tabs; default "Linux,Windows"
:mapping: comma-separated list of linked-tabs; default "Linux,Windows"
:copyable: flag to indicate that all text can be "copied"
:single: flag to indicate that only one tab should be used, with no label (not yet implemented)
:independent: flag to indicate that this tab set does not link to other tabs
:dependent: name of tab set this tab belongs to; default "linux-windows"
Separate the code blocks with matching comment lines. Tabs must follow in order of :tabs:
option. Comment labels are for convenience, and don't need to match. Note example uses a
tab label with a space in it, and is enclosed in quotes. Note that the comma-separated
lists must not have spaces in them (outside of quotes); ie, use "java,scala", not
"java, scala".
The mapping maps a tab that is displayed to the trigger that will display it.
For example, you could have a set of tabs:
:tabs: "Mac OS X",Windows
:mapping: linux,windows
:dependent: linux-windows
Clicking on a "Linux" tab in another tab-set would activate the "Mac OS X" tab in this tab set.
Mappings cannot use special characters. If a tab label uses a special character, an
explicit mapping is required; otherwise an error is raised, as the label cannot be
resolved using the defaults.
Note that slightly different rules operate for replacements: a replacement such as
"\|replace|" will work, and the backslash will be interpreted as a single backslash rather
than as escaping the "|".
If there is only one tab, the node is set to "independent" automatically, as there is
nothing to switch. If :languages: is not supplied for the single tab, "shell-session" is
used.
Lines that begin with "$", "#", ">", ">", "cdap >", "cdap >" are treated as command
lines and the text following is auto-selected for copying on mouse-over. (On Safari,
command-V is still required for copying; other browser support click-copying to the
clipboard.)
FIXME: Implement the ":single:" flag.
Examples:
.. tabbed-parsed-literal::
:languages: console,shell-session
:tabs: "Linux or OS/X",Windows
.. Linux
$ cdap-cli.sh start flow HelloWorld.WhoFlow
Successfully started flow 'WhoFlow' of application 'HelloWorld' with stored runtime arguments '{}'
$ curl -o /etc/yum.repos.d/cask.repo http://repository.cask.co/centos/6/x86_64/cdap/|short-version|/cask.repo
.. Windows
> cdap-cli.bat start flow HelloWorld.WhoFlow
Successfully started flow 'WhoFlow' of application 'HelloWorld' with stored runtime arguments '{}'
> <CDAP-SDK-HOME>\libexec\bin\curl.exe -d c:\|release| -X POST 'http://repository.cask.co/centos/6/x86_64/cdap/|short-version|/cask.repo'
If you pass a single set of commands, without comments, the directive will create a
two-tabbed "Linux" and "Windows" with a generated Windows-equivalent command set. Check
the results carefully, and file an issue if it is unable to create the correct command.
Worst-case: you have to use the full format and enter the two commands. Note that any JSON
strings in the commands must be on a single line to convert successfully.
.. tabbed-parsed-literal::
$ cdap-cli.sh start flow HelloWorld.WhoFlow
Successfully started flow 'WhoFlow' of application 'HelloWorld' with stored runtime arguments '{}'
$ curl -o /etc/yum.repos.d/cask.repo http://repository.cask.co/centos/6/x86_64/cdap/|short-version|/cask.repo
.. tabbed-parsed-literal::
:copyable:
:single:
SELECT * FROM dataset_uniquevisitcount ORDER BY value DESC LIMIT 5
Tab sets are either independent or dependent. Independent tabs do not participate in page or site tab setting.
In other words, clicking on a tab does not change any other tabs. Dependent tabs do. Clicking on the "Linux"
tab will change all other tabs to "Linux". You may need to include a mapping listing the relationship, such as this:
.. tabbed-parsed-literal::
:tabs: Linux,Windows,"Distributed CDAP"
:mapping: Linux,Windows,Linux
:languages: console,shell-session,console
...
This maps the tab "Distributed CDAP" to the other "Linux" tabs on the site. Clicking that
tab would change other tabs to the "linux" tab. (Changing to "linux" from another tab will
cause the first "linux" tab to be selected.)
JavaScript and design of tabs was taken from the Apache Spark Project:
http://spark.apache.org/examples.html
"""
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.body import ParsedLiteral
from docutils.parsers.rst.roles import set_classes
DEFAULT_LANGUAGES = ['console', 'shell-session']
DEFAULT_TABS = ['linux', 'windows']
DEFAULT_TAB_LABELS = ['Linux', 'Windows']
DEFAULT_TAB_SET = 'linux-windows'
TPL_COUNTER = 0
# Sets the handlers for the tabs used by a particular instance of tabbed parsed literal
# Note doubled {{ to pass them through formatting
DEPENDENT_JS_TPL = """\
<script type="text/javascript">
$(function {div_name}() {{
var tabs = {tab_links};
var mapping = {mapping};
var tabSetID = {tabSetID};
for (var i = 0; i < tabs.length; i++) {{
var tab = tabs[i];
$("#{div_name} .example-tab-" + tab).click(changeExampleTab(tab, mapping, "{div_name}", tabSetID));
}}
}});
</script>
"""
# Note doubled {{ to pass them through formatting
INDEPENDENT_JS_TPL = """\
<script type="text/javascript">
function change_{div_name}_ExampleTab(tab) {{
return function(e) {{
e.preventDefault();
var scrollOffset = $(this).offset().top - $(document).scrollTop();
$("#{div_name} .tab-pane").removeClass("active");
$("#{div_name} .tab-pane-" + tab).addClass("active");
$("#{div_name} .example-tab").removeClass("active");
$("#{div_name} .example-tab-" + tab).addClass("active");
$(document).scrollTop($(this).offset().top - scrollOffset);
}}
}}
$(function() {{
var tabs = {tab_links};
for (var i = 0; i < tabs.length; i++) {{
var tab = tabs[i];
$("#{div_name} .example-tab-" + tab).click(change_{div_name}_ExampleTab(tab));
}}
}});
</script>
"""
DIV_START = """
<div id="{div_name}" class="{class}">
"""
NAV_TABS = """
<ul class="nav nav-tabs">
%s</ul>
"""
NAV_TABS_ENTRY = """\
<li class="example-tab example-tab-{tab_link} {active}"><a href="#">{tab_name}</a></li>
"""
TAB_CONTENT_START = """\
<div class="tab-contents">
"""
DIV_END = """
</div>
"""
TAB_CONTENT_ENTRY_START = """\
<div class="tab-pane tab-pane-{tab_link} {active}">
<div class="code code-tab">
"""
DIV_DIV_END = """
</div>
</div>
"""
def dequote(text):
"""
If text has single or double quotes around it, remove them.
Make sure the pair of quotes match.
If a matching pair of quotes is not found, return the text unchanged.
"""
if (text[0] == text[-1]) and text.startswith(("'", '"')):
return text[1:-1]
return text
def clean_alphanumeric(text):
"""
If text has any non-alphanumeric characters, replace them with a hyphen.
"""
text_clean = ''
for charc in text:
text_clean += charc if charc.isalnum() else '-'
return text_clean
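# Illustrative examples (hypothetical inputs):
#   dequote('"Linux or OS/X"')     #=> 'Linux or OS/X'
#   dequote('"mismatched')         #=> '"mismatched' (unchanged)
#   clean_alphanumeric('OS/X 10')  #=> 'OS-X-10'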
def convert(c, state={}):
"""
Converts a Linux command to a Windows-equivalent following a few simple rules:
- Converts a starting '$' to '>'
- Forward-slashes in 'http[s]' and 'localhost' URIs are preserved
- Other forward-slashes become backslashes
- A lone backslash (the Linux line continuation character) becomes a '^'
- '.sh' commands become '.bat' commands
- removes a "-w'\n'" option from curl commands
- In curl commands, a JSON string (beginning with "-d '{") is converted to all
internal double quotes are escaped and the entire string surrounded in double quotes
- state option allows one line to pass state to the next line to be converted
"""
DEBUG = False
# DEBUG = True
w = []
leading_whitespace = ' ' * (len(c) - len(c.lstrip()))
text_list = c.split()
CLI = 'cdap-cli.sh'
CURL = 'curl'
DATA_OPTIONS = ['-d', '--data', '--data-ascii']
HEADER_OPTIONS = ['-H', '--header']
TRAILING_OPTIONS = ["-w'\\n'", '-w"\\n"']
# Local states
IN_CLI = False
IN_CURL = False
IN_CURL_DATA = False
IN_CURL_DATA_JSON = False
IN_CURL_HEADER = False
IN_CURL_HEADER_ARTIFACT = False
STATE_KEYS = ['IN_CLI', 'IN_CURL', 'IN_CURL_DATA', 'IN_CURL_DATA_JSON', 'IN_CURL_HEADER', 'IN_CURL_HEADER_ARTIFACT']
JSON_OPEN_CLOSE = {
"open_array":"'[",
"open_array_win": "\"[",
"open_object":"'{",
"open_object_win": "\"{",
"open-artifact": "'Artifact-",
"close_array": "]'",
"close_array_win": "]\"",
"close_object": "}'",
"close_object_win": "}\"",
}
# Passed state
for s in STATE_KEYS:
if not state.has_key(s):
state[s] = False
if DEBUG: print "\nconverting: %s\nreceived state: %s" % (c, state)
for i, v in enumerate(text_list):
if DEBUG: print "v:%s" % v # v is the parsed snippet, split on spaces
if v == CLI or state['IN_CLI']:
IN_CLI = True
state['IN_CLI'] = True
if v == CURL or state['IN_CURL']:
IN_CURL = True
state['IN_CURL'] = True
if state['IN_CURL_DATA']:
IN_CURL_DATA = True
if state['IN_CURL_DATA_JSON']:
IN_CURL_DATA_JSON = True
if state['IN_CURL_HEADER']:
IN_CURL_HEADER = True
if state['IN_CURL_HEADER_ARTIFACT']:
IN_CURL_HEADER_ARTIFACT = True
if i == 0 and v == '$':
w.append('>')
for s in STATE_KEYS:
state[s] = False
if DEBUG: print "w.append('>')"
continue
if v.endswith('.sh'):
v = v.replace('.sh', '.bat')
if DEBUG: print "v.replace('.sh', '.bat')"
if v == '\\':
w.append('^')
if IN_CLI:
state['IN_CLI'] = True
if IN_CURL:
state['IN_CURL'] = True
if DEBUG: print "w.append('^')"
continue
if IN_CURL and (v in TRAILING_OPTIONS):
if DEBUG: print "IN_CURL and TRAILING_OPTIONS"
continue
if IN_CURL and (v in DATA_OPTIONS):
if DEBUG: print "IN_CURL and DATA_OPTIONS"
IN_CURL_DATA = True
state['IN_CURL_DATA'] = True
w.append(v)
continue
if IN_CURL and (v in HEADER_OPTIONS):
if DEBUG: print "IN_CURL and HEADER_OPTIONS"
IN_CURL_HEADER = True
state['IN_CURL_HEADER'] = True
w.append(v)
continue
if IN_CURL and IN_CURL_DATA:
if DEBUG: print "IN_CURL and IN_CURL_DATA"
if DEBUG: print "IN_CURL_DATA_JSON: %s" % IN_CURL_DATA_JSON
state['IN_CURL'] = True
if v.startswith(JSON_OPEN_CLOSE["open_array"]) or v.startswith(JSON_OPEN_CLOSE["open_object"]):
if DEBUG: print "Start of json"
IN_CURL_DATA_JSON = True
state['IN_CURL_DATA_JSON'] = True
w.append("\"%s" % v.replace('"', '\\"')[1:])
elif v.endswith(JSON_OPEN_CLOSE["close_array"]) or v.endswith(JSON_OPEN_CLOSE["close_object"]):
if DEBUG: print "End of json"
w.append("%s\"" % v.replace('"', '\\"')[:-1])
IN_CURL_DATA = False
state['IN_CURL_DATA'] = False
IN_CURL_DATA_JSON = False
state['IN_CURL_DATA_JSON'] = False
elif IN_CURL_DATA_JSON:
if DEBUG: print "json..."
w.append(v.replace('"', '\\"'))
else:
if DEBUG: print "data..."
w.append(v)
continue
if IN_CURL and IN_CURL_HEADER:
if DEBUG: print "IN_CURL and IN_CURL_HEADER"
state['IN_CURL'] = True
if v.startswith(JSON_OPEN_CLOSE["open-artifact"]):
if DEBUG: print "Start of json"
IN_CURL_HEADER_ARTIFACT = True
# Don't pass this state, as we aren't tracking where the end is, and assume it is at end-of-line
# To track the end, we would need to push and pop opening and closing quotes...
# state['IN_CURL_HEADER_ARTIFACT'] = True
w.append("\"%s" % v.replace('"', '\\"')[1:])
continue
elif IN_CURL_HEADER_ARTIFACT:
if DEBUG: print "json...escaping double-quotes and replacing single-quotes"
w.append(v.replace('"', '\\"').replace("'", '"'))
else:
# Currently, won't reach this, as once IN_CURL_HEADER_ARTIFACT we never leave until end-of-line
if DEBUG: print "data..."
w.append(v)
continue
if (IN_CLI or IN_CURL) and v.startswith('"'):
if DEBUG: print "v.startswith('\"')"
w.append(v)
continue
if v.find('/') != -1:
if DEBUG: print "found slash: IN_CLI: %s v: %s" % (IN_CLI, v)
if (v.startswith('localhost') or v.startswith('"localhost') or v.startswith('"http:')
or v.startswith('"https:') or v.startswith('http:') or v.startswith('https:')):
if DEBUG: print "v.startswith..."
w.append(v)
continue
if IN_CLI:
if i > 0 and text_list[i-1] in ['body:file', 'artifact']:
if DEBUG: print "IN_CLI and path"
else:
w.append(v)
continue
w.append(v.replace('/', '\\'))
else:
if DEBUG: print "didn't find slash"
w.append(v)
if DEBUG: print "converted to: %s\npassing state: %s" % (leading_whitespace + ' '.join(w), state)
return leading_whitespace + ' '.join(w), state
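# Illustrative sketch of convert() (hypothetical command):
#
#   convert("$ cdap-cli.sh start flow HelloWorld.WhoFlow")
#   #=> ("> cdap-cli.bat start flow HelloWorld.WhoFlow", state)
#
# i.e. the "$" prompt becomes ">" and ".sh" scripts become ".bat".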
class TabbedParsedLiteralNode(nodes.literal_block):
"""TabbedParsedLiteralNode is an extended literal_block that supports replacements."""
def cleanup(self):
for i, v in enumerate(self.traverse()):
if isinstance(v, nodes.Text):
t = v.astext()
if t.endswith('.\ ') or t.endswith('=\ '):
t = t[:-2]
if t.find('\`') != -1:
t = t.replace('\`', '`')
if t != v.astext():
self.replace(v, nodes.Text(t))
class TabbedParsedLiteral(ParsedLiteral):
"""TabbedParsedLiteral is a set of different blocks"""
option_spec = dict(dependent=directives.unchanged_required,
independent=directives.flag,
languages=directives.unchanged_required,
mapping=directives.unchanged_required,
tabs=directives.unchanged_required,
copyable=directives.flag,
single=directives.flag,
**ParsedLiteral.option_spec)
has_content = True
def cleanup_content(self):
"""Parses content, looks for comment markers, removes them, prepares backslashes.
Calculates size for each block.
"""
content = self.content
text_block = '\n'.join(content)
        if not text_block.startswith('.. ') or text_block.find('\n.. ') == -1:
# There are no comments... generating a Windows-equivalent code
LINUX = ['.. Linux', '']
WINDOWS = ['', '.. Windows', '']
old_content = []
new_content = []
state = {}
for line in self.content:
old_content.append(line)
new_line, state = convert(line, state)
new_content.append(new_line)
content = LINUX + old_content + WINDOWS + new_content
# print "old_content:\n%s\n" % ('\n'.join(old_content))
# print "new_content:\n%s\n" % ('\n'.join(new_content))
line_sets = []
line_set = []
for line in content:
if line.startswith('.. '):
if line_set:
line_sets.append(line_set)
line_set = []
else:
line_set.append(line)
line_sets.append(line_set)
line_counts = []
lines = []
for line_set in line_sets:
block = '\n'.join(line_set).rstrip()
block = block.replace('\\', '\\\\')
block = block.replace('\\|', '\\\ |')
block = block.replace('*', '\*')
block = block.replace(' |-', ' \|-')
block = block.replace('\n|-', '\n\|-')
block = block.replace(' |+', ' \|+')
block = block.replace('\n|+', '\n\|+')
if not block.endswith('\n'):
block += '\n'
lines.append(block)
line_counts.append(block.count('\n') +1)
return line_counts, lines
    def cleanup_option(self, option, default, alphanumeric_only=False):
        """Removes leading or trailing quotes or double-quotes from a string option."""
        _option = self.options.get(option,'')
        if not _option:
            return default
        else:
            return clean_alphanumeric(dequote(_option)) if alphanumeric_only else dequote(_option)
    def cleanup_options(self, option, default, alphanumeric_only=False, lower=False):
        """
        Removes leading or trailing quotes or double-quotes from a string option list.
        Removes non-alphanumeric characters if alphanumeric_only is true.
        Converts from Unicode to string.
        """
        _option = self.options.get(option,'')
        if not _option:
            return default
        else:
            _options = []
            for s in _option.split(","):
                s = dequote(s)
                s = clean_alphanumeric(s) if alphanumeric_only else s
                s = s.lower() if lower else s
                _options.append(str(s))
            return _options
def run(self):
set_classes(self.options)
self.assert_has_content()
line_counts, lines = self.cleanup_content()
text = '\n'.join(lines)
# Sending text to state machine for inline text replacement
text_nodes, messages = self.state.inline_text(text, self.lineno)
# Debugging Code start
# if messages:
# print "text:\n%s" % text
# print "text_nodes:\n%s" % text_nodes
# for n in text_nodes:
# print "n:\n%s" % n
# print 'messages:'
# for m in messages:
# print m
# Debugging Code end
node = TabbedParsedLiteralNode(text, '', *text_nodes, **self.options)
node.cleanup()
node.line = self.content_offset + 1
self.add_name(node)
node['copyable'] = self.options.has_key('copyable')
node['independent'] = self.options.has_key('independent')
node['languages'] = self.cleanup_options('languages', DEFAULT_LANGUAGES)
node['line_counts'] = line_counts
node['linenos'] = self.cleanup_options('linenos', '')
node['single'] = self.options.has_key('single')
node['tab_labels'] = self.cleanup_options('tabs', DEFAULT_TAB_LABELS)
        node['tabs'] = self.cleanup_options('tabs', DEFAULT_TABS, alphanumeric_only=True, lower=True)
tab_count = len(node['tabs'])
if tab_count == 1:
# If only one tab, force to be independent
node['independent'] = True
# If languages were not supplied, make it a shell-session
if not self.options.has_key('languages'):
node['languages'] = [DEFAULT_LANGUAGES[1]]
if tab_count != len(node['languages']):
print "Warning: tabs (%s) don't match languages (%s)" % (node['tabs'], node['languages'])
node['languages'] = [DEFAULT_LANGUAGES[0]] * tab_count
if not node['independent']:
node['dependent'] = self.cleanup_option('dependent', DEFAULT_TAB_SET)
            node['mapping'] = self.cleanup_options('mapping', node['tabs'], alphanumeric_only=True, lower=True)
if tab_count != len(node['mapping']):
print "Warning: tabs (%s) don't match mapping (%s)" % (node['tabs'], node['mapping'])
if tab_count > 1:
node['mapping'] = DEFAULT_TABS + [DEFAULT_TABS[0]] * (tab_count -2)
else:
node['mapping'] = [DEFAULT_TABS[0]] * tab_count
return [node] + messages
def visit_tpl_html(self, node):
"""Visit a Tabbed Parsed Literal node"""
global TPL_COUNTER
TPL_COUNTER += 1
def _highlighter(node, text, lang='console'):
linenos = text.count('\n') >= \
self.highlightlinenothreshold - 1
highlight_args = node.get('highlight_args', {})
if lang:
# code-block directives
highlight_args['force'] = True
if 'linenos' in node:
linenos = node['linenos']
if lang is self.highlightlang_base:
# only pass highlighter options for original language
opts = self.highlightopts
else:
opts = {}
def warner(msg):
self.builder.warn(msg, (self.builder.current_docname, node.line))
highlighted = self.highlighter.highlight_block(
text, lang, opts=opts, warn=warner, linenos=linenos,
**highlight_args)
copyable = node.get('copyable')
new_highlighted = ['','<!-- tabbed-parsed-literal start -->',]
if lang in ['console', 'shell-session', 'ps1', 'powershell']:
# print "highlighted (before):\n%s" % highlighted
# Console-specific highlighting
new_highlighted = ['','<!-- tabbed-parsed-literal start -->',]
continuing_line = False # Indicates current line continues to next
continued_line = False # Indicates current line was continued from previous
copyable_text = False # Indicates that the line (or the previous) now has copyable text in it
for l in highlighted.splitlines():
if copyable:
t = "<pre>"
i = l.find(t)
if i != -1:
l = "%s<pre class=\"copyable\"><span class=\"copyable-text\">%s" % (l[:i], l[len(t)+i:])
t = "</pre>"
i = l.find(t)
if i != -1:
l = "%s</span></pre>%s" % (l[:i], l[len(t)+i:])
else:
continuing_line = False
if l:
continuing_line = l.endswith('\\</span>') or l.endswith('^</span>')
# print "continuing_line: %s continued_line: %s l: %s" % (continuing_line, continued_line, l)
                    for p in ['$', '#', '>', '&gt;', 'cdap >', 'cdap &gt;']:
if l.startswith(p):
l = "<span class=\"gp\">%s</span><span class=\"copyable copyable-text\">%s" % (p, l[1:])
copyable_text = True
break
t = "<pre>%s " % p
i = l.find(t)
if i != -1:
l = "%s<pre class=\"copyable\"><span class=\"gp\">%s </span><span \"copyable-text\">%s" % (l[:i], p, l[len(t)+i:])
copyable_text = True
break
t = "<pre><span class=\"go\">%s " % p
i = l.find(t)
if i != -1:
l = "%s<pre class=\"copyable\"><span class=\"gp\">%s </span><span class=\"copyable-text\"><span class=\"go\">%s" % (l[:i], p, l[len(t)+i:])
copyable_text = True
break
t = "<pre><span class=\"gp\">%s</span> " % p
i = l.find(t)
if i != -1:
l = "%s<pre class=\"copyable\"><span class=\"gp\">%s </span><span class=\"copyable-text\">%s" % (l[:i], p, l[len(t)+i:])
copyable_text = True
break
t = "<span class=\"go\">%s " % p
if l.startswith(t):
if continued_line:
l = "<span class=\"gp\">%s </span><span class=\"go\">%s" % (p, l[len(t):])
else:
l = "<span class=\"gp\">%s </span><span class=\"copyable-text\"><span class=\"go\">%s" % (p, l[len(t):])
copyable_text = True
break
t = "<span class=\"gp\">%s</span> " % p
if l.startswith(t):
if continued_line:
l = "<span class=\"gp\">%s </span>%s" % (p, l[len(t):])
else:
l = "<span class=\"gp\">%s </span><span class=\"copyable-text\">%s" % (p, l[len(t):])
copyable_text = True
break
# print "continuing_line: %s continued_line: %s copyable_text: %s l: %s" % (continuing_line, continued_line, copyable_text, l)
if (continued_line and (not continuing_line)) or (not continued_line and not continuing_line and copyable_text):
# print "continued_line: %s continuing_line: %s copyable_text: %s" % (continued_line, continuing_line, copyable_text)
# End the copyable-text
l += "</span>"
copyable_text = False
new_highlighted.append(l)
# Set next line status
continued_line = continuing_line
else:
new_highlighted += highlighted.splitlines()
new_highlighted.append('<!-- tabbed-parsed-literal end -->')
# print "\nhighlighted (after):\n%s\n\n" % '\n'.join(new_highlighted)
return '\n'.join(new_highlighted)
nav_tabs_html = ''
tab_content_html = ''
languages = node.get('languages')
line_counts = node.get('line_counts')
tabs = node.get('tabs')
tab_labels = node.get('tab_labels')
node_mapping = node.get('mapping')
dependent = node.get('dependent')
clean_tab_links = []
mapping = {}
i = 0
if node_mapping:
for m in node_mapping:
if m in clean_tab_links:
i += 1
m = "%s%d" % (m, i)
clean_tab_links.append(m)
for i in range(len(clean_tab_links)):
mapping[clean_tab_links[i]] = node_mapping[i]
else:
# Independent tabs use the tab for the link
clean_tab_links = tabs
div_name = 'tabbedparsedliteral{0}'.format(TPL_COUNTER)
fill_div_options = {'div_name': div_name}
if node.get('independent'):
# Independent node, doesn't participate in clicks with other nodes and has no mapping
fill_div_options['class'] = 'independent'
js_options = {'tab_links':clean_tab_links, 'div_name':div_name}
js_tpl = INDEPENDENT_JS_TPL
else:
# Dependent node
fill_div_options['class'] = "dependent-%s" % dependent
js_options = {'tab_links':clean_tab_links,
'mapping':repr(mapping),
'div_name':div_name,
'tabSetID':repr(dependent),
}
js_tpl = DEPENDENT_JS_TPL
start_html = js_tpl.format(**js_options) + DIV_START.format(**fill_div_options)
text_list = node.astext().split('\n')
offset = 0
for index in range(len(tabs)):
lang, lines = languages[index], line_counts[index]
tab_name, tab_link = tab_labels[index], clean_tab_links[index]
start_tag = self.starttag(node, 'div', suffix='', CLASS='highlight-%s' % lang)
tab_text = text_list[offset:offset + lines]
offset += lines
# Strip any leading empty lines
text = ''
for line in tab_text:
if not line and not text:
continue
elif not text:
text = line
else:
text += '\n' + line
highlighted = _highlighter(node, text, lang)
tab_options = {'active': 'active' if not index else '',
'tab_link': tab_link,
'tab_name': tab_name,}
nav_tabs_html += NAV_TABS_ENTRY.format(**tab_options)
tab_entry_start = TAB_CONTENT_ENTRY_START.format(**tab_options)
tab_content_html += tab_entry_start + start_tag + highlighted + DIV_END + DIV_DIV_END
nav_tabs_html = NAV_TABS % nav_tabs_html
tab_content_html = TAB_CONTENT_START + tab_content_html + DIV_END
self.body.append(start_html + nav_tabs_html + tab_content_html + DIV_END)
raise nodes.SkipNode
def depart_tpl_html(self, node):
"""Depart a Tabbed Parsed Literal node"""
# Stub because of SkipNode in visit
def setup(app):
app.add_directive('tabbed-parsed-literal', TabbedParsedLiteral)
app.add_node(TabbedParsedLiteralNode, html=(visit_tpl_html, depart_tpl_html))
|
caskdata/cdap
|
cdap-docs/_common/tabbed-parsed-literal.py
|
Python
|
apache-2.0
| 30,649
|
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Histogram class which lets you build your histograms just by passing
the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from ...models import Range1d
from ...properties import Bool, Int
from .._builder import create_and_build
from .bar_builder import BarBuilder
from ..glyphs import HistogramGlyph
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Histogram(data, values=None, label=None, color=None, agg="count",
bins=None, yscale="linear", xgrid=False, ygrid=True,
continuous_range=None, **kw):
if continuous_range and not isinstance(continuous_range, Range1d):
raise ValueError(
"continuous_range must be an instance of bokeh.models.ranges.Range1d"
)
# The continuous_range is the y_range (until we implement HBar charts)
y_range = continuous_range
kw['label'] = label
kw['values'] = values
kw['color'] = color
kw['agg'] = agg
kw['yscale'] = yscale
kw['xgrid'] = xgrid
kw['ygrid'] = ygrid
kw['y_range'] = y_range
kw['bins'] = bins
return create_and_build(HistogramBuilder, data, **kw)
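# Minimal usage sketch (assuming a table-like `data` with a "mpg" column;
# names are illustrative, not taken from this module's tests):
#
#   from bokeh.charts import Histogram
#   hist = Histogram(data, values='mpg', bins=20)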
class HistogramBuilder(BarBuilder):
"""Generates one to many histograms with unique attributes.
The HistogramBuilder is responsible for producing a chart
containing one to many histograms from table-like inputs.
"""
bins = Int(default=None, help="""
    Number of bins to use for the histogram (default: None, which
    uses the Freedman-Diaconis rule).
""")
density = Bool(True, help="""
Whether to normalize the histogram. (default: True)
If True, the result is the value of the probability *density* function
at the bin, normalized such that the *integral* over the range is 1. If
False, the result will contain the number of samples in each bin.
For more info check ``numpy.histogram`` function documentation.
""")
glyph = HistogramGlyph
def _setup(self):
super(HistogramBuilder, self)._setup()
if self.attributes['color'].columns is not None:
self.fill_alpha = 0.6
def get_extra_args(self):
return dict(bin_count=self.bins)
def _set_ranges(self):
"""Push the Bar data into the ColumnDataSource and calculate
the proper ranges.
"""
x_max = max([comp_glyph.x_max for comp_glyph in self.comp_glyphs])
x_min = min([comp_glyph.x_min for comp_glyph in self.comp_glyphs])
y_max = max([comp_glyph.y_max for comp_glyph in self.comp_glyphs])
y_min = min([comp_glyph.y_min for comp_glyph in self.comp_glyphs])
x_buffer = ((x_max + x_min)/2.0)*0.1
self.x_range = Range1d(start=x_min - x_buffer, end=x_max + x_buffer)
self.y_range = Range1d(start=y_min, end=y_max * 1.1)
|
srinathv/bokeh
|
bokeh/charts/builder/histogram_builder.py
|
Python
|
bsd-3-clause
| 3,598
|
# Copyright 2018 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for `dm_control.mjcf.copier`."""
import os
from absl.testing import absltest
from dm_control import mjcf
from dm_control.mjcf import parser
import numpy as np
_ASSETS_DIR = os.path.join(os.path.dirname(__file__), 'test_assets')
_TEST_MODEL_XML = os.path.join(_ASSETS_DIR, 'test_model.xml')
_MODEL_WITH_ASSETS_XML = os.path.join(_ASSETS_DIR, 'model_with_assets.xml')
class CopierTest(absltest.TestCase):
def testSimpleCopy(self):
mjcf_model = parser.from_path(_TEST_MODEL_XML)
mixin = mjcf.RootElement(model='test_mixin')
mixin.compiler.boundmass = 1
mjcf_model.include_copy(mixin)
self.assertEqual(mjcf_model.model, 'test') # Model name should not change
self.assertEqual(mjcf_model.compiler.boundmass, mixin.compiler.boundmass)
mixin.compiler.boundinertia = 2
mjcf_model.include_copy(mixin)
self.assertEqual(mjcf_model.compiler.boundinertia,
mixin.compiler.boundinertia)
mixin.compiler.boundinertia = 1
with self.assertRaisesRegex(ValueError, 'Conflicting values'):
mjcf_model.include_copy(mixin)
mixin.worldbody.add('body', name='b_0', pos=[0, 1, 2])
mjcf_model.include_copy(mixin, override_attributes=True)
self.assertEqual(mjcf_model.compiler.boundmass, mixin.compiler.boundmass)
self.assertEqual(mjcf_model.compiler.boundinertia,
mixin.compiler.boundinertia)
np.testing.assert_array_equal(mjcf_model.worldbody.body['b_0'].pos,
[0, 1, 2])
def testCopyingWithReference(self):
sensor_mixin = mjcf.RootElement('sensor_mixin')
touch_site = sensor_mixin.worldbody.add('site', name='touch_site')
sensor_mixin.sensor.add('touch', name='touch_sensor', site=touch_site)
mjcf_model = mjcf.RootElement('model')
mjcf_model.include_copy(sensor_mixin)
# Copied reference should be updated to the copied site.
self.assertIs(mjcf_model.find('sensor', 'touch_sensor').site,
mjcf_model.find('site', 'touch_site'))
def testCopyingWithAssets(self):
mjcf_model = parser.from_path(_MODEL_WITH_ASSETS_XML)
copied = mjcf.RootElement()
copied.include_copy(mjcf_model)
original_assets = (mjcf_model.find_all('mesh')
+ mjcf_model.find_all('texture')
+ mjcf_model.find_all('hfield'))
copied_assets = (copied.find_all('mesh')
+ copied.find_all('texture')
+ copied.find_all('hfield'))
self.assertLen(copied_assets, len(original_assets))
for original_asset, copied_asset in zip(original_assets, copied_assets):
self.assertIs(copied_asset.file, original_asset.file)
if __name__ == '__main__':
absltest.main()
|
deepmind/dm_control
|
dm_control/mjcf/copier_test.py
|
Python
|
apache-2.0
| 3,393
|
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
import datetime
import json
import logging
import multiprocessing
import random
import platform
import socket
import ssl
import sys
from threading import Event, Thread
import time
import six
from cassandra.policies import HostDistance
from cassandra.util import ms_timestamp_from_datetime
from cassandra.datastax.insights.registry import insights_registry
from cassandra.datastax.insights.serializers import initialize_registry
log = logging.getLogger(__name__)
class MonitorReporter(Thread):
def __init__(self, interval_sec, session):
"""
        takes an int indicating the interval between status reports and
        the Session through which the reports are sent
"""
# Thread is an old-style class so we can't super()
Thread.__init__(self, name='monitor_reporter')
initialize_registry(insights_registry)
self._interval, self._session = interval_sec, session
self._shutdown_event = Event()
self.daemon = True
self.start()
def run(self):
self._send_via_rpc(self._get_startup_data())
# introduce some jitter -- send up to 1/10 of _interval early
self._shutdown_event.wait(self._interval * random.uniform(.9, 1))
while not self._shutdown_event.is_set():
start_time = time.time()
self._send_via_rpc(self._get_status_data())
elapsed = time.time() - start_time
self._shutdown_event.wait(max(self._interval - elapsed, 0.01))
# TODO: redundant with ConnectionHeartbeat.ShutdownException
class ShutDownException(Exception):
pass
def _send_via_rpc(self, data):
try:
self._session.execute(
"CALL InsightsRpc.reportInsight(%s)", (json.dumps(data),)
)
log.debug('Insights RPC data: {}'.format(data))
except Exception as e:
log.debug('Insights RPC send failed with {}'.format(e))
log.debug('Insights RPC data: {}'.format(data))
def _get_status_data(self):
cc = self._session.cluster.control_connection
connected_nodes = {
host.address: {
'connections': state['open_count'],
'inFlightQueries': state['in_flights']
}
for (host, state) in self._session.get_pool_state().items()
}
return {
'metadata': {
# shared across drivers; never change
'name': 'driver.status',
# format version
'insightMappingId': 'v1',
'insightType': 'EVENT',
# since epoch
'timestamp': ms_timestamp_from_datetime(datetime.datetime.utcnow()),
'tags': {
'language': 'python'
}
},
# // 'clientId', 'sessionId' and 'controlConnection' are mandatory
# // the rest of the properties are optional
'data': {
# // 'clientId' must be the same as the one provided in the startup message
'clientId': str(self._session.cluster.client_id),
# // 'sessionId' must be the same as the one provided in the startup message
'sessionId': str(self._session.session_id),
'controlConnection': cc._connection.host if cc._connection else None,
'connectedNodes': connected_nodes
}
}
def _get_startup_data(self):
cc = self._session.cluster.control_connection
try:
local_ipaddr = cc._connection._socket.getsockname()[0]
except Exception as e:
local_ipaddr = None
log.debug('Unable to get local socket addr from {}: {}'.format(cc._connection, e))
hostname = socket.getfqdn()
host_distances_counter = Counter(
self._session.cluster.profile_manager.distance(host)
for host in self._session.hosts
)
host_distances_dict = {
'local': host_distances_counter[HostDistance.LOCAL],
'remote': host_distances_counter[HostDistance.REMOTE],
'ignored': host_distances_counter[HostDistance.IGNORED]
}
try:
compression_type = cc._connection._compression_type
except AttributeError:
compression_type = 'NONE'
cert_validation = None
try:
if self._session.cluster.ssl_context:
if isinstance(self._session.cluster.ssl_context, ssl.SSLContext):
cert_validation = self._session.cluster.ssl_context.verify_mode == ssl.CERT_REQUIRED
else: # pyopenssl
from OpenSSL import SSL
cert_validation = self._session.cluster.ssl_context.get_verify_mode() != SSL.VERIFY_NONE
elif self._session.cluster.ssl_options:
cert_validation = self._session.cluster.ssl_options.get('cert_reqs') == ssl.CERT_REQUIRED
except Exception as e:
log.debug('Unable to get the cert validation: {}'.format(e))
uname_info = platform.uname()
return {
'metadata': {
'name': 'driver.startup',
'insightMappingId': 'v1',
'insightType': 'EVENT',
'timestamp': ms_timestamp_from_datetime(datetime.datetime.utcnow()),
'tags': {
'language': 'python'
},
},
'data': {
'driverName': 'DataStax Python Driver',
'driverVersion': sys.modules['cassandra'].__version__,
'clientId': str(self._session.cluster.client_id),
'sessionId': str(self._session.session_id),
'applicationName': self._session.cluster.application_name or 'python',
'applicationNameWasGenerated': not self._session.cluster.application_name,
'applicationVersion': self._session.cluster.application_version,
'contactPoints': self._session.cluster._endpoint_map_for_insights,
'dataCenters': list(set(h.datacenter for h in self._session.cluster.metadata.all_hosts()
if (h.datacenter and
self._session.cluster.profile_manager.distance(h) == HostDistance.LOCAL))),
'initialControlConnection': cc._connection.host if cc._connection else None,
'protocolVersion': self._session.cluster.protocol_version,
'localAddress': local_ipaddr,
'hostName': hostname,
'executionProfiles': insights_registry.serialize(self._session.cluster.profile_manager),
'configuredConnectionLength': host_distances_dict,
'heartbeatInterval': self._session.cluster.idle_heartbeat_interval,
'compression': compression_type.upper() if compression_type else 'NONE',
'reconnectionPolicy': insights_registry.serialize(self._session.cluster.reconnection_policy),
'sslConfigured': {
'enabled': bool(self._session.cluster.ssl_options or self._session.cluster.ssl_context),
'certValidation': cert_validation
},
'authProvider': {
'type': (self._session.cluster.auth_provider.__class__.__name__
if self._session.cluster.auth_provider else
None)
},
'otherOptions': {
},
'platformInfo': {
'os': {
'name': uname_info.system if six.PY3 else uname_info[0],
'version': uname_info.release if six.PY3 else uname_info[2],
'arch': uname_info.machine if six.PY3 else uname_info[4]
},
'cpus': {
'length': multiprocessing.cpu_count(),
'model': platform.processor()
},
'runtime': {
'python': sys.version,
'event_loop': self._session.cluster.connection_class.__name__
}
},
'periodicStatusInterval': self._interval
}
}
def stop(self):
log.debug("Shutting down Monitor Reporter")
self._shutdown_event.set()
self.join()
|
datastax/python-driver
|
cassandra/datastax/insights/reporter.py
|
Python
|
apache-2.0
| 9,149
|
class CacheManager(object):
"""
    Handle storing the DMCache objects. We want to use the same
object for each cache_key.
Note: This original was just a module level dict
"""
def __init__(self, cache_cls=None):
if cache_cls is None:
cache_cls = DMCache
self.cache_cls = cache_cls
self.caches = {}
def __setitem__(self, name, obj):
self.caches[name] = obj
def get(self, name):
"""
"""
if name not in self.caches:
cache = self.cache_cls(name)
self[name] = cache
return self.caches.get(name)
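# Usage sketch: repeated lookups return the same DMCache instance.
#
#   manager = CacheManager()
#   assert manager.get('dm_key') is manager.get('dm_key')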
class DMCache(object):
"""
Object that stores variables for a DataModule
Essentially a dict-like structure with a sync method
"""
def __init__(self, cache_key):
self.cache_key = cache_key
self._vars = {}
def sync(self, vars, config):
"""
        Sync the cache to the new vars. For a dict backend,
        it's a simple replace.
        For file backends, this gets more complicated as you don't
        want to serialize cache values that don't change.
"""
self._vars = vars
def iteritems(self):
return iter(list(self._vars.items()))
items = iteritems
def keys(self):
return list(self._vars.keys())
def __contains__(self, key):
return key in self._vars
def __getitem__(self, key):
return self._vars[key]
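# Hypothetical usage sketch (the cache name below is illustrative): the
# manager hands back the same DMCache instance for a given cache_key, so
# repeated lookups share state.
if __name__ == '__main__':
    manager = CacheManager()
    cache = manager.get('notebook')
    cache.sync({'x': 1, 'y': 2}, config=None)
    assert manager.get('notebook') is cache
    assert cache['x'] == 1 and 'y' in cache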
|
dalejung/datamodule
|
datamodule/cache.py
|
Python
|
mit
| 1,452
|
# Copyright 2011 OpenStack Foundation
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from oslo_log import log as logging
from conveyor.api import extensions
from conveyor.api.wsgi import wsgi
from conveyor.clone import api
from conveyor.common import plan_status as p_status
from conveyor.db import api as db_api
from conveyor.plan import api as plan_api
from conveyor.i18n import _
LOG = logging.getLogger(__name__)
class PlansActionController(wsgi.Controller):
def __init__(self, ext_mgr=None, *args, **kwargs):
super(PlansActionController, self).__init__(*args, **kwargs)
self.clone_api = api.API()
self.plan_api = plan_api.PlanAPI()
self.ext_mgr = ext_mgr
@wsgi.response(202)
@wsgi.action('download_template')
def _download_template(self, req, id, body):
LOG.debug("download template of plan %s start in API from template",
id)
context = req.environ['conveyor.context']
plan = db_api.plan_get(context, id)
plan_status = plan['plan_status']
if plan_status not in (p_status.AVAILABLE, p_status.CLONING,
p_status.MIGRATING, p_status.FINISHED):
msg = _("the plan %(plan_id)s in state %(state)s"
"can't download template") % {
'plan_id': id,
'state': plan_status,
}
raise exc.HTTPBadRequest(explanation=msg)
content = self.clone_api.download_template(context, id)
        LOG.debug('the content is %s', content)
return content
@wsgi.response(202)
@wsgi.action('os-reset_state')
def _reset_state(self, req, id, body):
LOG.debug("Start reset plan state in API for plan: %s", id)
if not self.is_valid_body(body, 'os-reset_state'):
LOG.error("Reset plan state request body has not key:\
os-reset_state")
raise exc.HTTPUnprocessableEntity()
context = req.environ['conveyor.context']
update = body.get('os-reset_state')
self.plan_api.update_plan(context, id, update)
LOG.debug("End reset plan state in API for plan: %s", id)
return {'plan_id': id, 'plan_status': update.get('plan_status')}
@wsgi.response(202)
@wsgi.action('force_delete-plan')
def _force_delete_plan(self, req, id, body):
if not self.is_valid_body(body, 'force_delete-plan'):
            LOG.error('Force delete plan request body has no key: '
                      'force_delete-plan')
raise exc.HTTPUnprocessableEntity()
context = req.environ['conveyor.context']
plan_id = body.get('force_delete-plan', {}).get('plan_id', None)
self.plan_api.force_delete_plan(context, plan_id)
@wsgi.response(202)
@wsgi.action('plan-delete-resource')
def _plan_delete_resource(self, req, id, body):
if not self.is_valid_body(body, 'plan-delete-resource'):
            LOG.error('Delete plan resource request body has no key: '
                      'plan-delete-resource')
raise exc.HTTPUnprocessableEntity()
context = req.environ['conveyor.context']
plan_id = body.get('plan-delete-resource', {}).get('plan_id', None)
self.plan_api.plan_delete_resource(context, plan_id)
class Plan(extensions.ExtensionDescriptor):
"""Enable admin actions."""
name = "Plan"
alias = "conveyor-plan"
namespace = "http://docs.openstack.org/conveyor/ext/plan/api/v1"
updated = "2016-01-29T00:00:00+00:00"
# extend exist resource
def get_controller_extensions(self):
controller = PlansActionController(self.ext_mgr)
extension = extensions.ControllerExtension(self, 'plans', controller)
return [extension]
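# Illustrative action payloads for the handlers above (shapes inferred from
# the body parsing in each method; the plan id and status are placeholders):
EXAMPLE_ACTION_BODIES = [
    {'os-reset_state': {'plan_status': 'available'}},
    {'force_delete-plan': {'plan_id': 'PLAN-UUID'}},
    {'plan-delete-resource': {'plan_id': 'PLAN-UUID'}},
]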
|
Hybrid-Cloud/conveyor
|
conveyor/api/contrib/plan.py
|
Python
|
apache-2.0
| 4,393
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-22 21:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('examinations', '0004_auto_20170323_0258'),
]
operations = [
migrations.AlterField(
model_name='roomassignment',
name='date',
field=models.DateField(blank=True, null=True),
),
]
|
dhavalmanjaria/dma-student-information-system
|
examinations/migrations/0005_auto_20170323_0258.py
|
Python
|
gpl-2.0
| 470
|
"""py_deps.tests package."""
|
mkouhei/py-deps
|
py_deps/tests/__init__.py
|
Python
|
gpl-3.0
| 29
|
#!/usr/bin/python
from peewee import *
db = MySQLDatabase("ifc", host="127.0.0.1", port=3306, user="dario", passwd="SecurePassword08!")
class BaseModel(Model):
""" Base Model which is extended in each model """
class Meta:
database = db
class Author(BaseModel):
""" Author information """
name = CharField(unique=True)
class Stock(BaseModel):
""" Stock name and ticker symbol """
ticker = CharField(unique=True)
name = CharField(unique=True)
class StockFeature(BaseModel):
""" Model storing info about stock prices """
id = IntegerField()
stock = ForeignKeyField(Stock, related_name='stock_name')
date = DateField()
high = DecimalField()
low = DecimalField()
volume = DecimalField()
opening = DecimalField()
closing = DecimalField()
rsi = DecimalField()
macd = DecimalField()
sma = DecimalField()
ema = DecimalField()
class Meta:
primary_key = CompositeKey('id', 'stock', 'date')
""" I'm not sure if we need this, I think it's just a join and filter
class ArticleFeatureStockFeature(BaseModel):
article =
stock =
date = DateField()
class Meta:
primary_key = CompositeKey(
"""
class Article(BaseModel):
""" Contains content from an article """
author = ForeignKeyField(Author, related_name='author')
date = DateField()
title = CharField(unique=True)
content = TextField()
source = CharField()
class ArticleFeature(BaseModel):
""" Relationship for showing which article relates to the extracted feasture """
article = ForeignKeyField(Article, related_name="article", primary_key=True)
positive = DecimalField()
neutral = DecimalField()
negative = DecimalField()
compound = DecimalField()
other = TextField()
class StockArticle(BaseModel):
""" many-to-many relationship showing which stocks were found in an article """
stock = ForeignKeyField(Stock)
article = ForeignKeyField(Article)
if __name__ == "__main__":
try:
Author.create_table()
except OperationalError:
print "Author table already exists"
try:
Stock.create_table()
except OperationalError:
print "Stock table already exists"
try:
StockFeature.create_table()
except OperationalError:
print "Stock_feature table already exists"
try:
Article.create_table()
except OperationalError:
print "Article table already exists"
try:
StockArticle.create_table()
except OperationalError:
print "Stock_article table already exists"
try:
ArticleFeature.create_table()
except OperationalError:
print "Article_feature table already exists"
#try:
# ArticleFeatureStockFeature.create_table()
#except OperationalError:
# print "Article_feature_stock_feature table already exists"
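# Hypothetical usage sketch (Python 2, matching this module; the names and
# values below are made up for illustration):
#
# author, _ = Author.get_or_create(name='Reuters')
# aapl, _ = Stock.get_or_create(ticker='AAPL', name='Apple Inc.')
# for stock in Stock.select():
#     print stock.ticker, stock.name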
|
Darthone/bug-free-octo-parakeet
|
src/ifc/db.py
|
Python
|
mit
| 3,037
|
# Copyright (c) Gary Strangman. All rights reserved
#
# Disclaimer
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
# Comments and/or additions are welcome (send e-mail to:
# strang@nmr.mgh.harvard.edu).
#
"""
Defines a number of functions for pseudo-command-line OS functionality.
cd(directory)
pwd <-- can be used WITHOUT parens
ls(d='.')
rename(from,to)
get(namepatterns,verbose=1)
getstrings(namepatterns,verbose=1)
put(outlist,filename,writetype='w')
aget(namepatterns,verbose=1)
aput(outarray,filename,writetype='w')
bget(filename,numslices=1,xsize=64,ysize=64)
braw(filename,btype)
bput(outarray,filename,writeheader=0,packstring='h',writetype='wb')
mrget(filename)
find_dirs(sourcedir)
"""
## CHANGES:
## =======
## 02-11-20 ... added binget(), binput(), array2afni(), version 0.5
## 02-10-20 ... added find_dirs() function, changed version to 0.4
## 01-11-15 ... changed aput() and put() to accept a delimiter
## 01-04-19 ... added oneperline option to put() function
## 99-11-07 ... added DAs quick flat-text-file loaders, load() and fload()
## 99-11-01 ... added version number (0.1) for distribution
## 99-08-30 ... Put quickload in here
## 99-06-27 ... Changed bget thing back ... confused ...
## 99-06-24 ... exchanged xsize and ysize in bget for non-square images (NT??)
## modified bget to raise an IOError when file not found
## 99-06-12 ... added load() and save() aliases for aget() and aput() (resp.)
## 99-04-13 ... changed aget() to ignore (!!!!) lines beginning with # or %
## 99-01-17 ... changed get() so ints come in as ints (not floats)
##
try:
import mmapfile
except:
pass
import pstat
import glob, re, string, types, os, Numeric, struct, copy, time, tempfile, sys
from types import *
N = Numeric
__version__ = 0.5
def wrap(f):
"""
Wraps a function so that if it's entered *by itself*
in the interpreter without ()'s, it gets called anyway
"""
class W:
def __init__(self, f):
self.f = f
def __repr__(self):
            x = apply(self.f)
if x:
return repr(x)
else:
return ''
return W(f)
def cd (directory):
"""
Changes the working python directory for the interpreter.
Usage: cd(directory) where 'directory' is a string
"""
os.chdir(directory)
return
def pwd():
"""
    Returns the current working directory of the interpreter.
Usage: pwd (no parens needed)
"""
return os.getcwd()
pwd = wrap(pwd)
def ls(d='.'):
"""
Produces a directory listing. Default is the current directory.
Usage: ls(d='.')
"""
os.system('ls '+d)
return None
def rename(source, dest):
"""
Renames files specified by UNIX inpattern to those specified by UNIX
outpattern. Can only handle a single '*' in the two patterns!!!
Usage: rename (source, dest) e.g., rename('*.txt', '*.c')
"""
infiles = glob.glob(source)
outfiles = []
incutindex = string.index(source,'*')
    outcutindex = string.index(dest,'*')
findpattern1 = source[0:incutindex]
findpattern2 = source[incutindex+1:]
replpattern1 = dest[0:incutindex]
replpattern2 = dest[incutindex+1:]
for fname in infiles:
if incutindex > 0:
newname = re.sub(findpattern1,replpattern1,fname,1)
if outcutindex < len(dest)-1:
if incutindex > 0:
lastone = string.rfind(newname,replpattern2)
newname = newname[0:lastone] + re.sub(findpattern2,replpattern2,fname[lastone:],1)
else:
lastone = string.rfind(fname,findpattern2)
if lastone <> -1:
newname = fname[0:lastone]
newname = newname + re.sub(findpattern2,replpattern2,fname[lastone:],1)
os.rename(fname,newname)
return
def get (namepatterns,verbose=1):
"""
Loads a list of lists from text files (specified by a UNIX-style
wildcard filename pattern) and converts all numeric values to floats.
Uses the glob module for filename pattern conversion. Loaded filename
is printed if verbose=1.
Usage: get (namepatterns,verbose=1)
Returns: a 1D or 2D list of lists from whitespace delimited text files
specified by namepatterns; numbers that can be converted to floats
are so converted
"""
fnames = []
if type(namepatterns) in [ListType,TupleType]:
for item in namepatterns:
fnames = fnames + glob.glob(item)
else:
fnames = glob.glob(namepatterns)
if len(fnames) == 0:
if verbose:
print 'NO FILENAMES MATCH ('+namepatterns+') !!'
return None
if verbose:
print fnames # so user knows what has been loaded
elements = []
for i in range(len(fnames)):
file = open(fnames[i])
newelements = map(string.split,file.readlines())
for i in range(len(newelements)):
for j in range(len(newelements[i])):
try:
newelements[i][j] = string.atoi(newelements[i][j])
except ValueError:
try:
newelements[i][j] = string.atof(newelements[i][j])
except:
pass
elements = elements + newelements
if len(elements)==1: elements = elements[0]
return elements
def getstrings (namepattern,verbose=1):
"""
Loads a (set of) text file(s), with all elements left as string type.
Uses UNIX-style wildcards (i.e., function uses glob). Loaded filename
is printed if verbose=1.
Usage: getstrings (namepattern, verbose=1)
Returns: a list of strings, one per line in each text file specified by
namepattern
"""
fnames = glob.glob(namepattern)
if len(fnames) == 0:
if verbose:
print 'NO FILENAMES MATCH ('+namepattern+') !!'
return None
if verbose:
print fnames
elements = []
for filename in fnames:
file = open(filename)
newelements = map(string.split,file.readlines())
elements = elements + newelements
return elements
def put (outlist,fname,writetype='w',oneperline=0,delimit=' '):
"""
Writes a passed mixed-type list (str and/or numbers) to an output
file, and then closes the file. Default is overwrite the destination
file.
Usage: put (outlist,fname,writetype='w',oneperline=0,delimit=' ')
Returns: None
"""
if type(outlist) in [N.ArrayType]:
aput(outlist,fname,writetype)
return
if type(outlist[0]) not in [ListType,TupleType]: # 1D list
outfile = open(fname,writetype)
if not oneperline:
outlist = pstat.list2string(outlist,delimit)
outfile.write(outlist)
outfile.write('\n')
else: # they want one element from the list on each file line
for item in outlist:
outfile.write(str(item)+'\n')
outfile.close()
else: # 2D list (list-of-lists)
outfile = open(fname,writetype)
for row in outlist:
outfile.write(pstat.list2string(row,delimit))
outfile.write('\n')
outfile.close()
return None
def isstring(x):
if type(x)==StringType:
return 1
else:
return 0
def aget (namepattern,verbose=1):
"""
Loads an array from 2D text files (specified by a UNIX-style wildcard
filename pattern). ONLY 'GET' FILES WITH EQUAL NUMBERS OF COLUMNS
ON EVERY ROW (otherwise returned array will be zero-dimensional).
Usage: aget (namepattern)
Returns: an array of integers, floats or objects (type='O'), depending on the
contents of the files specified by namepattern
"""
fnames = glob.glob(namepattern)
if len(fnames) == 0:
if verbose:
print 'NO FILENAMES MATCH ('+namepattern+') !!'
return None
if verbose:
print fnames
elements = []
for filename in fnames:
file = open(filename)
newelements = file.readlines()
del_list = []
for row in range(len(newelements)):
if (newelements[row][0]=='%' or newelements[row][0]=='#'
or len(newelements[row])==1):
del_list.append(row)
del_list.reverse()
for i in del_list:
newelements.pop(i)
newelements = map(string.split,newelements)
for i in range(len(newelements)):
for j in range(len(newelements[i])):
try:
newelements[i][j] = string.atof(newelements[i][j])
except:
pass
elements = elements + newelements
for row in range(len(elements)):
if N.add.reduce(N.array(map(isstring,elements[row])))==len(elements[row]):
print "A row of strings was found. Returning a LIST."
return elements
try:
elements = N.array(elements)
except TypeError:
elements = N.array(elements,'O')
return elements
def aput (outarray,fname,writetype='w',delimit=' '):
"""
Sends passed 1D or 2D array to an output file and closes the file.
Usage: aput (outarray,fname,writetype='w',delimit=' ')
Returns: None
"""
outfile = open(fname,writetype)
if len(outarray.shape) == 1:
outarray = outarray[N.NewAxis,:]
if len(outarray.shape) > 2:
raise TypeError, "put() and aput() require 1D or 2D arrays. Otherwise use some kind of pickling."
else: # must be a 2D array
for row in outarray:
outfile.write(string.join(map(str,row),delimit))
outfile.write('\n')
outfile.close()
return None
def bget(imfile,shp=None,unpackstr=N.Int16,bytesperpixel=2.0,sliceinit=0):
"""
Reads in a binary file, typically with a .bshort or .bfloat extension.
If so, the last 3 parameters are set appropriately. If not, the last 3
parameters default to reading .bshort files (2-byte integers in big-endian
binary format).
Usage: bget(imfile,shp=None,unpackstr=N.Int16,bytesperpixel=2.0,sliceinit=0)
"""
if imfile[:3] == 'COR':
return CORget(imfile)
if imfile[-2:] == 'MR':
return mrget(imfile,unpackstr)
if imfile[-4:] == 'BRIK':
return brikget(imfile,unpackstr,shp)
if imfile[-3:] in ['mnc','MNC']:
return mincget(imfile,unpackstr,shp)
if imfile[-3:] == 'img':
return mghbget(imfile,unpackstr,shp)
if imfile[-6:] == 'bshort' or imfile[-6:] == 'bfloat':
if shp == None:
return mghbget(imfile,unpackstr=unpackstr,bytesperpixel=bytesperpixel,sliceinit=sliceinit)
else:
return mghbget(imfile,shp[0],shp[1],shp[2],unpackstr,bytesperpixel,sliceinit)
def CORget(infile):
"""
Reads a binary COR-nnn file (flattening file).
Usage: CORget(imfile)
Returns: 2D array of 16-bit ints
"""
d=braw(infile,N.Int8)
d.shape = (256,256)
d = N.where(N.greater_equal(d,0),d,256+d)
return d
def mincget(imfile,unpackstr=N.Int16,shp=None):
"""
Loads in a .MNC file.
Usage: mincget(imfile,unpackstr=N.Int16,shp=None) default shp = -1,20,64,64
"""
if shp == None:
shp = (-1,20,64,64)
os.system('mincextract -short -range 0 4095 -image_range 0 4095 ' +
imfile+' > minctemp.bshort')
try:
d = braw('minctemp.bshort',unpackstr)
except:
print "Couldn't find file: "+imfile
raise IOError, "Couldn't find file in mincget()"
print shp, d.shape
d.shape = shp
os.system('rm minctemp.bshort')
return d
def brikget(imfile,unpackstr=N.Int16,shp=None):
"""
Gets an AFNI BRIK file.
Usage: brikget(imfile,unpackstr=N.Int16,shp=None) default shp: (-1,48,61,51)
"""
if shp == None:
shp = (-1,48,61,51)
try:
file = open(imfile, "rb")
except:
print "Couldn't find file: "+imfile
raise IOError, "Couldn't find file in brikget()"
    mults = N.array([0])
    try:
header = imfile[0:-4]+'HEAD'
lines = open(header).readlines()
for i in range(len(lines)):
if string.find(lines[i],'DATASET_DIMENSIONS') <> -1:
dims = string.split(lines[i+2][0:string.find(lines[i+2],' 0')])
dims = map(string.atoi,dims)
if string.find(lines[i],'BRICK_FLOAT_FACS') <> -1:
count = string.atoi(string.split(lines[i+1])[2])
mults = []
for j in range(int(N.ceil(count/5.))):
mults += map(string.atof,string.split(lines[i+2+j]))
mults = N.array(mults)
dims.reverse()
shp = [-1]+dims
except IOError:
print "No header file. Continuing ..."
lines = None
print shp
print 'Using unpackstr:',unpackstr #,', bytesperpixel=',bytesperpixel
file = open(imfile, "rb")
bdata = file.read()
# the > forces big-endian (for or from Sun/SGI)
bdata = N.fromstring(bdata,unpackstr)
littleEndian = ( struct.pack('i',1)==struct.pack('<i',1) )
if (littleEndian and os.uname()[0]<>'Linux') or (max(bdata)>1e30):
bdata = bdata.byteswapped()
try:
bdata.shape = shp
except:
print 'Incorrect shape ...',shp,len(bdata)
raise ValueError, 'Incorrect shape for file size'
if len(bdata) == 1:
bdata = bdata[0]
if N.sum(mults) == 0:
return bdata
try:
multshape = [1]*len(bdata.shape)
for i in range(len(bdata.shape)):
if len(mults) == bdata.shape[i]:
multshape[i] = len(mults)
break
mults.shape = multshape
return bdata*mults
except:
return bdata
def mghbget(imfile,numslices=-1,xsize=64,ysize=64,
unpackstr=N.Int16,bytesperpixel=2.0,sliceinit=0):
"""
Reads in a binary file, typically with a .bshort or .bfloat extension.
If so, the last 3 parameters are set appropriately. If not, the last 3
parameters default to reading .bshort files (2-byte integers in big-endian
binary format).
Usage: mghbget(imfile, numslices=-1, xsize=64, ysize=64,
unpackstr=N.Int16, bytesperpixel=2.0, sliceinit=0)
"""
try:
file = open(imfile, "rb")
except:
print "Couldn't find file: "+imfile
raise IOError, "Couldn't find file in bget()"
try:
header = imfile[0:-6]+'hdr'
vals = get(header,0) # '0' means no missing-file warning msg
if type(vals[0]) == ListType: # it's an extended header
xsize = int(vals[0][0])
ysize = int(vals[0][1])
numslices = int(vals[0][2])
else:
xsize = int(vals[0])
ysize = int(vals[1])
numslices = int(vals[2])
except:
print "No header file. Continuing ..."
suffix = imfile[-6:]
if suffix == 'bshort':
pass
elif suffix[-3:] == 'img':
pass
elif suffix == 'bfloat':
unpackstr = N.Float32
bytesperpixel = 4.0
sliceinit = 0.0
else:
print 'Not a bshort, bfloat or img file.'
print 'Using unpackstr:',unpackstr,', bytesperpixel=',bytesperpixel
imsize = xsize*ysize
file = open(imfile, "rb")
bdata = file.read()
numpixels = len(bdata) / bytesperpixel
if numpixels%1 != 0:
raise ValueError, "Incorrect file size in fmri.bget()"
else: # the > forces big-endian (for or from Sun/SGI)
bdata = N.fromstring(bdata,unpackstr)
littleEndian = ( struct.pack('i',1)==struct.pack('<i',1) )
# if littleEndian:
# bdata = bdata.byteswapped()
if (littleEndian and os.uname()[0]<>'Linux') or (max(bdata)>1e30):
bdata = bdata.byteswapped()
if suffix[-3:] == 'img':
if numslices == -1:
numslices = len(bdata)/8200 # 8200=(64*64*2)+8 bytes per image
xsize = 64
ysize = 128
slices = N.zeros((numslices,xsize,ysize),N.Int)
for i in range(numslices):
istart = i*8 + i*xsize*ysize
iend = i*8 + (i+1)*xsize*ysize
print i, istart,iend
slices[i] = N.reshape(N.array(bdata[istart:iend]),(xsize,ysize))
else:
if numslices == 1:
slices = N.reshape(N.array(bdata),[xsize,ysize])
else:
slices = N.reshape(N.array(bdata),[numslices,xsize,ysize])
if len(slices) == 1:
slices = slices[0]
return slices
def braw(fname,btype,shp=None):
"""
Opens a binary file, unpacks it, and returns a flat array of the
type specified. Use Numeric types ... N.Float32, N.Int64, etc.
Usage: braw(fname,btype,shp=None)
Returns: flat array of floats, or ints (if btype=N.Int16)
"""
file = open(fname,'rb')
bdata = file.read()
bdata = N.fromstring(bdata,btype)
littleEndian = ( struct.pack('i',1)==struct.pack('<i',1) )
# if littleEndian:
# bdata = bdata.byteswapped() # didn't used to need this with '>' above
if (littleEndian and os.uname()[0]<>'Linux') or (max(bdata)>1e30):
bdata = bdata.byteswapped()
if shp:
try:
bdata.shape = shp
return bdata
except:
pass
return N.array(bdata)
def glget(fname,btype):
"""
Load in a file containing pixels from glReadPixels dump.
Usage: glget(fname,btype)
    Returns: array of 'btype' elements (shape read from the file), suitable for im.ashow()
"""
d = braw(fname,btype)
d = d[8:]
f = open(fname,'rb')
shp = f.read(8)
f.close()
shp = N.fromstring(shp,N.Int)
shp[0],shp[1] = shp[1],shp[0]
try:
carray = N.reshape(d,shp)
        return carray
except:
pass
try:
r = d[0::3]+0
g = d[1::3]+0
b = d[2::3]+0
r.shape = shp
g.shape = shp
b.shape = shp
carray = N.array([r,g,b])
except:
outstr = "glget: shape not correct for data of length "+str(len(d))
raise ValueError, outstr
return carray
def mget(fname,btype):
"""
Load in a file that was saved from matlab
Usage: mget(fname,btype)
"""
d = braw(fname,btype)
try:
header = fname[0:-6]+'hdr'
vals = get(header,0) # '0' means no missing-file warning msg
if type(vals[0]) == ListType: # it's an extended header
xsize = int(vals[0][0])
ysize = int(vals[0][1])
numslices = int(vals[0][2])
else:
xsize = int(vals[0])
ysize = int(vals[1])
numslices = int(vals[2])
print xsize,ysize,numslices, d.shape
except:
print "No header file. Continuing ..."
if numslices == 1:
d.shape = [ysize,xsize]
return N.transpose(d)*1
else:
d.shape = [numslices,ysize,xsize]
return N.transpose(d)*1
def mput(outarray,fname,writeheader=0,btype=N.Int16):
"""
Save a file for use in matlab.
"""
outarray = N.transpose(outarray)
outdata = N.ravel(outarray).astype(btype)
outdata = outdata.tostring()
outfile = open(fname,'wb')
outfile.write(outdata)
outfile.close()
if writeheader == 1:
try:
suffixindex = string.rfind(fname,'.')
hdrname = fname[0:suffixindex]
except ValueError:
hdrname = fname
if len(outarray.shape) == 2:
hdr = [outarray.shape[1],outarray.shape[0], 1, 0]
else:
hdr = [outarray.shape[2],outarray.shape[1],outarray.shape[0], 0,'\n']
print hdrname+'.hdr'
outfile = open(hdrname+'.hdr','w')
outfile.write(pstat.list2string(hdr))
outfile.close()
return None
def bput(outarray,fname,writeheader=0,packtype=N.Int16,writetype='wb'):
"""
Writes the passed array to a binary output file, and then closes
the file. Default is overwrite the destination file.
Usage: bput (outarray,filename,writeheader=0,packtype=N.Int16,writetype='wb')
"""
suffix = fname[-6:]
if suffix == 'bshort':
packtype = N.Int16
elif suffix == 'bfloat':
packtype = N.Float32
else:
print 'Not a bshort or bfloat file. Using packtype=',packtype
outdata = N.ravel(outarray).astype(packtype)
littleEndian = ( struct.pack('i',1)==struct.pack('<i',1) )
if littleEndian and os.uname()[0]<>'Linux':
outdata = outdata.byteswapped()
outdata = outdata.tostring()
outfile = open(fname,writetype)
outfile.write(outdata)
outfile.close()
if writeheader == 1:
try:
suffixindex = string.rfind(fname,'.')
hdrname = fname[0:suffixindex]
except ValueError:
hdrname = fname
if len(outarray.shape) == 2:
hdr = [outarray.shape[0],outarray.shape[1], 1, 0]
else:
hdr = [outarray.shape[1],outarray.shape[2],outarray.shape[0], 0,'\n']
print hdrname+'.hdr'
outfile = open(hdrname+'.hdr','w')
outfile.write(pstat.list2string(hdr))
outfile.close()
return None
def mrget(fname,datatype=N.Int16):
"""
Opens a binary .MR file and clips off the tail data portion of it, returning
the result as an array.
Usage: mrget(fname,datatype=N.Int16)
"""
d = braw(fname,datatype)
if len(d) > 512*512:
return N.reshape(d[-512*512:],(512,512))
elif len(d) > 256*256:
return N.reshape(d[-256*256:],(256,256))
elif len(d) > 128*128:
return N.reshape(d[-128*128:],(128,128))
elif len(d) > 64*64:
return N.reshape(d[-64*64:],(64,64))
else:
return N.reshape(d[-32*32:],(32,32))
def quickload(fname,linestocut=4):
"""
Quickly loads in a long text file, chopping off first n 'linestocut'.
Usage: quickload(fname,linestocut=4)
Returns: array filled with data in fname
"""
f = open(fname,'r')
d = f.readlines()
f.close()
print fname,'read in.'
d = d[linestocut:]
d = map(string.split,d)
print 'Done with string.split on lines.'
for i in range(len(d)):
d[i] = map(string.atoi,d[i])
print 'Conversion to ints done.'
return N.array(d)
def writedelimited (listoflists, delimiter, file, writetype='w'):
"""
Writes a list of lists in columns, separated by character(s) delimiter
to specified file. File-overwrite is the default.
Usage: writedelimited (listoflists,delimiter,filename,writetype='w')
Returns: None
"""
if type(listoflists[0]) not in [ListType,TupleType]:
listoflists = [listoflists]
outfile = open(file,writetype)
rowstokill = []
list2print = copy.deepcopy(listoflists)
for i in range(len(listoflists)):
if listoflists[i] == ['\n'] or listoflists[i]=='\n' or listoflists[i]=='dashes':
rowstokill = rowstokill + [i]
rowstokill.reverse()
for row in rowstokill:
del list2print[row]
maxsize = [0]*len(list2print[0])
for row in listoflists:
if row == ['\n'] or row == '\n':
outfile.write('\n')
elif row == ['dashes'] or row == 'dashes':
dashes = [0]*len(maxsize)
for j in range(len(maxsize)):
dashes[j] = '------'
outfile.write(pstat.linedelimited(dashes,delimiter))
else:
outfile.write(pstat.linedelimited(row,delimiter))
outfile.write('\n')
outfile.close()
return None
def writecc (listoflists,file,writetype='w',extra=2):
"""
Writes a list of lists to a file in columns, customized by the max
size of items within the columns (max size of items in col, +2 characters)
to specified file. File-overwrite is the default.
Usage: writecc (listoflists,file,writetype='w',extra=2)
Returns: None
"""
if type(listoflists[0]) not in [ListType,TupleType]:
listoflists = [listoflists]
outfile = open(file,writetype)
rowstokill = []
list2print = copy.deepcopy(listoflists)
for i in range(len(listoflists)):
if listoflists[i] == ['\n'] or listoflists[i]=='\n' or listoflists[i]=='dashes':
rowstokill = rowstokill + [i]
rowstokill.reverse()
for row in rowstokill:
del list2print[row]
maxsize = [0]*len(list2print[0])
for col in range(len(list2print[0])):
items = pstat.colex(list2print,col)
items = map(pstat.makestr,items)
maxsize[col] = max(map(len,items)) + extra
for row in listoflists:
if row == ['\n'] or row == '\n':
outfile.write('\n')
elif row == ['dashes'] or row == 'dashes':
dashes = [0]*len(maxsize)
for j in range(len(maxsize)):
dashes[j] = '-'*(maxsize[j]-2)
outfile.write(pstat.lineincustcols(dashes,maxsize))
else:
outfile.write(pstat.lineincustcols(row,maxsize))
outfile.write('\n')
outfile.close()
return None
def writefc (listoflists,colsize,file,writetype='w'):
"""
Writes a list of lists to a file in columns of fixed size. File-overwrite
is the default.
Usage: writefc (listoflists,colsize,file,writetype='w')
Returns: None
"""
if type(listoflists) == N.ArrayType:
listoflists = listoflists.tolist()
if type(listoflists[0]) not in [ListType,TupleType]:
listoflists = [listoflists]
outfile = open(file,writetype)
rowstokill = []
list2print = copy.deepcopy(listoflists)
for i in range(len(listoflists)):
if listoflists[i] == ['\n'] or listoflists[i]=='\n' or listoflists[i]=='dashes':
rowstokill = rowstokill + [i]
rowstokill.reverse()
for row in rowstokill:
del list2print[row]
n = [0]*len(list2print[0])
for row in listoflists:
if row == ['\n'] or row == '\n':
outfile.write('\n')
elif row == ['dashes'] or row == 'dashes':
dashes = [0]*colsize
for j in range(len(n)):
dashes[j] = '-'*(colsize)
outfile.write(pstat.lineincols(dashes,colsize))
else:
outfile.write(pstat.lineincols(row,colsize))
outfile.write('\n')
outfile.close()
return None
def load(fname,lines_to_ignore=4,type='i'):
"""
Load in huge, flat, 2D text files. Can handle differing line-lengths AND
can strip #/% on UNIX (or with a better NT grep). Requires wc, grep, and
mmapfile.lib/.pyd. Type can be 'i', 'f' or 'd', for ints, floats or doubles,
respectively. Lines_to_ignore determines how many lines at the start of the
file to ignore (required for non-working grep).
Usage: load(fname,lines_to_ignore=4,type='i')
    Returns: Numeric array of specified type
"""
start = time.time() ## START TIMER
if type == 'i':
intype = int
elif type in ['f','d']:
intype = float
else:
raise ValueError, "type can be 'i', 'f' or 'd' in load()"
## STRIP OUT % AND # LINES
tmpname = tempfile.mktemp()
if sys.platform == 'win32':
# NT VERSION OF GREP DOESN'T DO THE STRIPPING ... SIGH
cmd = "grep.exe -v \'%\' "+fname+" > "+tmpname
print cmd
os.system(cmd)
else:
# UNIX SIDE SHOULD WORK
cmd = "cat "+fname+" | grep -v \'%\' |grep -v \'#\' > "+tmpname
print cmd
os.system(cmd)
## GET NUMBER OF ROWS, COLUMNS AND LINE-LENGTH, USING WC
wc = string.split(os.popen("wc "+tmpname).read())
numlines = int(wc[0]) - lines_to_ignore
tfp = open(tmpname)
if lines_to_ignore <> 0:
for i in range(lines_to_ignore):
junk = tfp.readline()
numcols = len(string.split(tfp.readline())) #int(float(wc[1])/numlines)
tfp.close()
## PREPARE INPUT SPACE
a = N.zeros((numlines*numcols), type)
block = 65536 # chunk to read, in bytes
data = mmapfile.mmapfile(tmpname, '', 0)
if lines_to_ignore <> 0 and sys.platform == 'win32':
for i in range(lines_to_ignore):
junk = data.readline()
i = 0
d = ' '
carryover = ''
while len(d) <> 0:
d = carryover + data.read(block)
cutindex = string.rfind(d,'\n')
carryover = d[cutindex+1:]
d = d[:cutindex+1]
d = map(intype,string.split(d))
a[i:i+len(d)] = d
i = i + len(d)
end = time.time()
print "%d sec" % round(end-start,2)
data.close()
os.remove(tmpname)
return N.reshape(a,[numlines,numcols])
def find_dirs(sourcedir):
"""Finds and returns all directories in sourcedir
Usage: find_dirs(sourcedir)
Returns: list of directory names (potentially empty)
"""
files = os.listdir(sourcedir)
dirs = []
for fname in files:
if os.path.isdir(os.path.join(sourcedir,fname)):
dirs.append(fname)
return dirs
# ALIASES ...
save = aput
def binget(fname,btype=None):
"""
Loads a binary file from disk. Assumes associated hdr file is in same
location. You can force an unpacking type, or else it tries to figure
it out from the filename (4th-to-last character). Hence, readable file
formats are ...
1bin=Int8, sbin=Int16, ibin=Int32, fbin=Float32, dbin=Float64, etc.
Usage: binget(fname,btype=None)
Returns: data in file fname of type btype
"""
file = open(fname,'rb')
bdata = file.read()
file.close()
    # if none given, assume the character preceding 'bin' is the unpacktype
if not btype:
btype = fname[-4]
try:
bdata = N.fromstring(bdata,btype)
except:
raise ValueError, "Bad unpacking type."
# force the data on disk to be LittleEndian (for more efficient PC/Linux use)
if not N.LittleEndian:
bdata = bdata.byteswapped()
try:
header = fname[:-3]+'hdr'
vals = get(header,0) # '0' means no missing-file warning msg
print vals
if type(vals[0]) == ListType: # it's an extended header
xsize = int(vals[0][0])
ysize = int(vals[0][1])
numslices = int(vals[0][2])
else:
bdata.shape = vals
except:
print "No (or bad) header file. Returning unshaped array."
return N.array(bdata)
def binput(outarray,fname,packtype=None,writetype='wb'):
"""
Unravels outarray and writes the data to a file, always in LittleEndian
format, along with a header file containing the original data shape. Default
is overwrite the destination file. Tries to figure out packtype from
4th-to-last character in filename. Thus, the routine understands these
file formats ...
1bin=Int8, sbin=Int16, ibin=Int32, fbin=Float32, dbin=Float64, etc.
Usage: binput(outarray,filename,packtype=None,writetype='wb')
"""
if not packtype:
packtype = fname[-4]
# a speck of error checking
if packtype == N.Int16 and outarray.typecode() == 'f':
# check to see if there's data loss
if max(N.ravel(outarray)) > 32767 or min(N.ravel(outarray))<-32768:
print "*** WARNING: CONVERTING FLOAT DATA TO OUT-OF RANGE INT16 DATA"
outdata = N.ravel(outarray).astype(packtype)
# force the data on disk to be LittleEndian (for more efficient PC/Linux use)
if not N.LittleEndian:
outdata = outdata.byteswapped()
outdata = outdata.tostring()
outfile = open(fname,writetype)
outfile.write(outdata)
outfile.close()
# Now, write the header file
try:
suffixindex = string.rfind(fname,'.')
hdrname = fname[0:suffixindex+2]+'hdr' # include .s or .f or .1 or whatever
except ValueError:
hdrname = fname
hdr = outarray.shape
print hdrname
outfile = open(hdrname,'w')
outfile.write(pstat.list2string(hdr))
outfile.close()
return None
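# Hedged round-trip sketch for binput()/binget(), assuming the Numeric-era
# environment this module targets. The 's' in 'demo.sbin' (4th-to-last
# character) selects Int16, and binput() writes 'demo.shdr' alongside it.
#
# a = N.array([[1, 2], [3, 4]], N.Int16)
# binput(a, 'demo.sbin')   # writes data plus a header holding shape (2, 2)
# b = binget('demo.sbin')  # shape is restored from demo.shdr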
def array2afni(d,brikprefix,voltype=None,TR=2.0,sliceorder='seqplus',geomparent=None,view=None):
"""
Converts an array 'd' to an AFNI BRIK/HEAD combo via putbin and to3d. Tries to
guess the AFNI volume type
voltype = {'-anat','-epan','-fim'}
geomparent = filename of the afni BRIK file with the same geometry
view = {'tlrc', 'acpc' or 'orig'}
Usage: array2afni(d,brikprefix,voltype=None,TR=2.0,
sliceorder='seqplus',geomparent=None,view=None)
Returns: None
"""
# converts Numeric typecode()s into appropriate strings for to3d command line
typecodemapping = {'c':'b', # character
'b':'b', # UnsignedInt8
'f':'f', # Float0, Float8, Float16, Float32
'd':'f', # Float64
'1':'b', # Int0, Int8
's':'', # Int16
'i':'i', # Int32
'l':'i'} # Int
# Verify that the data is proper size (3- or 4-D)
if len(d.shape) not in [3,4]:
raise ValueError, "A 3D or 4D array is required for array2afni() ... %s" %d.shape
# Save out the array to a binary file, homebrew style
if d.typecode() == N.Float64:
outcode = 'f'
else:
outcode = d.typecode()
tmpoutname = 'afnitmp.%sbin' % outcode
binput(d.astype(outcode),tmpoutname)
if not voltype:
if len(d.shape) == 3: # either anatomy or functional
if d.typecode() in ['s','i','l']: # if floats, assume functional
voltype = '-anat'
else:
voltype = '-fim'
else: # 4D dataset, must be anatomical timeseries (epan)
voltype = '-anat'
if len(d.shape) == 3: # either anatomy or functional
timepts = 1
slices = d.shape[0]
timestr = ''
elif len(d.shape) == 4:
timepts = d.shape[0]
slices = d.shape[1]
timestr = '-time:zt %d %d %0.3f %s ' % (slices,timepts,TR,sliceorder)
cmd = 'to3d %s -prefix %s -session . ' % (voltype, brikprefix)
if view:
cmd += '-view %s ' % view
if geomparent:
cmd += '-geomparent %s ' % geomparent
cmd += timestr
cmd += '3D%s:0:0:%d:%d:%d:%s' % (typecodemapping[d.typecode()],d.shape[-1],d.shape[-2],slices*timepts,tmpoutname)
print cmd
os.system(cmd)
os.remove(tmpoutname)
os.remove(tmpoutname[:-3]+'hdr')
|
sniemi/SamPy
|
sandbox/src2/src/io.py
|
Python
|
bsd-2-clause
| 34,524
|
# -*- coding: utf-8 -*-
"""
Dydacomp MOM SQL Server Client
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2013 by Mark A. Richman.
:license: GPL v2, see LICENSE for more details.
"""
__title__ = 'mom'
__version__ = '1.0.0'
__build__ = 0x010000
__author__ = 'Mark A. Richman'
__license__ = 'GPL v2'
__copyright__ = 'Copyright 2013 Mark A. Richman'
__all__ = ['client', 'models']
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
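# Usage sketch of the NullHandler pattern above (illustrative): with the
# handler attached, this package stays silent unless the host application
# configures logging itself.
#
# import logging
# logging.getLogger('mom').debug('suppressed until the app adds handlers')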
|
mrichman/mom
|
mom/__init__.py
|
Python
|
gpl-2.0
| 685
|
import sys
def add(core, actor, skillMod, divisor):
core.skillModService.addSkillMod(actor, 'electricity', skillMod)
return
def deduct(core, actor, skillMod, divisor):
core.skillModService.deductSkillMod(actor, 'electricity', skillMod)
return
|
agry/NGECore2
|
scripts/skillMods/expertise_innate_protection_electricity.py
|
Python
|
lgpl-3.0
| 250
|
# coding=utf-8
"""
Created on 28.8.2013
@author: purma
"""
from PyQt6 import QtGui, QtCore, QtWidgets
from kataja.singletons import ctrl
from kataja.uniqueness_generator import next_available_type_id
class GlowRing(QtWidgets.QGraphicsEllipseItem):
""" Decoration for radial menus """
__qt_type_id__ = next_available_type_id()
def __init__(self, parent, radius=40):
QtWidgets.QGraphicsEllipseItem.__init__(self, QtCore.QRectF(0, 0, 0, 0), parent)
pen = QtGui.QPen(ctrl.cm.ui())
pen.setWidth(4)
self.setPen(pen)
glow = QtWidgets.QGraphicsBlurEffect(parent)
glow.setBlurRadius(7)
glow.setEnabled(True)
self.setGraphicsEffect(glow)
self._radius = 0
self._max_radius = radius
self._step_size = radius / 6.0
def type(self):
""" Qt's type identifier, custom QGraphicsItems should have different type ids if events
need to differentiate between them. These are set when the program starts.
:return:
"""
return self.__qt_type_id__
def grow(self):
self._radius += self._step_size
self.setRect(-self._radius, -self._radius, 2 * self._radius, 2 * self._radius)
def shrink(self):
        self._radius -= self._step_size
        self.setRect(-self._radius, -self._radius, 2 * self._radius, 2 * self._radius)
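# Minimal usage sketch (assumes a running QApplication and a scene that owns
# the parent item; none of that is created by this module):
#
# ring = GlowRing(parent_item, radius=40)
# for _ in range(6):
#     ring.grow()  # expands toward _max_radius in _step_size increments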
|
jpurma/Kataja
|
kataja/ui_support/GlowRing.py
|
Python
|
gpl-3.0
| 1,364
|
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +---------------------------------------------------------------------+
# | ___ _ ____ _____ |
# | |_ _|___ ___ ___ __ _ ___| |_|___ \ |___ / __ __ |
# | | |/ __/ _ \/ __/ _` / __| __| __) | |_ \ \ \/ / |
# | | | (_| __/ (_| (_| \__ \ |_ / __/ _ ___) | > < |
# | |___\___\___|\___\__,_|___/\__|_____(_)____(_)_/\_\ |
# | |
# | |
# | Copyright Alejandro Olivan 2017 alex@alexolivan.com |
# +---------------------------------------------------------------------+
# | A Check_mk agent to monitor Icecast 2.3.x servers on Linux |
# | This file contains plugin agent definition. |
# +---------------------------------------------------------------------+
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import urllib2
import base64
import os
customers = (
{'host': '127.0.0.1', 'port': '8000', 'mountpoint':'myradio.mp3', 'adminuser': 'admin', 'adminpass': 'verysecret1'},
{'host': '127.0.0.1', 'port': '8000', 'mountpoint':'myradio.aac', 'adminuser': 'admin', 'adminpass': 'verysecret1'},
{'host': '127.0.0.1', 'port': '8002', 'mountpoint':'transport.mp3', 'adminuser': 'admin', 'adminpass': 'verysecret2'},
)
results = []
filelist = []
def downloadStatsfiles():
for d in customers:
url = "http://%s:%s/admin/stats?mount=/%s" % (d['host'], d['port'], d['mountpoint'])
filename = "/tmp/icecast-%s-%s-%s" % (d['host'], d['port'], d['mountpoint'])
request = urllib2.Request(url)
base64string = base64.encodestring('%s:%s' % (d['adminuser'], d['adminpass'])).replace('\n', '')
request.add_header("Authorization", "Basic %s" % base64string)
try:
s = urllib2.urlopen(request)
contents = s.read()
file = open(filename, 'w')
file.write(contents)
file.close()
filelist.append({'port': d['port'], 'filename': filename, 'mountpoint': d['mountpoint']})
except:
pass
def parseStatFile(port, file, mountpoint):
from xml.dom import minidom
xmldoc = minidom.parse(file)
    totalsources = int(xmldoc.getElementsByTagName('sources')[0].firstChild.nodeValue)
    listeners = 0
    listenerPeak = 0
    if totalsources > 0:
        status = 1
        for source in xmldoc.getElementsByTagName('source'):
            if source.hasAttribute('mount') and source.getAttribute('mount') == "/%s" % mountpoint:
                listenersElement = source.getElementsByTagName('listeners')
                listeners = listenersElement[0].firstChild.nodeValue
                peakListenerElement = source.getElementsByTagName('listener_peak')
                listenerPeak = peakListenerElement[0].firstChild.nodeValue
                break
    else:
        status = 0
results.append((port, listeners, status, listenerPeak, mountpoint))
def parseStatFiles():
for file in filelist:
parseStatFile(file['port'], file['filename'], file['mountpoint'])
def printResults():
print('<<<icecast2>>>')
for result in results:
resultstr = ""
for item in result:
resultstr+=str(item) + " "
print(resultstr)
def deleteUsedFiles():
for file in filelist:
os.remove(file['filename'])
def mainFunction():
downloadStatsfiles()
parseStatFiles()
printResults()
deleteUsedFiles()
mainFunction()
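# Example of the agent section this plugin emits (values are illustrative);
# each line is "<port> <listeners> <status> <listener_peak> <mountpoint>".
#
# <<<icecast2>>>
# 8000 17 1 42 myradio.mp3
# 8002 0 0 0 transport.mp3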
|
alexolivan/check_mk_plugins
|
icecast2/agents/plugins/icecast2.py
|
Python
|
gpl-2.0
| 4,391
|
#!/usr/bin/python
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: t; python-indent: 4 -*-
"""
Role
====
Defines the basic mechanisms to have a plugin manager filter the
available list of plugins after locating them and before loading them.
One use of this would be to prevent untrusted plugins from entering
the system.
To use it properly you must reimplement or monkey patch the
``isPluginOk`` method, as in the following example::

    # define a plugin manager (with your preferred options)
    pm = PluginManager(...)
    # decorate it with the Filtering mechanics
    pm = FilteredPluginManager(pm)
    # define a custom predicate that filters out plugins without descriptions
    pm.isPluginOk = lambda x: x.description!=""
API
===
"""
from yapsy.IPlugin import IPlugin
from yapsy.PluginManagerDecorator import PluginManagerDecorator
class FilteredPluginManager(PluginManagerDecorator):
"""
Base class for decorators which filter the plugins list
before they are loaded.
"""
def __init__(self,
decorated_manager=None,
categories_filter={"Default":IPlugin},
directories_list=None,
plugin_info_ext="yapsy-plugin"):
"""
"""
# Create the base decorator class
PluginManagerDecorator.__init__(self,decorated_manager,
categories_filter,
directories_list,
plugin_info_ext)
# prepare the mapping of the latest version of each plugin
self.rejectedPlugins = [ ]
def filterPlugins(self):
"""
        Go through the currently available candidates, and either leave
        them or move them into the list of rejected plugins.
        Can be overridden if overriding the ``isPluginOk`` sentinel is
        not powerful enough.
"""
self.rejectedPlugins = [ ]
for candidate_infofile, candidate_filepath, plugin_info in self._component.getPluginCandidates():
if not self.isPluginOk( plugin_info):
self.rejectPluginCandidate((candidate_infofile, candidate_filepath, plugin_info) )
def rejectPluginCandidate(self,pluginTuple):
"""
Move a plugin from the candidates list to the rejected List.
"""
if pluginTuple in self.getPluginCandidates():
self._component.removePluginCandidate(pluginTuple)
if not pluginTuple in self.rejectedPlugins:
self.rejectedPlugins.append(pluginTuple)
def unrejectPluginCandidate(self,pluginTuple):
"""
        Move a plugin from the rejected list back into the candidates
        list.
"""
if not pluginTuple in self.getPluginCandidates():
self._component.appendPluginCandidate(pluginTuple)
if pluginTuple in self.rejectedPlugins:
self.rejectedPlugins.remove(pluginTuple)
def removePluginCandidate(self,pluginTuple):
"""
Remove a plugin from the list of candidates.
"""
if pluginTuple in self.getPluginCandidates():
self._component.removePluginCandidate(pluginTuple)
if pluginTuple in self.rejectedPlugins:
self.rejectedPlugins.remove(pluginTuple)
def appendPluginCandidate(self,pluginTuple):
"""
Add a new candidate.
"""
if self.isPluginOk(pluginTuple[2]):
if pluginTuple not in self.getPluginCandidates():
self._component.appendPluginCandidate(pluginTuple)
else:
if not pluginTuple in self.rejectedPlugins:
self.rejectedPlugins.append(pluginTuple)
def isPluginOk(self,info):
"""
Sentinel function to detect if a plugin should be filtered.
``info`` is an instance of a ``PluginInfo`` and this method is
expected to return True if the corresponding plugin can be
accepted, and False if it must be filtered out.
Subclasses should override this function and return false for
any plugin which they do not want to be loadable.
"""
return True
def locatePlugins(self):
"""
locate and filter plugins.
"""
#Reset Catalogue
self.setCategoriesFilter(self._component.categories_interfaces)
#Reread and filter.
self._component.locatePlugins()
self.filterPlugins()
return len(self._component.getPluginCandidates())
def getRejectedPlugins(self):
"""
Return the list of rejected plugins.
"""
return self.rejectedPlugins[:]
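# Hypothetical subclass sketch: filtering by a whitelist of plugin names.
# The whitelist attribute below is illustrative and not part of yapsy.
class WhitelistedPluginManager(FilteredPluginManager):
    whitelist = frozenset(['TrustedPlugin'])

    def isPluginOk(self, info):
        return info.name in self.whitelist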
|
haiyamading/pyqt_tray
|
yapsy/FilteredPluginManager.py
|
Python
|
gpl-2.0
| 4,021
|
def Send_Certified_By_Automation():
import smtplib
from elan import ElanSettings
import os
from elan.ElanSettings import Script_Runner_Log
def send_email(subject,
body,
user='kennyshay123test@gmail.com',
pwd='corebrands123',
recipient = ["kennyshay123@gmail.com","elantestertools@gmail.com"]):
#recipient = ["kennyshay123@gmail.com","ben.bickell@corebrands.com","elantestertools@gmail.com"]):
gmail_user = user
gmail_pwd = pwd
FROM = user
TO = recipient if type(recipient) is list else [recipient]
SUBJECT = subject
TEXT = body
# Prepare actual message
message = """From: %s\nTo: %s\nSubject: %s\n\n%s
""" % (FROM, ", ".join(TO), SUBJECT, TEXT)
server = smtplib.SMTP("smtp.gmail.com", 587)
#server.ehlo()
server.starttls()
server.login(gmail_user, gmail_pwd)
server.sendmail(FROM, TO, message)
server.close()
print 'successfully sent the mail'
#######################################################BUild number
with open(ElanSettings.Build_File_List) as f:
Elan_Build_List = f.readlines()
Elan_Build_List = [x.strip() for x in Elan_Build_List]
Elan_Build_List.sort()
try:
Elan_Build = Elan_Build_List[-1]
except:
Elan_Build = 'None'
#print("Build->" + Elan_Build)
##########################################################
with open(Script_Runner_Log, 'r') as myfile:
body_log=myfile.read()
bodylogList = body_log.split('\n')
bodylogList = bodylogList[::-1]
body_log = "\n".join(str(x) for x in bodylogList)
body_Text = body_log + '\n' + str(os.environ['COMPUTERNAME'])
compName = str(os.environ['COMPUTERNAME'])
astring = "CERTIFIED ( " + Elan_Build + ' ) by ' + compName
send_email(astring,"\n" + body_Text)
Send_Certified_By_Automation()
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/elan/Certified_Build.py
|
Python
|
gpl-3.0
| 2,028
|
from django.db import models
from django.utils import timezone
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
import hashlib
import os
import string
#custom storage pulled from phoibos on stackoverflow.com
class MediaFileSystemStorage(FileSystemStorage):
def get_available_name(self, name):
return name
def _save(self, name, content):
if self.exists(name):
            # if the file exists, do not call the superclass's _save method
return name
# if the file is new, DO call it
return super(MediaFileSystemStorage, self)._save(name, content)
def media_file_name(instance, filename):
# h = instance.md5sum
# h = hashlib.md5(instance.sample.read()).hexdigest()
h = instance.md5
basename, ext = os.path.splitext(filename)
basename = ''.join(e for e in basename if e.isalnum())
return os.path.join('sanalysis', 'static', 'samples', h, basename.lower() + ext.lower() + '.MAL')
def validate_ticket(value):
try:
int(value)
except ValueError:
raise ValidationError('Ticket must be numeric.')
class Sample(models.Model):
sample = models.FileField(
upload_to=media_file_name, storage=MediaFileSystemStorage())
#ticket = models.CharField(max_length=32, validators=[validate_ticket])
ticket = models.CharField(max_length=32)
filename = models.TextField(default='none')
size = models.IntegerField(default=0)
type = models.TextField(default='none')
md5 = models.CharField(max_length=32)
sha1 = models.CharField(max_length=40)
sha256 = models.CharField(max_length=64)
fuzzy = models.TextField(default='')
created = models.DateTimeField(
default=timezone.now)
# yara = models.TextField()
exif = models.TextField(default='')
strings = models.TextField(default='')
balbuzard = models.TextField(default='')
trid = models.TextField(default='')
peframe = models.TextField(default='')
pescanner = models.TextField(default='')
pdfid = models.TextField(default='')
pdf_strings = models.TextField(default='')
peepdf = models.TextField(default='')
oleid = models.TextField(default='')
olemeta = models.TextField(default='')
olevba = models.TextField(default='')
rtfobj = models.TextField(default='')
rtfobj_str = models.TextField(default='')
rtfobj_balbuz = models.TextField(default='')
ssdeep_compare = models.TextField(default='')
vt = models.TextField(default='')
vt_short = models.TextField(default='')
def __str__(self):
return self.filename
def __unicode__(self):
if isinstance(self.filename, unicode):
return self.filename
else:
return unicode(self.filename,'utf-8')
def save(self, *args, **kwargs):
if not self.pk: # file is new
md5 = hashlib.md5()
for chunk in self.sample.chunks():
md5.update(chunk)
            self.md5 = md5.hexdigest()
super(Sample, self).save(*args, **kwargs)
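# Worked example for media_file_name() above (hash and filename invented):
# with instance.md5 == 'd41d8cd98f00b204e9800998ecf8427e' and an upload named
# 'Invoice (final).exe', the non-alphanumerics are stripped from the basename
# and the stored path becomes:
# sanalysis/static/samples/d41d8cd98f00b204e9800998ecf8427e/invoicefinal.exe.MAL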
|
TheDr1ver/WIPSTER
|
wipster/sanalysis/models.py
|
Python
|
gpl-3.0
| 3,107
|
from setuptools import setup
from mpd_tag import VERSION
setup(
name = 'mpd-tag',
version = VERSION,
author = 'Anton Bobrov',
author_email = 'bobrov@vl.ru',
description = 'MPD tag manager',
#long_description = open('README.rst').read().replace('https', 'http'),
install_requires = ['python-mpd'],
zip_safe = False,
py_modules = ['mpd_tag'],
scripts = ['bin/mtag'],
include_package_data = True,
url = 'http://github.com/baverman/mpd-tag',
classifiers = [
"Programming Language :: Python",
"License :: OSI Approved :: MIT License",
"Development Status :: 4 - Beta",
"Intended Audience :: End Users/Desktop",
"Natural Language :: English",
],
)
|
baverman/mpd-tag
|
setup.py
|
Python
|
mit
| 749
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Javier Martínez García August 2015
from PySide import QtCore
from math import sin, cos, radians
# retrieve the objects from the document
slider_x = FreeCAD.ActiveDocument.getObject("Pad003002")
slider_y = FreeCAD.ActiveDocument.getObject("Pad003001")
arm = FreeCAD.ActiveDocument.getObject("Pad002001")
# store initial placement (needed to restore initial position)
slider_x_placement = slider_x.Placement
slider_y_placement = slider_y.Placement
arm_placement = arm.Placement
# store object placements in a new variable
r_slider_x_pl = slider_x.Placement
r_slider_y_pl = slider_y.Placement
r_arm_pl = arm.Placement
def reset():
# function to restore initial position of the objects
slider_x.Placement = r_slider_x_pl
slider_y.Placement = r_slider_y_pl
arm.Placement = r_arm_pl
# In this mechanism, "i" represents the angle of the rod in degrees
i = 0
# update function calculates object position as f(i) and increases i
def update():
global i
alpha = radians( i )
x = 150.0*cos( alpha )
y = 150.0*sin( alpha )
slider_x.Placement = FreeCAD.Placement( slider_x_placement.Base + FreeCAD.Vector( 150-x, 0, 0 ),
slider_x_placement.Rotation )
slider_y.Placement = FreeCAD.Placement( slider_y_placement.Base + FreeCAD.Vector( 0, y, 0 ),
slider_y_placement.Rotation )
arm.Placement = FreeCAD.Placement( arm_placement.Base + FreeCAD.Vector( 0, y, 0 ),
FreeCAD.Rotation( FreeCAD.Vector( 0,0,1), i))
# update the scene
FreeCAD.Gui.updateGui()
# increase mechanism input position
i += 1
# create a timer object
timer = QtCore.QTimer()
# connect timer event to function "update"
timer.timeout.connect( update )
# start the timer by triggering "update" every 10 ms
timer.start( 10 )
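# To halt the animation and put the mechanism back where it started, the
# helpers defined above can be combined (sketch):
#
# timer.stop()
# reset()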
|
JMG1/FreeCAD-Double-Slider-Mechanism
|
DoubleSliderAnimation.py
|
Python
|
gpl-2.0
| 1,893
|
"""Mock helpers for Z-Wave component."""
from pydispatch import dispatcher
from tests.async_mock import MagicMock
def value_changed(value):
"""Fire a value changed."""
dispatcher.send(
MockNetwork.SIGNAL_VALUE_CHANGED,
value=value,
node=value.node,
network=value.node._network,
)
def node_changed(node):
"""Fire a node changed."""
dispatcher.send(MockNetwork.SIGNAL_NODE, node=node, network=node._network)
def notification(node_id, network=None):
"""Fire a notification."""
dispatcher.send(
MockNetwork.SIGNAL_NOTIFICATION, args={"nodeId": node_id}, network=network
)
class MockOption(MagicMock):
"""Mock Z-Wave options."""
def __init__(self, device=None, config_path=None, user_path=None, cmd_line=None):
"""Initialize a Z-Wave mock options."""
super().__init__()
self.device = device
self.config_path = config_path
self.user_path = user_path
self.cmd_line = cmd_line
def _get_child_mock(self, **kw):
"""Create child mocks with right MagicMock class."""
return MagicMock(**kw)
class MockNetwork(MagicMock):
"""Mock Z-Wave network."""
SIGNAL_NETWORK_FAILED = "mock_NetworkFailed"
SIGNAL_NETWORK_STARTED = "mock_NetworkStarted"
SIGNAL_NETWORK_READY = "mock_NetworkReady"
SIGNAL_NETWORK_STOPPED = "mock_NetworkStopped"
SIGNAL_NETWORK_RESETTED = "mock_DriverResetted"
SIGNAL_NETWORK_AWAKED = "mock_DriverAwaked"
SIGNAL_DRIVER_FAILED = "mock_DriverFailed"
SIGNAL_DRIVER_READY = "mock_DriverReady"
SIGNAL_DRIVER_RESET = "mock_DriverReset"
SIGNAL_DRIVER_REMOVED = "mock_DriverRemoved"
SIGNAL_GROUP = "mock_Group"
SIGNAL_NODE = "mock_Node"
SIGNAL_NODE_ADDED = "mock_NodeAdded"
SIGNAL_NODE_EVENT = "mock_NodeEvent"
SIGNAL_NODE_NAMING = "mock_NodeNaming"
SIGNAL_NODE_NEW = "mock_NodeNew"
SIGNAL_NODE_PROTOCOL_INFO = "mock_NodeProtocolInfo"
SIGNAL_NODE_READY = "mock_NodeReady"
SIGNAL_NODE_REMOVED = "mock_NodeRemoved"
SIGNAL_SCENE_EVENT = "mock_SceneEvent"
SIGNAL_VALUE = "mock_Value"
SIGNAL_VALUE_ADDED = "mock_ValueAdded"
SIGNAL_VALUE_CHANGED = "mock_ValueChanged"
SIGNAL_VALUE_REFRESHED = "mock_ValueRefreshed"
SIGNAL_VALUE_REMOVED = "mock_ValueRemoved"
SIGNAL_POLLING_ENABLED = "mock_PollingEnabled"
SIGNAL_POLLING_DISABLED = "mock_PollingDisabled"
SIGNAL_CREATE_BUTTON = "mock_CreateButton"
SIGNAL_DELETE_BUTTON = "mock_DeleteButton"
SIGNAL_BUTTON_ON = "mock_ButtonOn"
SIGNAL_BUTTON_OFF = "mock_ButtonOff"
SIGNAL_ESSENTIAL_NODE_QUERIES_COMPLETE = "mock_EssentialNodeQueriesComplete"
SIGNAL_NODE_QUERIES_COMPLETE = "mock_NodeQueriesComplete"
SIGNAL_AWAKE_NODES_QUERIED = "mock_AwakeNodesQueried"
SIGNAL_ALL_NODES_QUERIED = "mock_AllNodesQueried"
SIGNAL_ALL_NODES_QUERIED_SOME_DEAD = "mock_AllNodesQueriedSomeDead"
SIGNAL_MSG_COMPLETE = "mock_MsgComplete"
SIGNAL_NOTIFICATION = "mock_Notification"
SIGNAL_CONTROLLER_COMMAND = "mock_ControllerCommand"
SIGNAL_CONTROLLER_WAITING = "mock_ControllerWaiting"
STATE_STOPPED = 0
STATE_FAILED = 1
STATE_RESETTED = 3
STATE_STARTED = 5
STATE_AWAKED = 7
STATE_READY = 10
def __init__(self, options=None, *args, **kwargs):
"""Initialize a Z-Wave mock network."""
super().__init__()
self.options = options
self.state = MockNetwork.STATE_STOPPED
class MockNode(MagicMock):
"""Mock Z-Wave node."""
def __init__(
self,
*,
node_id=567,
name="Mock Node",
manufacturer_id="ABCD",
product_id="123",
product_type="678",
command_classes=None,
can_wake_up_value=True,
manufacturer_name="Test Manufacturer",
product_name="Test Product",
network=None,
**kwargs,
):
"""Initialize a Z-Wave mock node."""
super().__init__()
self.node_id = node_id
self.name = name
self.manufacturer_id = manufacturer_id
self.product_id = product_id
self.product_type = product_type
self.manufacturer_name = manufacturer_name
self.product_name = product_name
self.can_wake_up_value = can_wake_up_value
self._command_classes = command_classes or []
if network is not None:
self._network = network
for attr_name in kwargs:
setattr(self, attr_name, kwargs[attr_name])
def has_command_class(self, command_class):
"""Test if mock has a command class."""
return command_class in self._command_classes
def get_battery_level(self):
"""Return mock battery level."""
return 42
def can_wake_up(self):
"""Return whether the node can wake up."""
return self.can_wake_up_value
def _get_child_mock(self, **kw):
"""Create child mocks with right MagicMock class."""
return MagicMock(**kw)
class MockValue(MagicMock):
"""Mock Z-Wave value."""
_mock_value_id = 1234
def __init__(
self,
*,
label="Mock Value",
node=None,
instance=0,
index=0,
value_id=None,
**kwargs,
):
"""Initialize a Z-Wave mock value."""
super().__init__()
self.label = label
self.node = node
self.instance = instance
self.index = index
if value_id is None:
MockValue._mock_value_id += 1
value_id = MockValue._mock_value_id
self.value_id = value_id
self.object_id = value_id
for attr_name in kwargs:
setattr(self, attr_name, kwargs[attr_name])
def _get_child_mock(self, **kw):
"""Create child mocks with right MagicMock class."""
return MagicMock(**kw)
def refresh(self):
"""Mock refresh of node value."""
value_changed(self)
class MockEntityValues:
"""Mock Z-Wave entity values."""
def __init__(self, **kwargs):
"""Initialize the mock zwave values."""
self.primary = None
self.wakeup = None
self.battery = None
self.power = None
for name in kwargs:
setattr(self, name, kwargs[name])
def __iter__(self):
"""Allow iteration over all values."""
return iter(self.__dict__.values())
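# --- Illustrative usage sketch (not part of the original fixtures) ----------
# A hypothetical test could wire these mocks together as below; the names
# `network`, `node` and `value` are invented for this sketch only.
#
#   network = MockNetwork()
#   node = MockNode(node_id=1, network=network)
#   value = MockValue(label="Temperature", node=node, data=21.5)
#   value_changed(value)  # dispatches MockNetwork.SIGNAL_VALUE_CHANGED
#   notification(node.node_id, network=network)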
|
sdague/home-assistant
|
tests/mock/zwave.py
|
Python
|
apache-2.0
| 6,380
|
#!/usr/bin/env python
'''Seizure figures for the I-surround with the original E-surround
configuration.'''
from __future__ import absolute_import, print_function, division
from grid_cell_model.submitting import flagparse
import noisefigs
from noisefigs.env import NoiseEnvironment
import config_pastoll_pc_weight_3 as config
parser = flagparse.FlagParser()
parser.add_flag('--theta_signal')
parser.add_flag('--rastersFlag')
parser.add_flag('--rates')
parser.add_flag('--maxFRSweeps')
parser.add_flag('--seizureProportion')
parser.add_flag('--maxFRGridsScatter')
parser.add_flag('--PSeizureGridsScatter')
args = parser.parse_args()
env = NoiseEnvironment(user_config=config.get_config())
if args.theta_signal or args.all:
env.register_plotter(noisefigs.plotters.ThetaSignalPlotter)
if args.rastersFlag or args.all:
env.register_plotter(noisefigs.plotters.EIRasterPlotter)
if args.rates or args.all:
env.register_plotter(noisefigs.plotters.EIRatePlotter)
if args.maxFRSweeps or args.all:
env.register_plotter(noisefigs.plotters.MaxPopulationFRSweepsPlotter)
if args.maxFRGridsScatter or args.all:
env.register_plotter(noisefigs.plotters.MaxFRGridsScatterAllPlotter)
if args.PSeizureGridsScatter or args.all:
env.register_plotter(noisefigs.plotters.PSeizureGridsScatterAllPlotter)
if args.seizureProportion or args.all:
env.register_plotter(noisefigs.plotters.PSeizureSweepPlotter)
env.plot()
|
MattNolanLab/ei-attractor
|
grid_cell_model/simulations/007_noise/figures/paper/i_surround/figure_seizures_pastoll_et_al_pc_weight_3.py
|
Python
|
gpl-3.0
| 1,431
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0018_menuitem_metin'),
]
operations = [
migrations.RemoveField(
model_name='menuitem',
name='metin',
),
]
|
neslihanturan/artge
|
app/migrations/0019_remove_menuitem_metin.py
|
Python
|
gpl-3.0
| 345
|
__author__ = 'amw'
from quicktest.Reporting import plot3d, plot2d, create_dataset, create_plot_matrix
# Initialise database config
db_config = {"hostname": "localhost", "port": "27017", "db_name": "simulations", "collection_name": "validation_050913"}
tag = "direct_detmerge_1m_040913-2"
x_param = "parameters.numSubs"
y_param = "parameters.numToSend_6"
z_param = "results.<Channel 6>: Latency - All nodes - Avgmean"
filter_list = [{"parameters.packetSize": "16B"},
{"parameters.packetSize": "32B"},
{"parameters.packetSize": "64B"},
{"parameters.packetSize": "128B"},
{"parameters.packetSize": "1024B"}
]
plot3d(db_config, tag, x_param, y_param, z_param, filter_list, 2, 3, "Direct Detmerge Average Latency All Nodes")
x_param = "parameters.numSubs"
y_param = "results.<Channel 6>: Latency - All nodes - Minmean"
filter_list = [{"parameters.packetSize": "16B", "parameters.numToSend_6": "1"},
{"parameters.packetSize": "16B", "parameters.numToSend_6": "3"},
{"parameters.packetSize": "16B", "parameters.numToSend_6": "5"},
{"parameters.packetSize": "16B", "parameters.numToSend_6": "7"},
{"parameters.packetSize": "16B", "parameters.numToSend_6": "10"},
{"parameters.packetSize": "16B", "parameters.numToSend_6": "15"},
{"parameters.packetSize": "16B", "parameters.numToSend_6": "20"},
{"parameters.packetSize": "16B", "parameters.numToSend_6": "30"}
]
plot2d(db_config, tag, x_param, y_param, filter_list, 3, 3, "Direct Detmerge Minimum Latency All Nodes")
y_param = "results.<Channel 6>: Latency - All nodes - Maxmean"
plot2d(db_config, tag, x_param, y_param, filter_list, 3, 3, "Direct Detmerge Maximum Latency All Nodes")
y_param = "results.<Channel 6>: Latency - All nodes - Avgmean"
plot2d(db_config, tag, x_param, y_param, filter_list, 3, 3, "Direct Detmerge Average Latency All Nodes")
|
ClockworkOrigins/m2etis
|
configurator/quicktest/reporting/direct_detmerge_1m_04-09-13.py
|
Python
|
apache-2.0
| 2,001
|
# Twisted, the Framework of Your Internet
# Copyright (C) 2001 Matthew W. Lefkowitz
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
A group of classes which implement the observer/observable and
publisher/subscriber pattern.
"""
# System Imports
import types
import sys
import string
import copy
# Sibling Imports
import reflect
import log
class _DontTell:
def __cmp__(self,other):
if isinstance(other,_DontTell):
return 0
else:
return -1
def __hash__(self): return id(self)
def __repr__(self):
return "observable.DontTell"
class _Gone:
def __cmp__(self,other):
if isinstance(other,_Gone):
return 0
else:
return -1
def __hash__(self): return id(self)
def __repr__(self):
return "observable.Gone"
DontTell=_DontTell()
Gone=_Gone()
class Dynamic:
def __init__(self, caller=None):
if caller:
self.evaluate=caller
def evaluate(self,observer,hash=None,key=None):
log.msg('observe.py: Dynamic.evaluate called directly --> override this')
log.msg('observer %s\nhash %s\nkey %s'%(observer,hash,key))
return DontTell
def __call__(self,observer,hash=None,key=None):
if type(observer)==types.MethodType:
observer=observer.im_self
return self.evaluate(observer,hash,key)
def propertize(self, observer,key,prop):
if isinstance(prop,Dynamic): p=prop(observer,self,key)
else: p=prop
if p == DontTell: raise p
return p
class EventSource:
def __init__(self):
self.listeners={}
def bind(self, event, command, args=()):
if not self.listeners.has_key(event):
self.listeners[event]=[]
self.listeners[event].append(command)
def fire(self, event, *args,**kw):
for listener in self.listeners[event]:
apply(listener,args,kw)
class Observable:
def __init__(self):
self.observers=[]
def addObserver(self, observer):
"""Observable.addObserver(observer)
        Add a method which will be called when this observable's
        notify() is executed."""
self.observers.append(observer)
self.observers = self.observers
def removeObserver(self, observer):
"""Observable.removeObserver(observer)
        Remove a previously-added method that would have been called when
        this observable's notify() was executed"""
self.observers.remove(observer)
self.observers = self.observers
def notify(self, *rgs):
"""Observable.notify(*rgs)
call all observers of this observable with (self,)+rgs"""
args=(self,)+rgs
for observer in self.observers:
self.tell(observer, args)
self.observers = self.observers
def tell(self,observer,args):
apply(observer,args)
class Publisher:
subscribers = None
def unsubscribe(self, channel, subscriber):
"""Publisher.unsubscribe(channel, subscriber)
Unsubscribe a previously subscribed subscriber method from a
particular channel."""
subs = self.subscribers[channel]
subs.remove(subscriber)
if not subs:
del self.subscribers[channel]
def subscribe(self, channel, subscriber):
"""Publisher.subscribe(channel, subscriber)
Subscribe a 'subscriber' method to a 'channel' key (a python
identifier): whenver 'publish' is called with an equivalent
'channel' argument, , the subscriber will be called with the
signature (sender, channel, data), where 'sender' is this
publisher, 'channel' is the chosen channel key, and 'data' is
some arbitrary data. 'publish' will also call the method
on_%(channel)s on this object with data as the only
argument (plus the implicit self!) """
if self.subscribers is None:
self.subscribers = {}
l = self.subscribers.get(channel,[])
l.append(subscriber)
self.subscribers[channel] = l
def publish(self, channel, data):
"""Publisher.publish(channel,data)
Publish the given data to a channel -- call all subscriber
methods to this channel, with the arguments (self, channel,
data), and the default subscriber (named on_%s) with only
'data' as an argument"""
# Call the default subscriber
defaultSubscriber = getattr(self, "on_%s" % channel, None)
if defaultSubscriber is not None:
try:
defaultSubscriber(data)
except:
log.deferr()
# Now call all the regular subscribers.
if not self.subscribers: return
for subscriber in self.subscribers.get(channel,()):
try:
subscriber(self, channel, data)
except:
log.deferr()
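# --- Illustrative usage sketch (not part of the original module) ------------
# Publisher first calls the default handler on_<channel>, then every
# subscriber with (sender, channel, data); the names below are invented.
#
#   class Speaker(Publisher):
#       def on_news(self, data):
#           log.msg("default handler got %s" % (data,))
#
#   def listener(sender, channel, data):
#       log.msg("listener got %s on channel %s" % (data, channel))
#
#   speaker = Speaker()
#   speaker.subscribe('news', listener)
#   speaker.publish('news', 'hello')  # calls on_news, then listener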
class WhenMethodSubscription:
"""
This is a helper class to make the whole concept of when_
method subscriptions more friendly to pickling.
"""
def __init__(self, subscriber, attribute, channel):
self.subscriber = subscriber
self.attribute = attribute
self.channel = channel
def __cmp__(self, other):
if other is self:
return 0
if not reflect.isinst(other, WhenMethodSubscription):
return -1
for attr in 'subscriber','attribute','channel':
retval = cmp(getattr(self,attr),getattr(other,attr))
if retval != 0:
return retval
return 0
def __repr__(self):
return "<WhenMethodSubscription %s %s %s>" % (repr(self.subscriber),
self.attribute,
repr(self.channel))
def __call__(self, publisher, channel, message):
assert channel == self.channel, "Channel should be the same."
method = getattr(self.subscriber,
"when_"+self.attribute+"_"+self.channel, None)
if method is None:
log.msg("Unsubscribe due to Persistent Inconsistency:")
log.msg(string.join(map(
                str, (publisher, self.subscriber,
                      self.attribute, self.channel))))
            # `publisher` is a call argument, not an attribute of self;
            # unsubscribe this wrapper from the channel it was registered on.
            publisher.unsubscribe(self.channel, self)
return None
else:
return method(publisher, channel, message)
def registerWhenMethods(Class):
sa = Class._subscriberAttributes = {}
for base in Class.__bases__:
if issubclass(base, Subscriber):
sa.update(base._subscriberAttributes)
# The structure of this dictionary is
# {attributeName: {eventName: [listOfHandlers]}}
# The handler 'None' is treated specially, and the instance-method
# is subscribed when it is encountered.
for name,method in Class.__dict__.items():
# I want to turn method names like when_place_enter into a
        # dictionary like {'place': {'enter': [None]}}.
if type(method) == types.FunctionType:
specname = string.split(name,'_')
# Okay. We've got some special naming stuff here.
if not len(specname) > 2:
continue
if not specname[0] == 'when':
continue
            # Per the structure comment above, the outer key is the
            # attribute name and the inner key is the event name.
            attrname = specname[1]
            evtname = string.join(specname[2:],'_')
            evtdict = sa.get(attrname,{})
            evtdict[evtname] = [None]
            sa[attrname] = evtdict
class Subscriber(reflect.Accessor):
_subscriberAttributes = {}
def subscribeToAttribute(self, attribute, channel, callback):
assert callable(callback), "Callback must be a callable type."
if self._subscriberAttributes is self.__class__._subscriberAttributes:
self._subscriberAttributes = copy.deepcopy(self._subscriberAttributes)
channels = self._subscriberAttributes.get(attribute,{})
handlers = channels.get(channel,[])
handlers.append(callback)
channels[channel] = handlers
self._subscriberAttributes[attribute] = channels
currentPublisher = getattr(self, attribute, None)
if reflect.isinst(currentPublisher, Publisher):
currentPublisher.subscribe(channel, callback)
def unsubscribeFromAttribute(self, attribute, channel, callback):
assert not (self._subscriberAttributes is
self.__class__._subscriberAttributes),\
"No attribute channels have been subscribed."
channels = self._subscriberAttributes[attribute]
handlers = channels[channel]
handlers.remove(callback)
currentPublisher = getattr(self, attribute, None)
if reflect.isinst(currentPublisher, Publisher):
currentPublisher.unsubscribe(channel, callback)
def reallyDel(self, key):
self._doSub(key, None)
reflect.Accessor.reallyDel(self,key)
def reallySet(self, key, val):
self._doSub(key,val)
reflect.Accessor.reallySet(self,key,val)
def _doSub(self, key, val):
attributeInfo = self._subscriberAttributes.get(key,{}).items()
previousAttribute = getattr(self,key,None)
if reflect.isinst(previousAttribute, Publisher):
for event, handlers in attributeInfo:
for handler in handlers:
if handler is None:
handler = WhenMethodSubscription(self, key, event)
# handler = getattr(self, "when_"+key+"_"+event)
try:
previousAttribute.unsubscribe(event, handler)
except KeyError:
# Since this deals with when_ methods, the
# most likely time for an exception to be
# thrown here is when you've added a when
# method. There's no point in stopping
# execution there.
pass
if reflect.isinst(val, Publisher):
for event, handlers in attributeInfo:
for handler in handlers:
if handler is None:
handler = WhenMethodSubscription(self, key, event)
#handler = getattr(self, "when_"+key+"_"+event)
val.subscribe(event, handler)
def propertize(hash, observer, key, prop):
    # Module-level counterpart of Dynamic.propertize; Hash.tell below calls
    # propertize as a free function, so the helper is reconstructed here.
    if isinstance(prop, Dynamic):
        p = prop(observer, hash, key)
    else:
        p = prop
    if p == DontTell:
        raise p
    return p
class Hash(Observable):
def __init__(self,properties=None):
Observable.__init__(self)
if properties is None:
properties={}
self.properties=properties
def tell(self,observer,targs):
self2,key,value=targs
# I assume I haven't seen this yet.
already_seen=0
try:
# Does this observer think there's something in this hash
# under this key already?
propertize(self,observer,key,self[key])
# Correction, I have.
already_seen=1
except _DontTell:
# If not, well,
if targs[2]==Gone:
# if we were just going to tell them that it was gone,
# forget about it.
return
except KeyError:
# That wasn't even in the dictionary before!
pass
try:
apply(observer,
(self2, key,
propertize(self,observer,key,value)))
except _DontTell:
# Okay, so this observer isn't supposed to know about this
# property.
if already_seen:
# If they already have it "in view", tell them it's
# gone now.
apply(observer,(self2,key,Gone))
# Otherwise, well, they don't know that anything has happened.
def addObserver(self, observer):
Observable.addObserver(self,observer)
for k,v in self.properties.items():
self.tell(observer,(self,k,v))
def __setitem__(self, key,val):
self.notify(key,val)
self.properties[key]=val
def __getitem__(self, key):
return self.properties[key]
def __len__(self):
return len(self.properties)
def __delitem__(self, key):
self.notify(key,Gone)
del self.properties[key]
def keys(self):
return self.properties.keys()
def values(self):
return self.properties.values()
def items(self):
return self.properties.items()
def update(self,dict):
for k,v in dict.items():
self[k]=v
def has_key(self,key):
return self.properties.has_key(key)
def __repr__(self):
if self.observers:
x=repr(self.observers)
else:
x=""
return "observable.Hash(%s%s)"%(repr(self.properties),x)
class Delegator:
def __init__(self, callhash):
self.hash=callhash
def __call__(self, *args):
        try:
            observer = self.hash[args[1]]
        except KeyError:
            pass  # no delegation registered for this key
        else:
            apply(observer, args)
|
fxia22/ASM_xf
|
PythonD/site_python/twisted/python/observable.py
|
Python
|
gpl-2.0
| 13,623
|
# -*- coding: utf-8 -*-
#***************************************************************************
#* *
#* Copyright (c) 2015 Pierre Vacher <prrvchr@gmail.com> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
""" UsbPool panel Plugin object """
from __future__ import unicode_literals
import FreeCADGui
from PySide import QtCore, QtGui
from App import Script as AppScript
from Gui import UsbPoolModel, Script as GuiScript
class PoolTaskPanel:
def __init__(self, obj):
view = PoolPanel()
model = obj.ViewObject.Proxy.Model
if model.obj is None: model.obj = obj
view.setModel(model)
self.form = [view]
def accept(self):
FreeCADGui.ActiveDocument.resetEdit()
return True
def reject(self):
FreeCADGui.ActiveDocument.resetEdit()
return True
def clicked(self, index):
pass
def open(self):
pass
def needsFullSpace(self):
return False
def isAllowedAlterSelection(self):
return True
def isAllowedAlterView(self):
return True
def isAllowedAlterDocument(self):
return False
def getStandardButtons(self):
return int(QtGui.QDialogButtonBox.Ok)
#return int(QtGui.QDialogButtonBox.Ok|QtGui.QDialogButtonBox.Cancel)
def helpRequested(self):
pass
class PoolPanel(QtGui.QGroupBox):
def __init__(self):
QtGui.QGroupBox.__init__(self)
self.setWindowIcon(QtGui.QIcon("icons:Usb-Pool.xpm"))
layout = QtGui.QGridLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
txt = QtGui.QLabel("Not implemented in this plugin!!!")
layout.addWidget(txt, 0, 0, 1, 1)
        txt1 = QtGui.QLabel("Choose another plugin in Pool properties")
layout.addWidget(txt1, 1, 0, 1, 1)
def setModel(self, model):
if model.obj is not None:
self.setWindowTitle("Usb {} monitor".format(model.obj.Label))
class TaskWatcher:
def __init__(self):
self.title = b"Pool monitor"
self.icon = b"icons:Usb-Pool.xpm"
self.model = UsbPoolModel.PoolBaseModel()
self.view = PoolPanel()
self.widgets = [self.view]
def shouldShow(self):
for obj in FreeCADGui.Selection.getSelection():
if AppScript.getObjectType(obj) == "App::UsbPool" and\
GuiScript.getObjectViewType(obj.ViewObject) == "Gui::UsbPool":
model = obj.ViewObject.Proxy.Model
if model.obj is None: model.obj = obj
self.view.setModel(model)
return True
self.view.setModel(self.model)
return False
|
prrvchr/USBTerminal
|
USB/Gui/UsbPoolPanel.py
|
Python
|
gpl-2.0
| 4,080
|
# Name: Pre-Logic Script Code
# Created: April 27, 2015
# Author: Sven Koberwitz
# Purpose: Find the number of occurrences of a value in a Field using Field Calculator.
## Pre-Logic Script Code
import arcpy
uniqueList = {}
## Set the name of the feature class here
fc = "feature_class"
rows = arcpy.SearchCursor(fc)
for row in rows:
## Set the name of the attribute here
value = row.getValue("field_name")
if value not in uniqueList:
uniqueList[value] = 1
else:
uniqueList[value] = uniqueList[value] + 1
def duplicates(inValue):
return uniqueList[inValue]
## Use this as the calculation formula
duplicates(!field_name!)
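## --- Hedged alternative sketch (run outside the Field Calculator) ----------
## On newer ArcGIS releases the same counts can be computed directly with
## arcpy.da.SearchCursor and collections.Counter; "feature_class" and
## "field_name" remain placeholders, as above.
##
## from collections import Counter
## import arcpy
## counts = Counter(
##     row[0] for row in arcpy.da.SearchCursor("feature_class", ["field_name"])
## )
## counts["some_value"]  ## number of occurrences of that value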
|
skobz/Python
|
Find_Duplicates.py
|
Python
|
mit
| 651
|
"""
Adapted code from "Contrast Limited Adaptive Histogram Equalization" by Karel
Zuiderveld <karel@cv.ruu.nl>, Graphics Gems IV, Academic Press, 1994.
http://tog.acm.org/resources/GraphicsGems/gems.html#gemsvi
The Graphics Gems code is copyright-protected. In other words, you cannot
claim the text of the code as your own and resell it. Using the code is
permitted in any program, product, or library, non-commercial or commercial.
Giving credit is not required, though is a nice gesture. The code comes as-is,
and if there are any flaws or problems with any Gems code, nobody involved with
Gems - authors, editors, publishers, or webmasters - are to be held
responsible. Basically, don't be a jerk, and remember that anything free
comes with no guarantee.
"""
from __future__ import division
import numbers
import numpy as np
from .. import img_as_float, img_as_uint
from ..color.adapt_rgb import adapt_rgb, hsv_value
from ..exposure import rescale_intensity
from .._shared.utils import skimage_deprecation, warnings
NR_OF_GREY = 2 ** 14 # number of grayscale levels to use in CLAHE algorithm
@adapt_rgb(hsv_value)
def equalize_adapthist(image, ntiles_x=8, ntiles_y=8, clip_limit=0.01,
nbins=256, kernel_size=None):
"""Contrast Limited Adaptive Histogram Equalization (CLAHE).
    An algorithm for local contrast enhancement that uses histograms computed
over different tile regions of the image. Local details can therefore be
enhanced even in regions that are darker or lighter than most of the image.
Parameters
----------
image : array-like
Input image.
kernel_size: integer or 2-tuple
Defines the shape of contextual regions used in the algorithm.
If an integer is given, the shape will be a square of
sidelength given by this value.
ntiles_x : int, optional (deprecated in favor of ``kernel_size``)
Number of tile regions in the X direction (horizontal).
ntiles_y : int, optional (deprecated in favor of ``kernel_size``)
Number of tile regions in the Y direction (vertical).
    clip_limit : float, optional
Clipping limit, normalized between 0 and 1 (higher values give more
contrast).
nbins : int, optional
Number of gray bins for histogram ("dynamic range").
Returns
-------
out : ndarray
Equalized image.
See Also
--------
equalize_hist, rescale_intensity
Notes
-----
* For color images, the following steps are performed:
- The image is converted to HSV color space
- The CLAHE algorithm is run on the V (Value) channel
- The image is converted back to RGB space and returned
* For RGBA images, the original alpha channel is removed.
References
----------
.. [1] http://tog.acm.org/resources/GraphicsGems/gems.html#gemsvi
.. [2] https://en.wikipedia.org/wiki/CLAHE#CLAHE
"""
image = img_as_uint(image)
image = rescale_intensity(image, out_range=(0, NR_OF_GREY - 1))
if kernel_size is None:
warnings.warn('`ntiles_*` have been deprecated in favor of '
'`kernel_size`. The `ntiles_*` keyword arguments '
'will be removed in v0.14', skimage_deprecation)
ntiles_x = ntiles_x or 8
ntiles_y = ntiles_y or 8
kernel_size = (np.round(image.shape[0] / ntiles_y),
np.round(image.shape[1] / ntiles_x))
if isinstance(kernel_size, numbers.Number):
kernel_size = (kernel_size, kernel_size)
kernel_size = [int(k) for k in kernel_size]
image = _clahe(image, kernel_size, clip_limit * nbins, nbins)
image = img_as_float(image)
return rescale_intensity(image)
def _clahe(image, kernel_size, clip_limit, nbins=128):
"""Contrast Limited Adaptive Histogram Equalization.
Parameters
----------
image : array-like
Input image.
kernel_size: 2-tuple
Defines the shape of contextual regions used in the algorithm.
clip_limit : float, optional
Normalized clipping limit (higher values give more contrast).
nbins : int, optional
Number of gray bins for histogram ("dynamic range").
Returns
-------
out : ndarray
Equalized image.
The number of "effective" greylevels in the output image is set by `nbins`;
    selecting a small value (e.g. 128) speeds up processing and still produces
an output image of good quality. The output image will have the same
minimum and maximum value as the input image. A clip limit smaller than 1
results in standard (non-contrast limited) AHE.
"""
if clip_limit == 1.0:
        return image  # a clip limit of 1.0 disables clipping; return input unchanged
nr = int(np.ceil(image.shape[0] / kernel_size[0]))
nc = int(np.ceil(image.shape[1] / kernel_size[1]))
row_step = int(np.floor(image.shape[0] / nr))
col_step = int(np.floor(image.shape[1] / nc))
bin_size = 1 + NR_OF_GREY // nbins
lut = np.arange(NR_OF_GREY)
lut //= bin_size
map_array = np.zeros((nr, nc, nbins), dtype=int)
# Calculate greylevel mappings for each contextual region
for r in range(nr):
for c in range(nc):
sub_img = image[r * row_step: (r + 1) * row_step,
c * col_step: (c + 1) * col_step]
if clip_limit > 0.0: # Calculate actual cliplimit
clim = int(clip_limit * sub_img.size / nbins)
if clim < 1:
clim = 1
else:
clim = NR_OF_GREY # Large value, do not clip (AHE)
hist = lut[sub_img.ravel()]
hist = np.bincount(hist)
hist = np.append(hist, np.zeros(nbins - hist.size, dtype=int))
hist = clip_histogram(hist, clim)
hist = map_histogram(hist, 0, NR_OF_GREY - 1, sub_img.size)
map_array[r, c] = hist
# Interpolate greylevel mappings to get CLAHE image
rstart = 0
for r in range(nr + 1):
cstart = 0
if r == 0: # special case: top row
r_offset = row_step / 2.0
rU = 0
rB = 0
elif r == nr: # special case: bottom row
r_offset = row_step / 2.0
rU = nr - 1
rB = rU
else: # default values
r_offset = row_step
rU = r - 1
            rB = rU + 1
for c in range(nc + 1):
if c == 0: # special case: left column
c_offset = col_step / 2.0
cL = 0
cR = 0
elif c == nc: # special case: right column
c_offset = col_step / 2.0
cL = nc - 1
cR = cL
else: # default values
c_offset = col_step
cL = c - 1
cR = cL + 1
mapLU = map_array[rU, cL]
mapRU = map_array[rU, cR]
mapLB = map_array[rB, cL]
mapRB = map_array[rB, cR]
cslice = np.arange(cstart, cstart + c_offset)
rslice = np.arange(rstart, rstart + r_offset)
interpolate(image, cslice, rslice,
mapLU, mapRU, mapLB, mapRB, lut)
cstart += c_offset # set pointer on next matrix */
rstart += r_offset
return image
def clip_histogram(hist, clip_limit):
"""Perform clipping of the histogram and redistribution of bins.
The histogram is clipped and the number of excess pixels is counted.
Afterwards the excess pixels are equally redistributed across the
    whole histogram (provided the bin count is smaller than the clip limit).
Parameters
----------
hist : ndarray
Histogram array.
clip_limit : int
Maximum allowed bin count.
Returns
-------
hist : ndarray
Clipped histogram.
"""
# calculate total number of excess pixels
excess_mask = hist > clip_limit
excess = hist[excess_mask]
n_excess = excess.sum() - excess.size * clip_limit
# Second part: clip histogram and redistribute excess pixels in each bin
bin_incr = int(n_excess / hist.size) # average binincrement
upper = clip_limit - bin_incr # Bins larger than upper set to cliplimit
hist[excess_mask] = clip_limit
low_mask = hist < upper
n_excess -= hist[low_mask].size * bin_incr
hist[low_mask] += bin_incr
mid_mask = (hist >= upper) & (hist < clip_limit)
mid = hist[mid_mask]
n_excess -= mid.size * clip_limit - mid.sum()
hist[mid_mask] = clip_limit
prev_n_excess = n_excess
while n_excess > 0: # Redistribute remaining excess
index = 0
while n_excess > 0 and index < hist.size:
under_mask = hist < 0
step_size = int(hist[hist < clip_limit].size / n_excess)
step_size = max(step_size, 1)
indices = np.arange(index, hist.size, step_size)
under_mask[indices] = True
under_mask = (under_mask) & (hist < clip_limit)
hist[under_mask] += 1
n_excess -= under_mask.sum()
index += 1
# bail if we have not distributed any excess
if prev_n_excess == n_excess:
break
prev_n_excess = n_excess
return hist
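# Worked example (illustrative only): clipping [10, 1, 1, 0] at clip_limit=4
# caps bin 0 and redistributes the 6 excess counts, preserving the total:
#
#   >>> h = clip_histogram(np.array([10, 1, 1, 0]), 4)
#   >>> int(h.sum()), int(h.max())
#   (12, 4)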
def map_histogram(hist, min_val, max_val, n_pixels):
"""Calculate the equalized lookup table (mapping).
It does so by cumulating the input histogram.
Parameters
----------
hist : ndarray
Clipped histogram.
min_val : int
Minimum value for mapping.
max_val : int
Maximum value for mapping.
n_pixels : int
Number of pixels in the region.
Returns
-------
out : ndarray
Mapped intensity LUT.
"""
out = np.cumsum(hist).astype(float)
scale = ((float)(max_val - min_val)) / n_pixels
out *= scale
out += min_val
out[out > max_val] = max_val
return out.astype(int)
def interpolate(image, xslice, yslice,
mapLU, mapRU, mapLB, mapRB, lut):
"""Find the new grayscale level for a region using bilinear interpolation.
Parameters
----------
image : ndarray
Full image.
xslice, yslice : array-like
Indices of the region.
map* : ndarray
Mappings of greylevels from histograms.
lut : ndarray
Maps grayscale levels in image to histogram levels.
Returns
-------
out : ndarray
Original image with the subregion replaced.
Notes
-----
This function calculates the new greylevel assignments of pixels within
a submatrix of the image. This is done by a bilinear interpolation between
four different mappings in order to eliminate boundary artifacts.
"""
norm = xslice.size * yslice.size # Normalization factor
# interpolation weight matrices
x_coef, y_coef = np.meshgrid(np.arange(xslice.size),
np.arange(yslice.size))
x_inv_coef, y_inv_coef = x_coef[:, ::-1] + 1, y_coef[::-1] + 1
view = image[int(yslice[0]):int(yslice[-1] + 1),
int(xslice[0]):int(xslice[-1] + 1)]
im_slice = lut[view]
new = ((y_inv_coef * (x_inv_coef * mapLU[im_slice]
+ x_coef * mapRU[im_slice])
+ y_coef * (x_inv_coef * mapLB[im_slice]
+ x_coef * mapRB[im_slice]))
/ norm)
view[:, :] = new
return image
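# --- Illustrative usage sketch (not part of this module) --------------------
# A minimal public-API call on a synthetic image; `kernel_size` is given in
# pixels and the result is rescaled to [0, 1]:
#
#   >>> import numpy as np
#   >>> from skimage.exposure import equalize_adapthist
#   >>> img = np.tile(np.linspace(0, 1, 64), (64, 1))
#   >>> out = equalize_adapthist(img, kernel_size=16, clip_limit=0.02)
#   >>> out.shape
#   (64, 64)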
|
jwiggins/scikit-image
|
skimage/exposure/_adapthist.py
|
Python
|
bsd-3-clause
| 11,437
|
import asyncio
from .children_watcher import ChildrenWatcher
from .sequential import SequentialRecipe
class Party(SequentialRecipe):
sub_recipes = {
"watcher": ChildrenWatcher,
}
def __init__(self, base_path, name):
super().__init__(base_path)
self.name = name
self.members = []
self.change_future = None
async def join(self):
await self.create_unique_znode(self.name)
_, siblings = await self.analyze_siblings()
self.update_members(siblings)
self.watcher.add_callback(self.base_path, self.update_members)
async def wait_for_change(self):
if not self.change_future or self.change_future.done():
loop = asyncio.get_running_loop()
self.change_future = loop.create_future()
await self.change_future
async def leave(self):
self.watcher.remove_callback(self.base_path, self.update_members)
await self.delete_unique_znode(self.name)
def update_members(self, raw_sibling_names):
new_members = [
self.determine_znode_label(sibling)
for sibling in raw_sibling_names
]
self.members = new_members
if self.change_future and not self.change_future.done():
self.change_future.set_result(new_members)
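# --- Illustrative usage sketch (not part of the recipe) ---------------------
# Recipes are bound to a ZooKeeper client before use; `client` and the
# "/demo/party" path are hypothetical names for this sketch, and
# `set_client` is assumed to come from the recipe base class.
#
#   async def demo(client):
#       party = Party("/demo/party", name="worker-1")
#       party.set_client(client)
#       await party.join()
#       members = await party.wait_for_change()  # blocks until membership changes
#       await party.leave()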
|
tipsi/aiozk
|
aiozk/recipes/party.py
|
Python
|
mit
| 1,306
|
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.shortcuts import redirect, render
from account.user_functions import can_create_character
from messaging.models import ServerMOTD
def register_view(request):
if request.user.is_authenticated:
return redirect('account:home')
if request.POST:
username = request.POST.get('username')
email = request.POST.get('email')
password = request.POST.get('password')
password2 = request.POST.get('password2')
if User.objects.filter(email=email).exists():
messages.add_message(
request,
messages.ERROR,
"An account with this email address already exists",
extra_tags="danger"
)
return redirect('account:register')
if User.objects.filter(username=username).exists():
messages.add_message(
request,
messages.ERROR,
"An account with this username already exists",
extra_tags="danger"
)
return redirect('account:register')
if password != password2:
messages.add_message(request, messages.ERROR, "Your passwords don't match!", extra_tags="danger")
return redirect('account:register')
User.objects.create_user(
username=username,
email=email,
password=password
)
messages.add_message(request, messages.SUCCESS, "Account created. Please log in.", extra_tags="success")
return redirect('account:login')
else:
return render(request, 'account/register.html')
def login_view(request):
if request.POST:
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
messages.add_message(request, messages.SUCCESS, "Login successful.", extra_tags='success')
return redirect('account:home')
else:
messages.add_message(
request,
messages.ERROR,
"Your username and/or your password is incorrect.",
extra_tags='warning'
)
return redirect('account:login')
else:
if request.user.is_authenticated:
return redirect('account:home')
else:
return render(request, 'account/login.html')
@login_required
def logout_view(request):
logout(request)
messages.add_message(request, messages.SUCCESS, "You have been logged out.", extra_tags='success')
return redirect('base:home')
@login_required
def home(request):
context = {
'server_messages': ServerMOTD.objects.all() if request.user.is_staff
else ServerMOTD.objects.filter(draft=False),
'can_create_character': can_create_character(request.user)
}
return render(request, 'account/home.html', context=context)
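# --- Illustrative URL wiring (hypothetical urls.py, not part of this file) --
# The views above reverse names such as 'account:login' and 'account:home';
# a matching urlconf could look roughly like this:
#
#   from django.urls import path
#   from account import views
#
#   app_name = 'account'
#   urlpatterns = [
#       path('register/', views.register_view, name='register'),
#       path('login/', views.login_view, name='login'),
#       path('logout/', views.logout_view, name='logout'),
#       path('', views.home, name='home'),
#   ]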
|
jardiacaj/finem_imperii
|
account/views.py
|
Python
|
agpl-3.0
| 3,269
|
import json
import re
import string
import random
from collections import Counter
from decimal import Decimal
from residue.models import ResidueGenericNumberEquivalent
from signprot.models import SignprotComplex
from protein.models import Protein, ProteinCouplings
from common.definitions import AMINO_ACID_GROUPS
from django.core.exceptions import ObjectDoesNotExist
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def get_entry_names(request):
"""Extract a list of entry names from the post request"""
prot_confs = request.POST.getlist("pos[]")
complex_objs = SignprotComplex.objects.prefetch_related('structure__protein_conformation__protein').filter(structure__protein_conformation__in=prot_confs)
entry_names = [complex_obj.structure.protein_conformation.protein.entry_name for complex_obj in complex_objs]
return entry_names
def get_ignore_info(request):
"""Extract a dict of residues to ignore from the post request"""
ignore_dict = request.POST.get("ignore")
return json.loads(ignore_dict)
def get_class_slug(common_class):
# access the list of the most common element and get the value
value = common_class[0][0]
# extract the class character, e.g. Class B1 Receptor -> B
value = value.split(' ')[1][0]
# return the lowercase character
return value.lower()
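# Illustrative example (hypothetical Counter.most_common() output):
#   get_class_slug([("Class B1 Receptor", 12)]) -> "b"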
def get_protein_segments(request):
"""From a list of given generic numbers (3x50), query appropriate generic residue
number objects"""
segments = []
segment_raw = request.POST.getlist("seg[]")
selected_receptor_classes = request.POST.getlist("selectedreceptorclasses[]")
for s in segment_raw:
try:
gen_object = ResidueGenericNumberEquivalent.objects.filter(
label=s, scheme__slug__in=['gpcrdba']
).get()
segments.append(gen_object)
except ObjectDoesNotExist as e:
print("For {} a {} ".format(s, e))
continue
return segments
def get_generic_numbers(signature_data):
"""Parse the generic numbers in the signature data"""
generic_numbers = []
for _, segments in signature_data["common_generic_numbers"].items():
for elem, num in segments.items():
gnl = []
for x, dn in num.items():
gnl.append(x)
generic_numbers.append(gnl)
return generic_numbers
def get_signature_features(signature_data, generic_numbers, feats):
"""Extract the signature features and prepare for visualization"""
signature_features = []
x = 0
tmp = list()
for segment, s in signature_data["a_pos"].consensus.items():
for p, r in s.items():
tmp.append({
"aa": r[0],
"aa_cons": r[2]
})
for i, feature in enumerate(signature_data["a_pos"].feature_stats):
for j, segment in enumerate(feature):
for k, freq in enumerate(segment):
# freq0: score
# freq1: level of conservation
# freq2: a - b explanation
try:
if int(freq[0]) > 0:
dkey = int(x)
dfeature = str(feats[i][0])
dfeature_code = str(feats[i][1])
dlength = str(feats[i][2])
dgn = str(generic_numbers[j][k])
dfreq = int(freq[0])
dcons = int(freq[1])
sort_code = dfeature_code + "_" + dlength
if sort_code in AMINO_ACID_GROUPS:
sort_score = len(AMINO_ACID_GROUPS[sort_code])
elif dfeature_code == 'Y':
print('Y_?')
sort_code = dfeature_code + "_" + '?'
sort_score = len(AMINO_ACID_GROUPS[sort_code])
else:
sort_score = 99
signature_features.append(
{
"key": dkey,
"feature": dfeature,
"feature_code": dfeature_code,
"length": dlength,
"gn": dgn,
"freq": dfreq,
"cons": dcons,
"sort_score": sort_score,
# 'expl': str(freq[2]),
"aa": str(tmp[k]["aa"]),
"aa_cons": int(tmp[k]["aa_cons"]),
"sort_code": str(sort_code),
}
)
x += 1
except Exception as e:
print(e)
continue
return signature_features
def group_signature_features(signature_features):
"""Further prepare signature feature dict for visualization"""
grouped_features = {}
for feature in signature_features:
if feature["gn"] not in grouped_features:
grouped_features[feature["gn"]] = []
grouped_features[feature["gn"]].append(feature)
for key in grouped_features:
curr_group = grouped_features[key]
grouped_features[key] = sorted(
curr_group, key=lambda feature: feature["freq"], reverse=True
)
return grouped_features
def get_signature_consensus(signature_data, generic_numbers):
"""Extract the signature consensus and prepare for visualization"""
sigcons = []
x = 0
for segment, cons in signature_data["feats_cons_pos"].items():
for pos in cons:
# pos0: Code
# pos1: Name
# pos2: Score
# pos3: level of conservation
# res0: Property Abbreviation
# res1: Feature Score
# res2: Conservation Level
try:
sigcons.append(
{
"key": int(x),
"gn": str(generic_numbers[x]),
"code": str(pos[0]),
"feature": str(pos[1]),
"score": int(pos[2]),
"cons": int(pos[3]),
"length": str(pos[4]),
}
)
x += 1
except Exception as e:
print(e)
return sigcons
def prepare_signature_match(signature_match):
repl_str = id_generator(6)
sign_true_1 = '<div class="{}">'.format(repl_str)
sign_true_2 = '{}</div>'
sign_false = '<div></div>'
gprots = ['Gs','Gi/o','Gq/11','G12/13']
class_coupling = 'coupling '
coupling_data = prepare_coupling_data_container()
coupling_data = fill_coupling_data_container(coupling_data)
coupling_data = process_coupling_data(coupling_data)
coupling_data_dict = {}
for e in coupling_data:
coupling_data_dict[e['rec_obj'].entry_name] = e
out = {}
for elem in signature_match["scores"].items():
entry = elem[0].protein.entry_name
out[entry] = {
"entry": elem[0].protein.entry_short(),
"prot": elem[0].protein.name,
"score": elem[1][0],
"nscore": round(elem[1][1], 0),
"class": elem[0].protein.get_protein_class().strip().split(' ')[1],
"family": elem[0].protein.get_protein_family(),
"subfamily": elem[0].protein.get_protein_subfamily(),
}
for elem in signature_match["scores"].items():
entry = elem[0].protein.entry_name
coupling_entry = coupling_data_dict.get(entry)
sources = ["GuideToPharma", "Aska", "Merged"]
for source in sources:
out[entry][source] = {}
for gprot in gprots:
out[entry][source][gprot] = {}
if coupling_entry:
ce = coupling_entry
cl = ce['coupling'][source].get(gprot, '')
if ce[source][gprot]:
if cl[:4] == 'prim':
html_val = sign_true_1.replace(repl_str, class_coupling+cl[:4]) + sign_true_2.format(cl)
text_val = cl
elif cl[:4] == 'seco':
html_val = sign_true_1.replace(repl_str, class_coupling+cl[:4]) + sign_true_2.format(cl)
text_val = cl
elif cl[:2] == 'no':
html_val = sign_true_1.replace(repl_str, class_coupling+cl[:2]) + sign_true_2.format(cl)
text_val = cl
else:
html_val = sign_false
text_val = ''
out[entry][source][gprot]['html'] = html_val
out[entry][source][gprot]['text'] = text_val
else:
out[entry][source][gprot]['html'] = sign_false
out[entry][source][gprot]['text'] = ''
else:
out[entry][source][gprot]['html'] = sign_false
out[entry][source][gprot]['text'] = ''
return out
def prepare_coupling_data_container():
class_names = {}
data = {}
proteins = (
Protein.objects.filter(
sequence_type__slug="wt",
family__slug__startswith="00",
species__common_name="Human")
.prefetch_related("family")
)
for p in proteins:
p_class = p.family.slug.split("_")[0]
if p_class not in class_names:
class_names[p_class] = re.sub(
r"\([^)]*\)", "", p.family.parent.parent.parent.name
)
p_class_name = class_names[p_class].strip()
data[p.entry_short()] = {
"class": p_class_name,
"pretty": p.short()[:15],
"GuideToPharma": {},
"Aska": {},
"rec_class": p.get_protein_class(),
"rec_obj": p,
"rec_pdb": p.entry_short(),
}
return data
def fill_coupling_data_container(data):
distinct_sources = ["GuideToPharma", "Aska"]
couplings = ProteinCouplings.objects.filter(source__in=distinct_sources).prefetch_related(
"protein", "g_protein_subunit", "g_protein"
)
for c in couplings:
p = c.protein.entry_short()
# Skip entries without any annotation
if p not in data:
continue
s = c.source
t = c.transduction
m = c.logmaxec50
gf = c.g_protein.name
gf = gf.replace(" family", "")
if c.g_protein_subunit:
g = c.g_protein_subunit.entry_name
g = g.replace("_human", "")
if s not in data[p]:
data[p][s] = {}
if gf not in data[p][s]:
data[p][s][gf] = {}
# If transduction in GuideToPharma data
if t:
data[p][s][gf] = t
else:
if "subunits" not in data[p][s][gf]:
data[p][s][gf] = {"subunits": {}, "best": -2.00}
data[p][s][gf]["subunits"][g] = round(Decimal(m), 2)
if round(Decimal(m), 2) == -0.00:
data[p][s][gf]["subunits"][g] = 0.00
            # keep the highest logmaxec50 value seen so far in 'best'
if m > data[p][s][gf]["best"]:
data[p][s][gf]["best"] = round(Decimal(m), 2)
return data
def process_coupling_data(data):
res = []
for entry in data.keys():
i = data[entry]
e = {}
c_gtop = extract_coupling_bool(i, "GuideToPharma")
p_gtop = extract_coupling_primary(i["GuideToPharma"])
c_aska = extract_coupling_bool(i, "Aska")
p_aska = extract_coupling_primary(c_aska[1])
c_merg = extract_coupling_bool(i, "Merged")
p_merg = extract_coupling_primary(c_merg[1])
e['coupling'] = {}
e["GuideToPharma"] = {}
e["Aska"] = {}
e["Merged"] = {}
e["rec_class"] = i["rec_class"]
e["rec_obj"] = i["rec_obj"]
e["key"] = entry
e["coupling"]["GuideToPharma"] = i["GuideToPharma"]
e["coupling"]["Aska"] = c_aska[1]
e["coupling"]["Merged"] = c_merg[1]
for x in ["Gs", "Gi/o", "Gq/11", "G12/13"]:
e["GuideToPharma"][x] = c_gtop[x]
e["Aska"][x] = c_aska[0][x]
e["Merged"][x] = c_merg[0][x]
e["GuideToPharma"]["gprot"] = p_gtop
e["Aska"]["gprot"] = p_aska
e["Merged"]["gprot"] = p_merg
res.append(e)
return res
def extract_coupling_bool(gp, source):
distinct_g_families = ['Gs','Gi/o', 'Gq/11', 'G12/13', ]
threshold_primary = -0.1
threshold_secondary = -1
if source == 'GuideToPharma':
gp = gp[source]
c = {"Gi/o": False, "Gs": False, "Gq/11": False, "G12/13": False}
for key in c:
if key in gp:
c[key] = True
return c
elif source == 'Aska':
gp = gp[source]
c = {"Gi/o": False, "Gs": False, "Gq/11": False, "G12/13": False}
c_levels = {}
for gf in distinct_g_families:
if gf in gp:
if gp[gf]['best']>threshold_primary:
c[gf] = True
c_levels[gf] = "primary"
elif gp[gf]['best']>threshold_secondary:
c[gf] = True
c_levels[gf] = "secondary"
else:
c[gf] = True
c_levels[gf] = "no coupling"
return (c, c_levels)
elif source == 'Merged':
c = {"Gi/o": False, "Gs": False, "Gq/11": False, "G12/13": False}
c_levels = {}
v = gp
for gf in distinct_g_families:
values = []
if 'GuideToPharma' in v and gf in v['GuideToPharma']:
values.append(v['GuideToPharma'][gf])
if 'Aska' in v and gf in v['Aska']:
best = v['Aska'][gf]['best']
if best > threshold_primary:
values.append('primary')
elif best > threshold_secondary:
values.append('secondary')
else:
values.append("no coupling")
if 'primary' in values:
c[gf] = True
c_levels[gf] = "primary"
elif 'secondary' in values:
c[gf] = True
c_levels[gf] = "secondary"
elif 'no coupling' in values:
c[gf] = True
c_levels[gf] = "no coupling"
return (c, c_levels)
def extract_coupling_primary(gp):
p = []
for key in gp:
if gp[key] == "primary":
p.append(key)
return p
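# Minimal illustrative check (invented inputs, not real GPCRdb data):
# extract_coupling_primary keeps only the families marked "primary".
#
#   >>> extract_coupling_primary({"Gs": "primary", "Gi/o": "secondary"})
#   ['Gs']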
|
protwis/protwis
|
signprot/interactions.py
|
Python
|
apache-2.0
| 14,959
|
#!/usr/bin/env python
# CREATED: 2013-10-06 22:31:29 by Dawen Liang <dl2771@columbia.edu>
# unit tests for librosa.decompose
import warnings
# Disable cache
import os
try:
os.environ.pop('LIBROSA_CACHE_DIR')
except KeyError:
pass
import numpy as np
import scipy.sparse
import librosa
import sklearn.decomposition
from nose.tools import raises
from test_core import srand
warnings.resetwarnings()
warnings.simplefilter('always')
def test_default_decompose():
X = np.array([[1, 2, 3, 4, 5, 6], [1, 1, 1.2, 1, 0.8, 1]])
(W, H) = librosa.decompose.decompose(X, random_state=0)
assert np.allclose(X, W.dot(H), rtol=1e-2, atol=1e-2)
def test_given_decompose():
D = sklearn.decomposition.NMF(random_state=0)
X = np.array([[1, 2, 3, 4, 5, 6], [1, 1, 1.2, 1, 0.8, 1]])
(W, H) = librosa.decompose.decompose(X, transformer=D)
assert np.allclose(X, W.dot(H), rtol=1e-2, atol=1e-2)
def test_decompose_fit():
srand()
D = sklearn.decomposition.NMF(random_state=0)
X = np.array([[1, 2, 3, 4, 5, 6], [1, 1, 1.2, 1, 0.8, 1]])
# Do a first fit
(W, H) = librosa.decompose.decompose(X, transformer=D, fit=True)
# Make random data and decompose with the same basis
X = np.random.randn(*X.shape)**2
(W2, H2) = librosa.decompose.decompose(X, transformer=D, fit=False)
# Make sure the basis hasn't changed
assert np.allclose(W, W2)
@raises(librosa.ParameterError)
def test_decompose_fit_false():
X = np.array([[1, 2, 3, 4, 5, 6], [1, 1, 1.2, 1, 0.8, 1]])
(W, H) = librosa.decompose.decompose(X, fit=False)
def test_sorted_decompose():
X = np.array([[1, 2, 3, 4, 5, 6], [1, 1, 1.2, 1, 0.8, 1]])
(W, H) = librosa.decompose.decompose(X, sort=True, random_state=0)
assert np.allclose(X, W.dot(H), rtol=1e-2, atol=1e-2)
def test_real_hpss():
# Load an audio signal
y, sr = librosa.load('data/test1_22050.wav')
D = np.abs(librosa.stft(y))
def __hpss_test(window, power, mask, margin):
H, P = librosa.decompose.hpss(D, kernel_size=window, power=power,
mask=mask, margin=margin)
if margin == 1.0 or margin == (1.0, 1.0):
if mask:
assert np.allclose(H + P, np.ones_like(D))
else:
assert np.allclose(H + P, D)
else:
if mask:
assert np.all(H + P <= np.ones_like(D))
else:
assert np.all(H + P <= D)
for window in [31, (5, 5)]:
for power in [1, 2, 10]:
for mask in [False, True]:
for margin in [1.0, 3.0, (1.0, 1.0), (9.0, 10.0)]:
yield __hpss_test, window, power, mask, margin
@raises(librosa.ParameterError)
def test_hpss_margin_error():
y, sr = librosa.load('data/test1_22050.wav')
D = np.abs(librosa.stft(y))
H, P = librosa.decompose.hpss(D, margin=0.9)
def test_complex_hpss():
# Load an audio signal
y, sr = librosa.load('data/test1_22050.wav')
D = librosa.stft(y)
H, P = librosa.decompose.hpss(D)
assert np.allclose(H + P, D)
def test_nn_filter_mean():
srand()
X = np.random.randn(10, 100)
# Build a recurrence matrix, just for testing purposes
rec = librosa.segment.recurrence_matrix(X)
X_filtered = librosa.decompose.nn_filter(X)
# Normalize the recurrence matrix so dotting computes an average
rec = librosa.util.normalize(rec, axis=1, norm=1)
assert np.allclose(X_filtered, X.dot(rec.T))
def test_nn_filter_mean_rec():
srand()
X = np.random.randn(10, 100)
# Build a recurrence matrix, just for testing purposes
rec = librosa.segment.recurrence_matrix(X)
# Knock out the first three rows of links
rec[:3] = 0
X_filtered = librosa.decompose.nn_filter(X, rec=rec)
for i in range(3):
assert np.allclose(X_filtered[:, i], X[:, i])
# Normalize the recurrence matrix
rec = librosa.util.normalize(rec, axis=1, norm=1)
assert np.allclose(X_filtered[:, 3:], (X.dot(rec.T))[:, 3:])
def test_nn_filter_mean_rec_sparse():
srand()
X = np.random.randn(10, 100)
# Build a recurrence matrix, just for testing purposes
rec = librosa.segment.recurrence_matrix(X, sparse=True)
X_filtered = librosa.decompose.nn_filter(X, rec=rec)
# Normalize the recurrence matrix
rec = librosa.util.normalize(rec.toarray(), axis=1, norm=1)
assert np.allclose(X_filtered, (X.dot(rec.T)))
def test_nn_filter_avg():
srand()
X = np.random.randn(10, 100)
# Build a recurrence matrix, just for testing purposes
rec = librosa.segment.recurrence_matrix(X, mode='affinity')
X_filtered = librosa.decompose.nn_filter(X, rec=rec, aggregate=np.average)
# Normalize the recurrence matrix so dotting computes an average
rec = librosa.util.normalize(rec, axis=1, norm=1)
assert np.allclose(X_filtered, X.dot(rec.T))
def test_nn_filter_badselfsim():
@raises(librosa.ParameterError)
def __test(x, y, sparse):
srand()
X = np.empty((10, 100))
# Build a recurrence matrix, just for testing purposes
rec = np.random.randn(x, y)
if sparse:
rec = scipy.sparse.csr_matrix(rec)
librosa.decompose.nn_filter(X, rec=rec)
for (x, y) in [(10, 10), (100, 20), (20, 100), (100, 101), (101, 101)]:
for sparse in [False, True]:
yield __test, x, y, sparse
|
ruohoruotsi/librosa
|
tests/test_decompose.py
|
Python
|
isc
| 5,448
|
"""
This module demonstrates the basic merge decorators of IoTPy.
"""
import numpy as np
import sys
sys.path.append("../../IoTPy/core")
sys.path.append("../../IoTPy/agent_types")
sys.path.append("../../IoTPy/helper_functions")
# agent and stream are in IoTPy/IoTPy/core
from stream import Stream, run
# recent_values is in IoTPy/IoTPy/helper_functions
from recent_values import recent_values
# basics is in IoTPy/IoTPy/agent_types
from basics import merge_e, fmerge_e, merge_asynch
from basics import fmerge_2e, fmerge_w, fmerge_2w
def examples_merge():
#----------------------------------------------
# Simple merge of list of streams.
# Decorate a conventional function to get a
# stream function.
@fmerge_e
def sum_stream(l):
# l is a list.
return sum(l)
# Create streams.
x = Stream('X')
y = Stream('Y')
# Call decorated function.
t = sum_stream([x, y])
# Put data into input streams.
x.extend(list(range(10)))
y.extend(list(range(100, 120)))
# Run the decorated function.
run()
# Check values of output streams.
assert recent_values(t) == [
100, 102, 104, 106, 108,
110, 112, 114, 116, 118]
#----------------------------------------------
# Merge list of streams with keyword argument.
# Decorate a conventional function to get a
# stream function.
@fmerge_e
def h(l, addend):
# l is a list.
return sum(l) + addend
# Create streams.
x = Stream('X')
y = Stream('Y')
# Call decorated function.
t = h([x, y], addend=1000)
# Put data into input streams.
x.extend(list(range(10)))
y.extend(list(range(100, 120)))
# Run the decorated function.
run()
# Check values of output streams.
assert recent_values(t) == [
1100, 1102, 1104, 1106, 1108,
1110, 1112, 1114, 1116, 1118]
#----------------------------------------------
# Merge list of streams with keyword argument
# and state.
# Decorate a conventional function to get a
# stream function.
@fmerge_e
def h(l, state, addend):
# l is a list.
next_state = state + 1
return sum(l) + addend + state, next_state
# Create streams.
x = Stream('X')
y = Stream('Y')
# Call decorated function.
t = h([x, y], state=0, addend=1000)
# Put data into input streams
x.extend(list(range(10)))
y.extend(list(range(100, 120)))
# Run the decorated function.
run()
# Check values of output streams.
assert recent_values(t) == [
1100, 1103, 1106, 1109, 1112,
1115, 1118, 1121, 1124, 1127]
#----------------------------------------------
# Merge list of streams with state.
# Decorate a conventional function to get a
# stream function.
@fmerge_e
def h(l, state):
# l is a list.
next_state = state + 1
return sum(l) + state, next_state
# Create streams.
x = Stream('X')
y = Stream('Y')
# Call decorated function.
t = h([x, y], state=0)
# Put data into input streams
x.extend(list(range(10)))
y.extend(list(range(100, 120)))
# Run the decorated function.
run()
# Check values of output streams.
assert recent_values(t) == [
100, 103, 106, 109, 112, 115,
118, 121, 124, 127]
#----------------------------------------------
    # Asynchronous merge.
# Decorate a conventional function to get a
# stream function.
@merge_asynch
def h(v):
# v is an argument of any input stream.
return 2*v
# Create streams.
x = Stream('X')
y = Stream('Y')
# Call decorated function.
h([x, y], t)
# Put data into input streams.
x.extend(list(range(10)))
y.extend(list(range(100, 120)))
# Run the decorated function.
run()
# Print contents of output streams.
    # print(recent_values(t))
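    # Note: merge_asynch consumes whichever input stream has data available,
    # so the interleaving of x- and y-derived elements in t is
    # nondeterministic; the asynchronous examples therefore print rather
    # than assert recent_values(t).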
#----------------------------------------------
    # Asynchronous merge with state.
# Decorate a conventional function to get a
# stream function.
@merge_asynch
def h(v, state):
next_state = state+1
return 2*v+state, next_state
# Create streams.
x = Stream('x')
y = Stream('y')
# Call decorated function.
h([x, y], t, state=0)
# Put data into input streams.
x.extend(list(range(10)))
y.extend(list(range(100, 120)))
# Run the decorated function.
run()
    # print(recent_values(t))
#----------------------------------------------
    # Asynchronous merge with keyword parameter.
@merge_asynch
def h(v, addend):
return 2*v + addend
# Create streams.
x = Stream('X')
y = Stream('Y')
# Call decorated function.
h([x, y], t, addend=1000)
# Put data into input streams.
x.extend(list(range(10)))
y.extend(list(range(100, 120)))
# Run the decorated function.
run()
    # print(recent_values(t))
#----------------------------------------------
# Merge two streams.
# Decorate a conventional function to get a
# stream function.
@fmerge_2e
def h(x,y):
# x,y are elements of the two input streams.
return x+2*y
# Create streams.
x = Stream()
y = Stream()
# Call decorated function.
t = h(x, y)
# Put data into input streams.
x.extend(list(range(10)))
y.extend(list(range(100, 120)))
# Run the decorated function.
run()
# Check values of output streams.
assert recent_values(t) == [
200, 203, 206, 209, 212, 215,
218, 221, 224, 227]
#----------------------------------------------
# Merge two streams with keyword parameters.
# Decorate a conventional function to get a
# stream function.
@fmerge_2e
def h(x, y, addend):
# x,y are elements of the two input streams.
return x+2*y + addend
x = Stream()
y = Stream()
t = h(x, y, addend=1000)
x.extend(list(range(10)))
y.extend(list(range(100, 120)))
run()
assert recent_values(t) == [
1200, 1203, 1206, 1209, 1212, 1215,
1218, 1221, 1224, 1227]
#----------------------------------------------
# Merge two streams with keyword parameters and
# state.
# Decorate a conventional function to get a
# stream function.
@fmerge_2e
def h(x, y, state, addend):
# x,y are elements of the two input streams.
next_state = state + 1
return x+2*y + addend + state, next_state
x = Stream()
y = Stream()
t = h(x, y, state= 0, addend=1000)
x.extend(list(range(10)))
y.extend(list(range(100, 120)))
run()
assert recent_values(t) == [
1200, 1204, 1208, 1212, 1216,
1220, 1224, 1228, 1232, 1236]
#----------------------------------------------
# Merge two streams with state.
# Decorate a conventional function to get a
# stream function.
@fmerge_2e
def h(x, y, state):
# x,y are elements of the two input streams.
next_state = state + 1
return x+2*y + state, next_state
x = Stream()
y = Stream()
t = h(x, y, state= 0)
x.extend(list(range(10)))
y.extend(list(range(100, 120)))
run()
assert recent_values(t) == [
200, 204, 208, 212, 216,
220, 224, 228, 232, 236]
#----------------------------------------------
# Merge list of streams: window operation.
# Decorate a conventional function to get a
# stream function.
@fmerge_w
def h(list_of_windows):
window_0, window_1 = list_of_windows
return sum(window_0) + 2*sum(window_1)
# Create streams.
x = Stream()
y = Stream()
# Call decorated function.
in_streams = [x, y]
t = h(in_streams, window_size=2, step_size=2)
# Put data into input streams.
x.extend(list(range(20)))
y.extend(list(range(100, 120)))
# Run the decorated function.
run()
# Check values of output streams.
assert recent_values(t) == [
403, 415, 427, 439, 451, 463,
475, 487, 499, 511]
#----------------------------------------------
# Merge list of streams with keyword argument:
# window operation.
# Decorate a conventional function to get a
# stream function.
@fmerge_w
def h(list_of_windows, addend):
window_0, window_1 = list_of_windows
return sum(window_0) + 2*sum(window_1) + addend
x = Stream()
y = Stream()
in_streams = [x, y]
t = h(in_streams, window_size=2, step_size=2, addend=1000)
x.extend(list(range(20)))
y.extend(list(range(100, 120)))
Stream.scheduler.step()
assert recent_values(t) == [
1403, 1415, 1427, 1439, 1451, 1463,
1475, 1487, 1499, 1511]
#----------------------------------------------
# Merge list of streams with state and keyword argument:
# window operation.
# Decorate a conventional function to get a
# stream function.
@fmerge_w
def h(list_of_windows, state, addend):
next_state = state + 1
window_0, window_1 = list_of_windows
return sum(window_0) + 2*sum(window_1) + addend + state, next_state
x = Stream()
y = Stream()
in_streams = [x, y]
t = h(in_streams, window_size=2, step_size=2, state=0, addend=1000)
x.extend(list(range(20)))
y.extend(list(range(100, 120)))
run()
assert recent_values(t) == [
1403, 1416, 1429, 1442, 1455,
1468, 1481, 1494, 1507, 1520]
#----------------------------------------------
# Merge list of streams with state:
# window operation.
# Decorate a conventional function to get a
# stream function.
@fmerge_w
def h(list_of_windows, state):
next_state = state + 1
window_0, window_1 = list_of_windows
return sum(window_0) + 2*sum(window_1) + state, next_state
x = Stream()
y = Stream()
in_streams = [x, y]
t = h(in_streams, window_size=2, step_size=2, state=0)
x.extend(list(range(20)))
y.extend(list(range(100, 120)))
run()
assert recent_values(t) == [
403, 416, 429, 442, 455, 468, 481, 494, 507, 520]
#----------------------------------------------
# Merge two streams: window operation.
# Decorate a conventional function to get a
# stream function.
@fmerge_2w
def h(window_x, window_y):
return sum(window_x) + 2*sum(window_y)
x = Stream()
y = Stream()
t = h(x, y, window_size=2, step_size=2)
x.extend(list(range(20)))
y.extend(list(range(100, 120)))
run()
assert recent_values(t) == [
403, 415, 427, 439, 451, 463,
475, 487, 499, 511]
#----------------------------------------------
# Merge two streams with keyword argument:
# window operation.
# Decorate a conventional function to get a
# stream function.
@fmerge_2w
def h(window_0, window_1, addend):
return sum(window_0) + 2*sum(window_1) + addend
x = Stream()
y = Stream()
t = h(x, y, window_size=2, step_size=2, addend=1000)
x.extend(list(range(20)))
y.extend(list(range(100, 120)))
run()
assert recent_values(t) == [
1403, 1415, 1427, 1439, 1451, 1463,
1475, 1487, 1499, 1511]
#----------------------------------------------
# Merge two streams with state and keyword argument:
# window operation.
# Decorate a conventional function to get a
# stream function.
@fmerge_2w
def h(window_0, window_1, state, addend):
next_state = state + 1
return ((sum(window_0) + 2*sum(window_1) +
addend + state), next_state)
x = Stream()
y = Stream()
t = h(x, y, window_size=2, step_size=2, state=0, addend=1000)
x.extend(list(range(20)))
y.extend(list(range(100, 120)))
run()
assert recent_values(t) == [
1403, 1416, 1429, 1442, 1455,
1468, 1481, 1494, 1507, 1520]
#----------------------------------------------
# Merge two streams with state:
# window operation.
# Decorate a conventional function to get a
# stream function.
@fmerge_2w
def h(window_0, window_1, state):
next_state = state + 1
return sum(window_0) + 2*sum(window_1) + state, next_state
x = Stream()
y = Stream()
t = h(x, y, window_size=2, step_size=2, state=0)
x.extend(list(range(20)))
y.extend(list(range(100, 120)))
run()
assert recent_values(t) == [
403, 416, 429, 442, 455, 468,
481, 494, 507, 520]
if __name__ == '__main__':
examples_merge()
|
AssembleSoftware/IoTPy
|
examples/merge/examples_merge.py
|
Python
|
bsd-3-clause
| 12,608
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# kboard documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 16 16:12:25 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'kboard'
copyright = '2016, kboard team'
author = 'kboard team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'kboard v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'kboarddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'kboard.tex', 'kboard Documentation',
'kboard team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'kboard', 'kboard Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'kboard', 'kboard Documentation',
author, 'kboard', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
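# Newer Sphinx releases prefer named intersphinx keys; an equivalent
# mapping in that style (assuming the Python 3 docs are the target)
# would be:
# intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}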
|
hyesun03/k-board
|
docs/source/conf.py
|
Python
|
mit
| 9,947
|
"""
Preprocess Criteo dataset. This dataset was used for the Display Advertising
Challenge (https://www.kaggle.com/c/criteo-display-ad-challenge).
"""
import os
import sys
import click
import random
import collections
# There are 13 integer features and 26 categorical features
continous_features = range(1, 14)
categorial_features = range(14, 40)
# Clip integer features. The clip point for each integer feature
# is derived from the 95% quantile of the total values in each feature
continous_clip = [20, 600, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50]
class CategoryDictGenerator:
"""
Generate dictionary for each of the categorical features
"""
def __init__(self, num_feature):
self.dicts = []
self.num_feature = num_feature
for i in range(0, num_feature):
self.dicts.append(collections.defaultdict(int))
def build(self, datafile, categorial_features, cutoff=0):
with open(datafile, 'r') as f:
for line in f:
features = line.rstrip('\n').split('\t')
for i in range(0, self.num_feature):
if features[categorial_features[i]] != '':
self.dicts[i][features[categorial_features[i]]] += 1
for i in range(0, self.num_feature):
self.dicts[i] = filter(lambda x: x[1] >= cutoff,
self.dicts[i].items())
self.dicts[i] = sorted(self.dicts[i], key=lambda x: (-x[1], x[0]))
vocabs, _ = list(zip(*self.dicts[i]))
self.dicts[i] = dict(zip(vocabs, range(1, len(vocabs) + 1)))
self.dicts[i]['<unk>'] = 0
def gen(self, idx, key):
if key not in self.dicts[idx]:
res = self.dicts[idx]['<unk>']
else:
res = self.dicts[idx][key]
return res
def dicts_sizes(self):
return map(len, self.dicts)
class ContinuousFeatureGenerator:
"""
Normalize the integer features to [0, 1] by min-max normalization
"""
def __init__(self, num_feature):
self.num_feature = num_feature
self.min = [sys.maxint] * num_feature
self.max = [-sys.maxint] * num_feature
def build(self, datafile, continous_features):
with open(datafile, 'r') as f:
for line in f:
features = line.rstrip('\n').split('\t')
for i in range(0, self.num_feature):
val = features[continous_features[i]]
if val != '':
val = int(val)
if val > continous_clip[i]:
val = continous_clip[i]
self.min[i] = min(self.min[i], val)
self.max[i] = max(self.max[i], val)
def gen(self, idx, val):
if val == '':
return 0.0
val = float(val)
return (val - self.min[idx]) / (self.max[idx] - self.min[idx])
@click.command("preprocess")
@click.option("--datadir", type=str, help="Path to raw criteo dataset")
@click.option("--outdir", type=str, help="Path to save the processed data")
def preprocess(datadir, outdir):
"""
    All 13 integer features are normalized to continuous values, and these
    continuous features are combined into one vector with dimension 13.
    Each of the 26 categorical features is one-hot encoded, and all the
    one-hot vectors are combined into one sparse binary vector.
"""
dists = ContinuousFeatureGenerator(len(continous_features))
dists.build(os.path.join(datadir, 'train.txt'), continous_features)
dicts = CategoryDictGenerator(len(categorial_features))
dicts.build(
os.path.join(datadir, 'train.txt'), categorial_features, cutoff=200)
dict_sizes = dicts.dicts_sizes()
categorial_feature_offset = [0]
for i in range(1, len(categorial_features)):
offset = categorial_feature_offset[i - 1] + dict_sizes[i - 1]
categorial_feature_offset.append(offset)
random.seed(0)
# 90% of the data are used for training, and 10% of the data are used
# for validation.
with open(os.path.join(outdir, 'train.txt'), 'w') as out_train:
with open(os.path.join(outdir, 'valid.txt'), 'w') as out_valid:
with open(os.path.join(datadir, 'train.txt'), 'r') as f:
for line in f:
features = line.rstrip('\n').split('\t')
continous_vals = []
for i in range(0, len(continous_features)):
val = dists.gen(i, features[continous_features[i]])
continous_vals.append("{0:.6f}".format(val).rstrip('0')
.rstrip('.'))
categorial_vals = []
for i in range(0, len(categorial_features)):
val = dicts.gen(i, features[categorial_features[
i]]) + categorial_feature_offset[i]
categorial_vals.append(str(val))
continous_vals = ','.join(continous_vals)
categorial_vals = ','.join(categorial_vals)
label = features[0]
if random.randint(0, 9999) % 10 != 0:
out_train.write('\t'.join(
[continous_vals, categorial_vals, label]) + '\n')
else:
out_valid.write('\t'.join(
[continous_vals, categorial_vals, label]) + '\n')
with open(os.path.join(outdir, 'test.txt'), 'w') as out:
with open(os.path.join(datadir, 'test.txt'), 'r') as f:
for line in f:
features = line.rstrip('\n').split('\t')
continous_vals = []
for i in range(0, len(continous_features)):
val = dists.gen(i, features[continous_features[i] - 1])
continous_vals.append("{0:.6f}".format(val).rstrip('0')
.rstrip('.'))
categorial_vals = []
for i in range(0, len(categorial_features)):
val = dicts.gen(i, features[categorial_features[
i] - 1]) + categorial_feature_offset[i]
categorial_vals.append(str(val))
continous_vals = ','.join(continous_vals)
categorial_vals = ','.join(categorial_vals)
out.write('\t'.join([continous_vals, categorial_vals]) + '\n')
if __name__ == "__main__":
preprocess()
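# Illustrative invocation (the directory paths below are assumed, not
# part of the script):
#
#   python preprocess.py --datadir ./criteo_raw --outdir ./criteo_processed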
|
kuke/models
|
legacy/deep_fm/preprocess.py
|
Python
|
apache-2.0
| 6,588
|
from .config import PrecisionConfig
import numpy as np
import random
Q = 293973345475167247070445277780365744413
class MPCNatural(object):
def __init__(self, id, repo):
self.id = id
self.repo = repo
def get(self):
return sum(self.get_shares()) % Q
def get_shares(self):
others = list(map(lambda x: x.get_share(self.id), self.repo.siblings))
return others + [self.repo.ints[self.id]]
def gen_rand_id(self, length=2**32):
return np.random.randint(0, length)
def __add__(self, id):
new_id = self.gen_rand_id()
return self.repo.add(new_id, self.id, id.id, True)
def __sub__(self, id):
new_id = self.gen_rand_id()
return self.repo.sub(new_id, self.id, id.id, True)
def __mul__(self, x):
new_id = self.gen_rand_id()
if(type(x) == type(self)):
return self.repo.mult(new_id, self.id, x.id, True)
else:
return self.repo.mult_scalar(new_id, self.id, x, True)
def __truediv__(self, x):
new_id = self.gen_rand_id()
if(type(x) == type(self)):
return NotImplemented
else:
return self.repo.div_scalar(new_id, self.id, x, True)
def __repr__(self):
return str(self.get())
class MPCFixedPoint(object):
def __init__(self, raw, repo, config=None, raw_natural=None):
if(config is None):
self.config = PrecisionConfig()
else:
self.config = config
self.repo = repo
if(raw_natural is None):
self.raw_natural = self.repo.create_natural(self.encode(raw))
else:
self.raw_natural = raw_natural
def encode(self, rational):
upscaled = int(rational * self.config.BASE**self.config.PRECISION_FRACTIONAL)
field_element = upscaled % self.config.Q
return field_element
def decode(self, field_element):
upscaled = field_element if field_element <= self.config.Q / 2 else field_element - self.config.Q
rational = upscaled / self.config.BASE**self.config.PRECISION_FRACTIONAL
return rational
def get(self):
return self.decode(self.raw_natural.get()) % self.config.Q
def __add__(self, x):
if(type(x) != type(self)):
x = MPCFixedPoint(x, self.repo, config=self.config)
return MPCFixedPoint(None, self.repo, raw_natural=self.raw_natural + x.raw_natural)
def __sub__(self, x):
if(type(x) != type(self)):
x = MPCFixedPoint(x, self.repo, config=self.config)
return self.raw_natural - x.raw_natural
def __mul__(self, x):
if(type(x) != type(self)):
return MPCFixedPoint(None, self.repo, raw_natural=(self.raw_natural * self.encode(x))).truncate()
else:
return MPCFixedPoint(None, self.repo, raw_natural=(self.raw_natural * x.raw_natural)).truncate()
def __truediv__(self, x):
return self.raw_natural.__truediv__(self.encode(x))
def __repr__(self):
return str(self.get())
def truncate(self):
b = self.raw_natural + self.repo.create_natural_with_shares([self.config.BASE**(2 * self.config.PRECISION + 1), 0, 0])
mask = random.randrange(self.config.Q) % self.config.BASE**(self.config.PRECISION + self.config.PRECISION_FRACTIONAL + self.config.KAPPA)
mask_low = mask % self.config.BASE**self.config.PRECISION_FRACTIONAL
b_masked = (b + self.repo.create_natural_with_shares([mask, 0, 0])).get()
b_masked_low = b_masked % self.config.BASE**self.config.PRECISION_FRACTIONAL
b_low = self.repo.create_natural(b_masked_low) - self.repo.create_natural(mask_low)
c = self.raw_natural - b_low
d = c * self.config.INVERSE
self.raw_natural = d
return self
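# Illustrative sketch (not part of the protocol above): MPCFixedPoint
# encodes a rational r as int(r * BASE**PRECISION_FRACTIONAL) mod Q and
# decodes field elements above Q/2 as negatives. BASE=10 and a
# fractional precision of 4 are assumed values for this demonstration.
if __name__ == '__main__':
    BASE, FRAC = 10, 4

    def _encode(rational):
        # Scale into the integers, then reduce into the field.
        return int(rational * BASE**FRAC) % Q

    def _decode(field_element):
        # Elements above Q/2 represent negative rationals.
        upscaled = field_element if field_element <= Q // 2 else field_element - Q
        return upscaled / BASE**FRAC

    # Negative rationals survive the round trip through the field.
    assert _decode(_encode(-1.25)) == -1.25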
|
dipanshunagar/PySyft
|
syft/mpc/rss/scalar.py
|
Python
|
apache-2.0
| 3,827
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from collections import Counter
class Solution(object):
def findMode(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
if not root:
return []
C = Counter()
def travel(root):
if root:
C[root.val] += 1
travel(root.left)
travel(root.right)
travel(root)
        # .values()/.items() work under both Python 2 and 3, unlike the
        # Python-2-only itervalues()/iteritems().
        maxcount = max(C.values())
        return [n for n, c in C.items() if c == maxcount]
|
xiaonanln/myleetcode-python
|
src/501. Find Mode in Binary Search Tree.py
|
Python
|
apache-2.0
| 560
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods for routing packageables to appropriate packagers."""
import collections
from typing import Any, Dict, List, Sequence, Tuple
from xmanager import xm
from xmanager.bazel import client as bazel_client
from xmanager.xm import pattern_matching
from xmanager.xm_local import executors
from xmanager.xm_local.packaging import bazel_tools
from xmanager.xm_local.packaging import cloud as cloud_packaging
from xmanager.xm_local.packaging import local as local_packaging
def _visit_vertex_spec(
bazel_outputs: bazel_tools.TargetOutputs,
packageable: xm.Packageable,
_: executors.VertexSpec,
):
del bazel_outputs
return cloud_packaging.package_cloud_executable(
packageable,
packageable.executable_spec,
)
def _visit_local_spec(
bazel_outputs: bazel_tools.TargetOutputs,
packageable: xm.Packageable,
_: executors.LocalSpec,
):
return local_packaging.package_for_local_executor(
bazel_outputs,
packageable,
packageable.executable_spec,
)
def _visit_kubernetes_spec(
bazel_outputs: bazel_tools.TargetOutputs,
packageable: xm.Packageable,
_: executors.KubernetesSpec,
):
del bazel_outputs
return cloud_packaging.package_cloud_executable(
packageable,
packageable.executable_spec,
)
def _throw_on_unknown_executor(
bazel_outputs: bazel_tools.TargetOutputs,
packageable: xm.Packageable,
executor: Any,
):
raise TypeError(f'Unsupported executor specification: {executor!r}. '
f'Packageable: {packageable!r}')
_PACKAGING_ROUTER = pattern_matching.match(
_visit_vertex_spec,
_visit_local_spec,
_visit_kubernetes_spec,
_throw_on_unknown_executor,
)
def _normalize_label(label: str, kind: str) -> str:
"""Attempts to correct the label if it does not point to the right target.
In certain cases people might specify labels that do not correspond to the
desired output. For example, for a `py_binary(name='foo', ...)` target the
self-contained executable is actually called 'foo.par'.
Args:
label: The target's name.
kind: The target's kind.
Returns:
Either the same or a corrected label.
"""
if kind == 'py_binary rule' and not label.endswith('.par'):
return f'{label}.par'
return label
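# For example, _normalize_label('//pkg:foo', 'py_binary rule') returns
# '//pkg:foo.par', while labels of other kinds pass through unchanged.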
_ArgsToTargets = Dict[Tuple[str, ...], List[bazel_client.BazelTarget]]
def package(packageables: Sequence[xm.Packageable]) -> List[xm.Executable]:
"""Routes a packageable to an appropriate packaging mechanism."""
built_targets: bazel_tools.TargetOutputs = {}
bazel_targets = bazel_tools.collect_bazel_targets(packageables)
if bazel_targets:
bazel_service = bazel_tools.local_bazel_service()
bazel_labels = [target.label for target in bazel_targets]
bazel_kinds = bazel_service.fetch_kinds(bazel_labels)
label_to_kind = dict(zip(bazel_labels, bazel_kinds))
args_to_targets: _ArgsToTargets = collections.defaultdict(list)
for target in bazel_targets:
args_to_targets[target.bazel_args].append(target)
for args, targets in args_to_targets.items():
outputs = bazel_service.build_targets(
labels=[
_normalize_label(target.label, label_to_kind[target.label])
for target in targets
],
bazel_args=args,
)
for target, output in zip(targets, outputs):
built_targets[target] = output
return [
_PACKAGING_ROUTER(built_targets, packageable, packageable.executor_spec)
for packageable in packageables
]
|
deepmind/xmanager
|
xmanager/xm_local/packaging/router.py
|
Python
|
apache-2.0
| 4,103
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import distutils.version
import os
import sys
import textwrap
import unittest
ROOT_PATH = os.path.abspath(os.path.join(
os.path.dirname(os.path.dirname(__file__))))
def native_error(msg, version):
print textwrap.dedent("""\
ERROR: Native python-coverage (version: %s) is required to be
installed on your PYTHONPATH to run this test. Recommendation:
sudo apt-get install pip
sudo pip install --upgrade coverage
%s""") % (version, msg)
sys.exit(1)
def covered_main(includes, require_native=None, required_percentage=100.0):
"""Equivalent of unittest.main(), except that it gathers coverage data, and
asserts if the test is not at 100% coverage.
Args:
includes (list(str) or str) - List of paths to include in coverage report.
May also be a single path instead of a list.
require_native (str) - If non-None, will require that
at least |require_native| version of coverage is installed on the
      system with CTracer.
    required_percentage (float) - Minimum coverage percentage the report
      must reach; a lower figure sets the return code to 2.
  """
try:
import coverage
if require_native is not None:
got_ver = coverage.__version__
if not getattr(coverage.collector, 'CTracer', None):
native_error((
"Native python-coverage module required.\n"
"Pure-python implementation (version: %s) found: %s"
) % (got_ver, coverage), require_native)
if got_ver < distutils.version.LooseVersion(require_native):
native_error("Wrong version (%s) found: %s" % (got_ver, coverage),
require_native)
except ImportError:
if require_native is None:
sys.path.insert(0, os.path.join(ROOT_PATH, 'third_party'))
import coverage
else:
print ("ERROR: python-coverage (%s) is required to be installed on your "
"PYTHONPATH to run this test." % require_native)
sys.exit(1)
COVERAGE = coverage.coverage(include=includes)
COVERAGE.start()
retcode = 0
try:
unittest.main()
except SystemExit as e:
retcode = e.code or retcode
COVERAGE.stop()
if COVERAGE.report() < required_percentage:
    print 'FATAL: not at required %f%% coverage.' % required_percentage
retcode = 2
return retcode
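# Illustrative usage (the module name below is assumed, not from
# depot_tools itself):
#
#   if __name__ == '__main__':
#     sys.exit(covered_main(
#         os.path.join(ROOT_PATH, 'my_module.py'),
#         require_native='3.7'))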
|
withtone/depot_tools
|
testing_support/coverage_utils.py
|
Python
|
bsd-3-clause
| 2,321
|
""" Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck
# Mathieu Blondel <mathieu@mblondel.org>
# Tom Dupre la Tour
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (Projected gradient, Python and NumPy port)
# License: BSD 3 clause
from __future__ import division, print_function
from math import sqrt
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted, check_non_negative
from ..utils import deprecated
from ..exceptions import ConvergenceWarning
from .cdnmf_fast import _update_cdnmf_fast
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
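# For example, a one-hot vector such as [1, 0, 0, 0] has sparseness 1,
# while a constant vector such as [1, 1, 1, 1] has sparseness 0.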
def _check_init(A, shape, whom):
A = check_array(A)
if np.shape(A) != shape:
raise ValueError('Array with wrong shape passed to %s. Expected %s, '
'but got %s ' % (whom, shape, np.shape(A)))
check_non_negative(A, whom)
if np.max(A) == 0:
raise ValueError('Array passed to %s is full of zeros.' % whom)
def _safe_compute_error(X, W, H):
"""Frobenius norm between X and WH, safe for sparse array"""
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
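        # Expand ||X - WH||_F^2 = ||X||_F^2 + ||WH||_F^2 - 2*<X, WH>
        # so the dense difference X - WH is never materialized.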
norm_X = np.dot(X.data, X.data)
norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(norm_X + norm_WH - 2. * cross_prod)
return error
def _check_string_param(sparseness, solver):
allowed_sparseness = (None, 'data', 'components')
if sparseness not in allowed_sparseness:
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, allowed_sparseness))
allowed_solver = ('pg', 'cd')
if solver not in allowed_solver:
raise ValueError(
'Invalid solver parameter: got %r instead of one of %r' %
(solver, allowed_solver))
def _initialize_nmf(X, n_components, init=None, eps=1e-6,
random_state=None):
"""Algorithms for NMF initialization.
Computes an initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix to be decomposed.
n_components : integer
The number of components desired in the approximation.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar'
Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise 'random'.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
eps : float
        Truncate all values less than this in output to zero.
random_state : int seed, RandomState instance, or None (default)
Random number generator seed control, used in 'nndsvdar' and
'random' modes.
Returns
-------
W : array-like, shape (n_samples, n_components)
Initial guesses for solving X ~= WH
H : array-like, shape (n_components, n_features)
Initial guesses for solving X ~= WH
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
n_samples, n_features = X.shape
if init is None:
if n_components < n_features:
init = 'nndsvd'
else:
init = 'random'
# Random initialization
if init == 'random':
avg = np.sqrt(X.mean() / n_components)
rng = check_random_state(random_state)
H = avg * rng.randn(n_components, n_features)
W = avg * rng.randn(n_samples, n_components)
# we do not write np.abs(H, out=H) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(H, H)
np.abs(W, W)
return W, H
# NNDSVD initialization
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if init == "nndsvd":
pass
elif init == "nndsvda":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif init == "nndsvdar":
rng = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * rng.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * rng.randn(len(H[H == 0])) / 100)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar')))
return W, H
def _nls_subproblem(V, W, H, tol, max_iter, alpha=0., l1_ratio=0.,
sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the projected
gradient descent algorithm.
Parameters
----------
V : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Constant matrix.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
        Larger values make it possible to find a better step size but lead
        to a longer line search. 0.1 is a commonly used value in the
        optimization literature.
Returns
-------
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
grad : array-like, shape (n_components, n_features)
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = fast_dot(W.T, W)
# values justified in the paper (alpha is renamed gamma)
gamma = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
if alpha > 0 and l1_ratio == 1.:
grad += alpha
elif alpha > 0:
grad += alpha * (l1_ratio + (1 - l1_ratio) * H)
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(20):
# Gradient step.
Hn = H - gamma * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_gamma = not suff_decr
if decr_gamma:
if suff_decr:
H = Hn
break
else:
gamma *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
gamma /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
def _update_projected_gradient_w(X, W, H, tolW, nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Helper function for _fit_projected_gradient"""
n_samples, n_features = X.shape
n_components_ = H.shape[0]
if sparseness is None:
Wt, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'data':
Wt, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(beta) * np.ones((1,
n_components_))]),
W.T, tolW, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'components':
Wt, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(eta) * np.eye(n_components_)]),
W.T, tolW, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
return Wt.T, gradW.T, iterW
def _update_projected_gradient_h(X, W, H, tolH, nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Helper function for _fit_projected_gradient"""
n_samples, n_features = X.shape
n_components_ = W.shape[1]
if sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((n_components_, n_features))]),
safe_vstack([W,
np.sqrt(eta) * np.eye(n_components_)]),
H, tolH, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W, np.sqrt(beta) * np.ones((1, n_components_))]),
H, tolH, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
return H, gradH, iterH
def _fit_projected_gradient(X, W, H, tol, max_iter,
nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Compute Non-negative Matrix Factorization (NMF) with Projected Gradient
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with Sparseness Constraints.
Journal of Machine Learning Research 2004.
"""
gradW = (np.dot(W, np.dot(H, H.T)) -
safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H) -
safe_sparse_dot(W.T, X, dense_output=True))
init_grad = squared_norm(gradW) + squared_norm(gradH.T)
# max(0.001, tol) to force alternating minimizations of W and H
tolW = max(0.001, tol) * np.sqrt(init_grad)
tolH = tolW
for n_iter in range(1, max_iter + 1):
# stopping condition
# as discussed in paper
proj_grad_W = squared_norm(gradW * np.logical_or(gradW < 0, W > 0))
proj_grad_H = squared_norm(gradH * np.logical_or(gradH < 0, H > 0))
if (proj_grad_W + proj_grad_H) / init_grad < tol ** 2:
break
# update W
W, gradW, iterW = _update_projected_gradient_w(X, W, H, tolW,
nls_max_iter,
alpha, l1_ratio,
sparseness, beta, eta)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = _update_projected_gradient_h(X, W, H, tolH,
nls_max_iter,
alpha, l1_ratio,
sparseness, beta, eta)
if iterH == 1:
tolH = 0.1 * tolH
H[H == 0] = 0 # fix up negative zeros
if n_iter == max_iter:
W, _, _ = _update_projected_gradient_w(X, W, H, tol, nls_max_iter,
alpha, l1_ratio, sparseness,
beta, eta)
return W, H, n_iter
def _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle,
random_state):
"""Helper function for _fit_coordinate_descent
Update W to minimize the objective function, iterating once over all
coordinates. By symmetry, to update H, one can call
_update_coordinate_descent(X.T, Ht, W, ...)
"""
n_components = Ht.shape[1]
HHt = fast_dot(Ht.T, Ht)
XHt = safe_sparse_dot(X, Ht)
# L2 regularization corresponds to increase of the diagonal of HHt
if l2_reg != 0.:
# adds l2_reg only on the diagonal
HHt.flat[::n_components + 1] += l2_reg
# L1 regularization corresponds to decrease of each element of XHt
if l1_reg != 0.:
XHt -= l1_reg
if shuffle:
permutation = random_state.permutation(n_components)
else:
permutation = np.arange(n_components)
# The following seems to be required on 64-bit Windows w/ Python 3.5.
permutation = np.asarray(permutation, dtype=np.intp)
return _update_cdnmf_fast(W, HHt, XHt, permutation)
def _fit_coordinate_descent(X, W, H, tol=1e-4, max_iter=200, alpha=0.001,
l1_ratio=0., regularization=None, update_H=True,
verbose=0, shuffle=False, random_state=None):
"""Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent
The objective function is minimized with an alternating minimization of W
and H. Each minimization is done with a cyclic (up to a permutation of the
features) Coordinate Descent.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Initial guess for the solution.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
regularization : 'both' | 'components' | 'transformation' | None
Select whether the regularization affects the components (H), the
transformation (W), both or none of them.
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
The number of iterations done by the algorithm.
References
----------
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
# so W and Ht are both in C order in memory
Ht = check_array(H.T, order='C')
X = check_array(X, accept_sparse='csr')
# L1 and L2 regularization
l1_H, l2_H, l1_W, l2_W = 0, 0, 0, 0
if regularization in ('both', 'components'):
alpha = float(alpha)
l1_H = l1_ratio * alpha
l2_H = (1. - l1_ratio) * alpha
if regularization in ('both', 'transformation'):
alpha = float(alpha)
l1_W = l1_ratio * alpha
l2_W = (1. - l1_ratio) * alpha
rng = check_random_state(random_state)
for n_iter in range(max_iter):
violation = 0.
# Update W
violation += _update_coordinate_descent(X, W, Ht, l1_W, l2_W,
shuffle, rng)
# Update H
if update_H:
violation += _update_coordinate_descent(X.T, Ht, W, l1_H, l2_H,
shuffle, rng)
if n_iter == 0:
violation_init = violation
if violation_init == 0:
break
if verbose:
print("violation:", violation / violation_init)
if violation / violation_init <= tol:
if verbose:
print("Converged at iteration", n_iter + 1)
break
return W, Ht.T, n_iter
def non_negative_factorization(X, W=None, H=None, n_components=None,
init='random', update_H=True, solver='cd',
tol=1e-4, max_iter=200, alpha=0., l1_ratio=0.,
regularization=None, random_state=None,
verbose=0, shuffle=False, nls_max_iter=2000,
sparseness=None, beta=1, eta=0.1):
"""Compute Non-negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H. If H is given and update_H=False, it solves for W only.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
If update_H=False, it is used as a constant, to solve for W only.
n_components : integer
Number of components, if n_components is not set all features
are kept.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a (deprecated) Projected Gradient solver.
'cd' is a Coordinate Descent solver.
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
regularization : 'both' | 'components' | 'transformation' | None
Select whether the regularization affects the components (H), the
transformation (W), both or none of them.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error. Used only in the deprecated 'pg' solver.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
Actual number of iterations.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
X = check_array(X, accept_sparse=('csr', 'csc'))
check_non_negative(X, "NMF (input X)")
_check_string_param(sparseness, solver)
n_samples, n_features = X.shape
if n_components is None:
n_components = n_features
if not isinstance(n_components, six.integer_types) or n_components <= 0:
raise ValueError("Number of components must be positive;"
" got (n_components=%r)" % n_components)
if not isinstance(max_iter, numbers.Number) or max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % max_iter)
if not isinstance(tol, numbers.Number) or tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % tol)
# check W and H, or initialize them
if init == 'custom' and update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
_check_init(W, (n_samples, n_components), "NMF (input W)")
elif not update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
W = np.zeros((n_samples, n_components))
else:
W, H = _initialize_nmf(X, n_components, init=init,
random_state=random_state)
if solver == 'pg':
warnings.warn("'pg' solver will be removed in release 0.19."
" Use 'cd' solver instead.", DeprecationWarning)
if update_H: # fit_transform
W, H, n_iter = _fit_projected_gradient(X, W, H, tol,
max_iter,
nls_max_iter,
alpha, l1_ratio,
sparseness,
beta, eta)
else: # transform
W, H, n_iter = _update_projected_gradient_w(X, W, H,
tol, nls_max_iter,
alpha, l1_ratio,
sparseness, beta,
eta)
elif solver == 'cd':
W, H, n_iter = _fit_coordinate_descent(X, W, H, tol,
max_iter,
alpha, l1_ratio,
regularization,
update_H=update_H,
verbose=verbose,
shuffle=shuffle,
random_state=random_state)
else:
raise ValueError("Invalid solver parameter '%s'." % solver)
if n_iter == max_iter:
warnings.warn("Maximum number of iteration %d reached. Increase it to"
" improve convergence." % max_iter, ConvergenceWarning)
return W, H, n_iter
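# Illustrative usage sketch (the random data below is assumed for
# demonstration and is not part of this module):
#
#     import numpy as np
#     X = np.abs(np.random.RandomState(0).randn(6, 5))
#     W, H, n_iter = non_negative_factorization(
#         X, n_components=2, init='random', random_state=0)
#     assert W.shape == (6, 2) and H.shape == (2, 5)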
class NMF(BaseEstimator, TransformerMixin):
"""Non-Negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H.
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all features
are kept.
init : 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a Projected Gradient solver (deprecated).
'cd' is a Coordinate Descent solver (recommended).
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : integer, default: 200
Number of iterations to compute.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
.. versionadded:: 0.17
*alpha* used in the Coordinate Descent solver.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
.. versionadded:: 0.17
Regularization parameter *l1_ratio* used in the Coordinate Descent
solver.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
.. versionadded:: 0.17
*shuffle* parameter used in the Coordinate Descent solver.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Actual number of iterations.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import NMF
>>> model = NMF(n_components=2, init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
NMF(alpha=0.0, beta=1, eta=0.1, init='random', l1_ratio=0.0, max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, shuffle=False,
solver='cd', sparseness=None, tol=0.0001, verbose=0)
>>> model.components_
array([[ 2.09783018, 0.30560234],
[ 2.13443044, 2.13171694]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00115993...
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
def __init__(self, n_components=None, init=None, solver='cd',
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., verbose=0, shuffle=False,
nls_max_iter=2000, sparseness=None, beta=1, eta=0.1):
self.n_components = n_components
self.init = init
self.solver = solver
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.alpha = alpha
self.l1_ratio = l1_ratio
self.verbose = verbose
self.shuffle = shuffle
if sparseness is not None:
warnings.warn("Controlling regularization through the sparseness,"
" beta and eta arguments is only available"
" for 'pg' solver, which will be removed"
" in release 0.19. Use another solver with L1 or L2"
" regularization instead.", DeprecationWarning)
self.nls_max_iter = nls_max_iter
self.sparseness = sparseness
self.beta = beta
self.eta = eta
def fit_transform(self, X, y=None, W=None, H=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
Attributes
----------
components_ : array-like, shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
W: array, shape (n_samples, n_components)
Transformed data.
"""
X = check_array(X, accept_sparse=('csr', 'csc'))
W, H, n_iter_ = non_negative_factorization(
X=X, W=W, H=H, n_components=self.n_components,
init=self.init, update_H=True, solver=self.solver,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle,
nls_max_iter=self.nls_max_iter, sparseness=self.sparseness,
beta=self.beta, eta=self.eta)
if self.solver == 'pg':
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
self.reconstruction_err_ = _safe_compute_error(X, W, H)
self.n_components_ = H.shape[0]
self.components_ = H
self.n_iter_ = n_iter_
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
Attributes
----------
components_ : array-like, shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be transformed by the model
Attributes
----------
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
W: array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'n_components_')
W, _, n_iter_ = non_negative_factorization(
X=X, W=None, H=self.components_, n_components=self.n_components_,
init=self.init, update_H=False, solver=self.solver,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle,
nls_max_iter=self.nls_max_iter, sparseness=self.sparseness,
beta=self.beta, eta=self.eta)
self.n_iter_ = n_iter_
return W
def inverse_transform(self, W):
"""
Parameters
----------
W: {array-like, sparse matrix}, shape (n_samples, n_components)
Transformed Data matrix
Returns
-------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix of original shape
.. versionadded:: 0.18
"""
check_is_fitted(self, 'n_components_')
return np.dot(W, self.components_)
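# --- Illustrative sketch (added for exposition; not part of the original
# module). A minimal round-trip through fit_transform/inverse_transform on
# the NMF class above; the helper name is ours and the returned error is
# approximate, not a reference value.
def _nmf_roundtrip_demo():
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(6, 4))        # NMF requires non-negative data
    model = NMF(n_components=2, init='random', random_state=0)
    W = model.fit_transform(X)         # W has shape (6, 2)
    X_hat = model.inverse_transform(W)  # same as np.dot(W, model.components_)
    return np.linalg.norm(X - X_hat)   # Frobenius reconstruction error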
@deprecated("It will be removed in release 0.19. Use NMF instead."
"'pg' solver is still available until release 0.19.")
class ProjectedGradientNMF(NMF):
"""Non-Negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H.
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all features
are kept.
init : 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
Default: 'nndsvdar' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a Projected Gradient solver (deprecated).
'cd' is a Coordinate Descent solver (recommended).
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : integer, default: 200
Number of iterations to compute.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
.. versionadded:: 0.17
*alpha* used in the Coordinate Descent solver.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
.. versionadded:: 0.17
Regularization parameter *l1_ratio* used in the Coordinate Descent
solver.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
.. versionadded:: 0.17
*shuffle* parameter used in the Coordinate Descent solver.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
eta : double, default: 0.1
        Degree of correctness to maintain, if sparseness is not None. Smaller
        values mean larger error. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Actual number of iterations.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import NMF
>>> model = NMF(n_components=2, init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
NMF(alpha=0.0, beta=1, eta=0.1, init='random', l1_ratio=0.0, max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, shuffle=False,
solver='cd', sparseness=None, tol=0.0001, verbose=0)
>>> model.components_
array([[ 2.09783018, 0.30560234],
[ 2.13443044, 2.13171694]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00115993...
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
def __init__(self, n_components=None, solver='pg', init=None,
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., verbose=0,
nls_max_iter=2000, sparseness=None, beta=1, eta=0.1):
super(ProjectedGradientNMF, self).__init__(
n_components=n_components, init=init, solver='pg', tol=tol,
max_iter=max_iter, random_state=random_state, alpha=alpha,
l1_ratio=l1_ratio, verbose=verbose, nls_max_iter=nls_max_iter,
sparseness=sparseness, beta=beta, eta=eta)
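# --- Illustrative sketch (added for exposition; not part of the original
# module). ProjectedGradientNMF is a thin shim over NMF: the __init__ above
# always forwards solver='pg', so the two instances below end up configured
# identically. The helper name is ours.
def _pg_shim_demo():
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', DeprecationWarning)
        shim = ProjectedGradientNMF(n_components=2)
    direct = NMF(n_components=2, solver='pg')
    return shim.solver == direct.solver == 'pg'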
|
fabioticconi/scikit-learn
|
sklearn/decomposition/nmf.py
|
Python
|
bsd-3-clause
| 46,993
|
from __future__ import division, absolute_import, print_function
import sys
try:
# Accessing collections abstract classes from collections
# has been deprecated since Python 3.3
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
import textwrap
from os import path
import pytest
import numpy as np
from numpy.compat import Path
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_array_almost_equal,
assert_raises, temppath
)
from numpy.compat import pickle
class TestFromrecords(object):
def test_fromrecords(self):
r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]],
names='col1,col2,col3')
assert_equal(r[0].item(), (456, 'dbe', 1.2))
assert_equal(r['col1'].dtype.kind, 'i')
if sys.version_info[0] >= 3:
assert_equal(r['col2'].dtype.kind, 'U')
assert_equal(r['col2'].dtype.itemsize, 12)
else:
assert_equal(r['col2'].dtype.kind, 'S')
assert_equal(r['col2'].dtype.itemsize, 3)
assert_equal(r['col3'].dtype.kind, 'f')
def test_fromrecords_0len(self):
""" Verify fromrecords works with a 0-length input """
dtype = [('a', float), ('b', float)]
r = np.rec.fromrecords([], dtype=dtype)
assert_equal(r.shape, (0,))
def test_fromrecords_2d(self):
data = [
[(1, 2), (3, 4), (5, 6)],
[(6, 5), (4, 3), (2, 1)]
]
expected_a = [[1, 3, 5], [6, 4, 2]]
expected_b = [[2, 4, 6], [5, 3, 1]]
# try with dtype
r1 = np.rec.fromrecords(data, dtype=[('a', int), ('b', int)])
assert_equal(r1['a'], expected_a)
assert_equal(r1['b'], expected_b)
# try with names
r2 = np.rec.fromrecords(data, names=['a', 'b'])
assert_equal(r2['a'], expected_a)
assert_equal(r2['b'], expected_b)
assert_equal(r1, r2)
def test_method_array(self):
r = np.rec.array(b'abcdefg' * 100, formats='i2,a3,i4', shape=3, byteorder='big')
assert_equal(r[1].item(), (25444, b'efg', 1633837924))
def test_method_array2(self):
r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'),
(6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
assert_equal(r[1].item(), (2, 22.0, b'b'))
def test_recarray_slices(self):
r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'),
(6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
assert_equal(r[1::2][1].item(), (4, 44.0, b'd'))
def test_recarray_fromarrays(self):
x1 = np.array([1, 2, 3, 4])
x2 = np.array(['a', 'dd', 'xyz', '12'])
x3 = np.array([1.1, 2, 3, 4])
r = np.rec.fromarrays([x1, x2, x3], names='a,b,c')
assert_equal(r[1].item(), (2, 'dd', 2.0))
x1[1] = 34
assert_equal(r.a, np.array([1, 2, 3, 4]))
def test_recarray_fromfile(self):
data_dir = path.join(path.dirname(__file__), 'data')
filename = path.join(data_dir, 'recarray_from_file.fits')
fd = open(filename, 'rb')
fd.seek(2880 * 2)
r1 = np.rec.fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big')
fd.seek(2880 * 2)
r2 = np.rec.array(fd, formats='f8,i4,a5', shape=3, byteorder='big')
fd.close()
assert_equal(r1, r2)
def test_recarray_from_obj(self):
count = 10
a = np.zeros(count, dtype='O')
b = np.zeros(count, dtype='f8')
c = np.zeros(count, dtype='f8')
for i in range(len(a)):
a[i] = list(range(1, 10))
mine = np.rec.fromarrays([a, b, c], names='date,data1,data2')
for i in range(len(a)):
assert_((mine.date[i] == list(range(1, 10))))
assert_((mine.data1[i] == 0.0))
assert_((mine.data2[i] == 0.0))
def test_recarray_repr(self):
a = np.array([(1, 0.1), (2, 0.2)],
dtype=[('foo', '<i4'), ('bar', '<f8')])
a = np.rec.array(a)
assert_equal(
repr(a),
textwrap.dedent("""\
rec.array([(1, 0.1), (2, 0.2)],
dtype=[('foo', '<i4'), ('bar', '<f8')])""")
)
# make sure non-structured dtypes also show up as rec.array
a = np.array(np.ones(4, dtype='f8'))
assert_(repr(np.rec.array(a)).startswith('rec.array'))
# check that the 'np.record' part of the dtype isn't shown
a = np.rec.array(np.ones(3, dtype='i4,i4'))
assert_equal(repr(a).find('numpy.record'), -1)
a = np.rec.array(np.ones(3, dtype='i4'))
assert_(repr(a).find('dtype=int32') != -1)
def test_0d_recarray_repr(self):
arr_0d = np.rec.array((1, 2.0, '2003'), dtype='<i4,<f8,<M8[Y]')
assert_equal(repr(arr_0d), textwrap.dedent("""\
rec.array((1, 2., '2003'),
dtype=[('f0', '<i4'), ('f1', '<f8'), ('f2', '<M8[Y]')])"""))
record = arr_0d[()]
assert_equal(repr(record), "(1, 2., '2003')")
# 1.13 converted to python scalars before the repr
try:
np.set_printoptions(legacy='1.13')
assert_equal(repr(record), '(1, 2.0, datetime.date(2003, 1, 1))')
finally:
np.set_printoptions(legacy=False)
def test_recarray_from_repr(self):
a = np.array([(1,'ABC'), (2, "DEF")],
dtype=[('foo', int), ('bar', 'S4')])
recordarr = np.rec.array(a)
recarr = a.view(np.recarray)
recordview = a.view(np.dtype((np.record, a.dtype)))
recordarr_r = eval("numpy." + repr(recordarr), {'numpy': np})
recarr_r = eval("numpy." + repr(recarr), {'numpy': np})
recordview_r = eval("numpy." + repr(recordview), {'numpy': np})
assert_equal(type(recordarr_r), np.recarray)
assert_equal(recordarr_r.dtype.type, np.record)
assert_equal(recordarr, recordarr_r)
assert_equal(type(recarr_r), np.recarray)
assert_equal(recarr_r.dtype.type, np.record)
assert_equal(recarr, recarr_r)
assert_equal(type(recordview_r), np.ndarray)
assert_equal(recordview.dtype.type, np.record)
assert_equal(recordview, recordview_r)
def test_recarray_views(self):
a = np.array([(1,'ABC'), (2, "DEF")],
dtype=[('foo', int), ('bar', 'S4')])
b = np.array([1,2,3,4,5], dtype=np.int64)
#check that np.rec.array gives right dtypes
assert_equal(np.rec.array(a).dtype.type, np.record)
assert_equal(type(np.rec.array(a)), np.recarray)
assert_equal(np.rec.array(b).dtype.type, np.int64)
assert_equal(type(np.rec.array(b)), np.recarray)
#check that viewing as recarray does the same
assert_equal(a.view(np.recarray).dtype.type, np.record)
assert_equal(type(a.view(np.recarray)), np.recarray)
assert_equal(b.view(np.recarray).dtype.type, np.int64)
assert_equal(type(b.view(np.recarray)), np.recarray)
#check that view to non-structured dtype preserves type=np.recarray
r = np.rec.array(np.ones(4, dtype="f4,i4"))
rv = r.view('f8').view('f4,i4')
assert_equal(type(rv), np.recarray)
assert_equal(rv.dtype.type, np.record)
#check that getitem also preserves np.recarray and np.record
r = np.rec.array(np.ones(4, dtype=[('a', 'i4'), ('b', 'i4'),
('c', 'i4,i4')]))
assert_equal(r['c'].dtype.type, np.record)
assert_equal(type(r['c']), np.recarray)
#and that it preserves subclasses (gh-6949)
class C(np.recarray):
pass
c = r.view(C)
assert_equal(type(c['c']), C)
# check that accessing nested structures keep record type, but
# not for subarrays, non-void structures, non-structured voids
test_dtype = [('a', 'f4,f4'), ('b', 'V8'), ('c', ('f4',2)),
('d', ('i8', 'i4,i4'))]
r = np.rec.array([((1,1), b'11111111', [1,1], 1),
((1,1), b'11111111', [1,1], 1)], dtype=test_dtype)
assert_equal(r.a.dtype.type, np.record)
assert_equal(r.b.dtype.type, np.void)
assert_equal(r.c.dtype.type, np.float32)
assert_equal(r.d.dtype.type, np.int64)
# check the same, but for views
r = np.rec.array(np.ones(4, dtype='i4,i4'))
assert_equal(r.view('f4,f4').dtype.type, np.record)
assert_equal(r.view(('i4',2)).dtype.type, np.int32)
assert_equal(r.view('V8').dtype.type, np.void)
assert_equal(r.view(('i8', 'i4,i4')).dtype.type, np.int64)
#check that we can undo the view
arrs = [np.ones(4, dtype='f4,i4'), np.ones(4, dtype='f8')]
for arr in arrs:
rec = np.rec.array(arr)
# recommended way to view as an ndarray:
arr2 = rec.view(rec.dtype.fields or rec.dtype, np.ndarray)
assert_equal(arr2.dtype.type, arr.dtype.type)
assert_equal(type(arr2), type(arr))
def test_recarray_from_names(self):
ra = np.rec.array([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy', 6.6999998092651367, 1),
(0, ' ', 0.40000000596046448, 0)],
names='c1, c2, c3, c4')
pa = np.rec.fromrecords([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy', 6.6999998092651367, 1),
(0, ' ', 0.40000000596046448, 0)],
names='c1, c2, c3, c4')
assert_(ra.dtype == pa.dtype)
assert_(ra.shape == pa.shape)
for k in range(len(ra)):
assert_(ra[k].item() == pa[k].item())
def test_recarray_conflict_fields(self):
ra = np.rec.array([(1, 'abc', 2.3), (2, 'xyz', 4.2),
(3, 'wrs', 1.3)],
names='field, shape, mean')
ra.mean = [1.1, 2.2, 3.3]
assert_array_almost_equal(ra['mean'], [1.1, 2.2, 3.3])
assert_(type(ra.mean) is type(ra.var))
ra.shape = (1, 3)
assert_(ra.shape == (1, 3))
ra.shape = ['A', 'B', 'C']
assert_array_equal(ra['shape'], [['A', 'B', 'C']])
ra.field = 5
assert_array_equal(ra['field'], [[5, 5, 5]])
assert_(isinstance(ra.field, collections_abc.Callable))
def test_fromrecords_with_explicit_dtype(self):
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')],
dtype=[('a', int), ('b', object)])
assert_equal(a.a, [1, 2])
assert_equal(a[0].a, 1)
assert_equal(a.b, ['a', 'bbb'])
assert_equal(a[-1].b, 'bbb')
#
ndtype = np.dtype([('a', int), ('b', object)])
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], dtype=ndtype)
assert_equal(a.a, [1, 2])
assert_equal(a[0].a, 1)
assert_equal(a.b, ['a', 'bbb'])
assert_equal(a[-1].b, 'bbb')
def test_recarray_stringtypes(self):
# Issue #3993
a = np.array([('abc ', 1), ('abc', 2)],
dtype=[('foo', 'S4'), ('bar', int)])
a = a.view(np.recarray)
assert_equal(a.foo[0] == a.foo[1], False)
def test_recarray_returntypes(self):
qux_fields = {'C': (np.dtype('S5'), 0), 'D': (np.dtype('S5'), 6)}
a = np.rec.array([('abc ', (1,1), 1, ('abcde', 'fgehi')),
('abc', (2,3), 1, ('abcde', 'jklmn'))],
dtype=[('foo', 'S4'),
('bar', [('A', int), ('B', int)]),
('baz', int), ('qux', qux_fields)])
assert_equal(type(a.foo), np.ndarray)
assert_equal(type(a['foo']), np.ndarray)
assert_equal(type(a.bar), np.recarray)
assert_equal(type(a['bar']), np.recarray)
assert_equal(a.bar.dtype.type, np.record)
assert_equal(type(a['qux']), np.recarray)
assert_equal(a.qux.dtype.type, np.record)
assert_equal(dict(a.qux.dtype.fields), qux_fields)
assert_equal(type(a.baz), np.ndarray)
assert_equal(type(a['baz']), np.ndarray)
assert_equal(type(a[0].bar), np.record)
assert_equal(type(a[0]['bar']), np.record)
assert_equal(a[0].bar.A, 1)
assert_equal(a[0].bar['A'], 1)
assert_equal(a[0]['bar'].A, 1)
assert_equal(a[0]['bar']['A'], 1)
assert_equal(a[0].qux.D, b'fgehi')
assert_equal(a[0].qux['D'], b'fgehi')
assert_equal(a[0]['qux'].D, b'fgehi')
assert_equal(a[0]['qux']['D'], b'fgehi')
def test_zero_width_strings(self):
# Test for #6430, based on the test case from #1901
cols = [['test'] * 3, [''] * 3]
rec = np.rec.fromarrays(cols)
assert_equal(rec['f0'], ['test', 'test', 'test'])
assert_equal(rec['f1'], ['', '', ''])
dt = np.dtype([('f0', '|S4'), ('f1', '|S')])
rec = np.rec.fromarrays(cols, dtype=dt)
assert_equal(rec.itemsize, 4)
assert_equal(rec['f0'], [b'test', b'test', b'test'])
assert_equal(rec['f1'], [b'', b'', b''])
@pytest.mark.skipif(Path is None, reason="No pathlib.Path")
class TestPathUsage(object):
# Test that pathlib.Path can be used
def test_tofile_fromfile(self):
with temppath(suffix='.bin') as path:
path = Path(path)
np.random.seed(123)
a = np.random.rand(10).astype('f8,i4,a5')
a[5] = (0.5,10,'abcde')
with path.open("wb") as fd:
a.tofile(fd)
x = np.core.records.fromfile(path,
formats='f8,i4,a5',
shape=10)
assert_array_equal(x, a)
class TestRecord(object):
def setup(self):
self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)],
dtype=[("col1", "<i4"),
("col2", "<i4"),
("col3", "<i4")])
def test_assignment1(self):
a = self.data
assert_equal(a.col1[0], 1)
a[0].col1 = 0
assert_equal(a.col1[0], 0)
def test_assignment2(self):
a = self.data
assert_equal(a.col1[0], 1)
a.col1[0] = 0
assert_equal(a.col1[0], 0)
def test_invalid_assignment(self):
a = self.data
def assign_invalid_column(x):
x[0].col5 = 1
assert_raises(AttributeError, assign_invalid_column, a)
def test_nonwriteable_setfield(self):
# gh-8171
r = np.rec.array([(0,), (1,)], dtype=[('f', 'i4')])
r.flags.writeable = False
with assert_raises(ValueError):
r.f = [2, 3]
with assert_raises(ValueError):
r.setfield([2,3], *r.dtype.fields['f'])
def test_out_of_order_fields(self):
# names in the same order, padding added to descr
x = self.data[['col1', 'col2']]
assert_equal(x.dtype.names, ('col1', 'col2'))
assert_equal(x.dtype.descr,
[('col1', '<i4'), ('col2', '<i4'), ('', '|V4')])
# names change order to match indexing, as of 1.14 - descr can't
# represent that
y = self.data[['col2', 'col1']]
assert_equal(y.dtype.names, ('col2', 'col1'))
assert_raises(ValueError, lambda: y.dtype.descr)
def test_pickle_1(self):
# Issue #1529
a = np.array([(1, [])], dtype=[('a', np.int32), ('b', np.int32, 0)])
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
assert_equal(a, pickle.loads(pickle.dumps(a, protocol=proto)))
assert_equal(a[0], pickle.loads(pickle.dumps(a[0],
protocol=proto)))
def test_pickle_2(self):
a = self.data
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
assert_equal(a, pickle.loads(pickle.dumps(a, protocol=proto)))
assert_equal(a[0], pickle.loads(pickle.dumps(a[0],
protocol=proto)))
def test_pickle_3(self):
# Issue #7140
a = self.data
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
pa = pickle.loads(pickle.dumps(a[0], protocol=proto))
assert_(pa.flags.c_contiguous)
assert_(pa.flags.f_contiguous)
assert_(pa.flags.writeable)
assert_(pa.flags.aligned)
def test_objview_record(self):
# https://github.com/numpy/numpy/issues/2599
dt = np.dtype([('foo', 'i8'), ('bar', 'O')])
r = np.zeros((1,3), dtype=dt).view(np.recarray)
r.foo = np.array([1, 2, 3]) # TypeError?
# https://github.com/numpy/numpy/issues/3256
ra = np.recarray((2,), dtype=[('x', object), ('y', float), ('z', int)])
ra[['x','y']] # TypeError?
def test_record_scalar_setitem(self):
# https://github.com/numpy/numpy/issues/3561
rec = np.recarray(1, dtype=[('x', float, 5)])
rec[0].x = 1
assert_equal(rec[0].x, np.ones(5))
def test_missing_field(self):
# https://github.com/numpy/numpy/issues/4806
arr = np.zeros((3,), dtype=[('x', int), ('y', int)])
assert_raises(ValueError, lambda: arr[['nofield']])
def test_fromarrays_nested_structured_arrays(self):
arrays = [
np.arange(10),
np.ones(10, dtype=[('a', '<u2'), ('b', '<f4')]),
]
arr = np.rec.fromarrays(arrays) # ValueError?
def test_find_duplicate():
l1 = [1, 2, 3, 4, 5, 6]
assert_(np.rec.find_duplicate(l1) == [])
l2 = [1, 2, 1, 4, 5, 6]
assert_(np.rec.find_duplicate(l2) == [1])
l3 = [1, 2, 1, 4, 1, 6, 2, 3]
assert_(np.rec.find_duplicate(l3) == [1, 2])
l3 = [2, 2, 1, 4, 1, 6, 2, 3]
assert_(np.rec.find_duplicate(l3) == [2, 1])
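# --- Illustrative sketch (added for exposition; not part of the original
# test module). A compact demonstration of the record-array behaviours the
# tests above exercise: per-field attribute access, equivalent item access,
# and row unpacking via .item().
def _recarray_demo():
    r = np.rec.fromrecords([(1, 'abc', 2.5), (2, 'xyz', 4.0)],
                           names='num,txt,val')
    assert_array_equal(r.num, [1, 2])           # column by attribute
    assert_equal(r['txt'][1], 'xyz')            # column by item, then index
    assert_equal(r[0].item(), (1, 'abc', 2.5))  # whole row as a plain tuple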
|
shoyer/numpy
|
numpy/core/tests/test_records.py
|
Python
|
bsd-3-clause
| 17,957
|
# Copyright (C) 2017-2020 Greenweaves Software Limited
# This is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
# This file contains a collection of functions to parse FASTA format data
# FastaContent
#
# This class is used to parse FASTA format data from an iterable of lines
# (e.g. an open file, or a list of strings)
class FastaContent(object):
# parse text and build data structures
def __init__(self,file_text):
        self.pairs = []   # Description + data -- all text so far
nn = '' # Description -- current
text = '' # data -- current
for line in file_text:
line = line.strip()
if line.startswith('>'): # Description
if len(nn)>0:
self.pairs.append((nn,text))
nn = line[1:]
text = ''
else: # Data
text = text+line
if len(nn)>0:
self.pairs.append((nn,text))
# __getitem__
#
# Retrieve items (pairs) by index
def __getitem__(self,index):
return self.pairs[index]
# __len__
#
# Number of entries
def __len__(self):
return len(self.pairs)
# to_list
#
# Used if caller wants a simple list seq,value, seq,value,...
def to_list(self):
List = []
for a,b in self.pairs:
List.append(a)
List.append(b)
return List
#to_dict
#
# Used if caller wants a dictionary
def to_dict(self):
Dict = {}
for a,b in self.pairs:
Dict[a]=b
return Dict
# FastaFile
#
# This class is used to parse Fasta data from a file
class FastaFile(FastaContent):
# parse file and build data structures
def __init__(self,name):
with open(name,'r') as file_text:
FastaContent.__init__(self,file_text)
# fasta_out
#
# Generator, used to output one key value pair to a file in FASTA format
#
# Parameters:
# key Omit '>', as this will be added
# value Value string
# max_length Used to split lines so none exceeds this length
def fasta_out(key,value,max_length=800000000000):
yield f'>{key}'
remainder = value
while len(remainder)>0:
yield remainder[0:max_length]
remainder = remainder[max_length:]
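# --- Illustrative sketch (added for exposition; not part of the original
# module). FastaContent accepts any iterable of lines, so a plain list can
# stand in for a file; the round trip below also exercises fasta_out's
# line splitting. The helper name is ours.
def _fasta_demo():
    lines = ['>seq1', 'ACGT', 'ACGT', '>seq2', 'TTTT']
    fasta = FastaContent(lines)
    assert len(fasta) == 2
    assert fasta.to_dict() == {'seq1': 'ACGTACGT', 'seq2': 'TTTT'}
    assert list(fasta_out('seq1', 'ACGTACGT', max_length=4)) == \
        ['>seq1', 'ACGT', 'ACGT']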
|
weka511/bioinformatics
|
fasta.py
|
Python
|
gpl-3.0
| 2,986
|
# -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use sendgrid to send emails
- Use MEMCACHIER on Heroku
'''
from configurations import values
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
try:
from S3 import CallingFormat
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
except ImportError:
    # TODO: Fix this; the S3 import is attempted even in Dev.
pass
from .common import Common
class Production(Common):
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# INSTALLED_APPS
INSTALLED_APPS = Common.INSTALLED_APPS
# END INSTALLED_APPS
# SECRET KEY
SECRET_KEY = values.SecretValue()
# END SECRET KEY
# django-secure
INSTALLED_APPS += ("djangosecure", )
# MIDDLEWARE CONFIGURATION
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'djangosecure.middleware.SecurityMiddleware',
)
MIDDLEWARE_CLASSES += Common.MIDDLEWARE_CLASSES
# END MIDDLEWARE CONFIGURATION
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
SECURE_FRAME_DENY = values.BooleanValue(True)
SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
SESSION_COOKIE_SECURE = values.BooleanValue(False)
SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
SECURE_SSL_REDIRECT = values.BooleanValue(True)
# end django-secure
# SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
# STORAGE CONFIGURATION
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = values.SecretValue()
AWS_SECRET_ACCESS_KEY = values.SecretValue()
AWS_STORAGE_BUCKET_NAME = values.SecretValue()
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
# See: https://github.com/antonagestam/collectfast
# For Django 1.7+, 'collectfast' should come before 'django.contrib.staticfiles'
AWS_PRELOAD_METADATA = True
INSTALLED_APPS = ('collectfast', ) + INSTALLED_APPS
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
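    # For reference: 60 * 60 * 24 * 7 = 604800 seconds, i.e. objects are
    # cached for one week.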
AWS_HEADERS = {
'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY)
}
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# END STORAGE CONFIGURATION
# EMAIL
DEFAULT_FROM_EMAIL = values.Value('mdid3 <noreply@testmdid3.local>')
EMAIL_HOST = values.Value('smtp.sendgrid.com')
EMAIL_HOST_PASSWORD = values.SecretValue(environ_prefix="", environ_name="SENDGRID_PASSWORD")
EMAIL_HOST_USER = values.SecretValue(environ_prefix="", environ_name="SENDGRID_USERNAME")
EMAIL_PORT = values.IntegerValue(587, environ_prefix="", environ_name="EMAIL_PORT")
EMAIL_SUBJECT_PREFIX = values.Value('[mdid3] ', environ_name="EMAIL_SUBJECT_PREFIX")
EMAIL_USE_TLS = True
SERVER_EMAIL = EMAIL_HOST_USER
# END EMAIL
# TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
# END TEMPLATE CONFIGURATION
# CACHING
    # Only do this here because, thanks to django-pylibmc-sasl and pylibmc,
    # memcacheify is painful to install on Windows.
try:
# See: https://github.com/rdegges/django-heroku-memcacheify
from memcacheify import memcacheify
CACHES = memcacheify()
except ImportError:
CACHES = values.CacheURLValue(default="memcached://127.0.0.1:11211")
# END CACHING
# Your production stuff: Below this line define 3rd party library settings
|
hanleybrand/mdid3-core-a-goof-around
|
mdid3/config/production.py
|
Python
|
bsd-3-clause
| 4,733
|
import ngspyce
import numpy as np
from matplotlib import pyplot as plt
ngspyce.source('quad.net')
#trrandom(2 2m 0 10m 0)
ngspyce.cmd('tran 1m 20m')
print('\n'.join(ngspyce.vector_names()))
time, vsin, vcos = map(ngspyce.vector, ['time', 'Vsin', 'Vcos'])
#np.savetxt('vcos.txt', vcos)
#np.savetxt('vsin.txt', vsin)
#np.savetxt('time.txt', time)
#exit()
plt.plot(time, vcos, label='Vcos')
plt.plot(time, vsin, label='Vsin')
plt.legend()
plt.show()
|
ignamv/ngspyce
|
examples/quadrature_oscillator/quadosc.py
|
Python
|
gpl-2.0
| 448
|
# -*- coding: utf-8 -*-
#
# Copyright 2010-2015 The pygit2 contributors
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2,
# as published by the Free Software Foundation.
#
# In addition to the permissions in the GNU General Public License,
# the authors give you unlimited permission to link the compiled
# version of this file into combinations with other programs,
# and to distribute those combinations without any restriction
# coming from the use of this file. (The General Public License
# restrictions do apply in other respects; for example, they cover
# modification of the file, and distribution when not linked into
# a combined executable.)
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
"""Tests for Commit objects."""
from __future__ import absolute_import
from __future__ import unicode_literals
import operator
import unittest
from pygit2 import TreeEntry, GIT_FILEMODE_TREE
from . import utils
TREE_SHA = '967fce8df97cc71722d3c2a5930ef3e6f1d27b12'
SUBTREE_SHA = '614fd9a3094bf618ea938fffc00e7d1a54f89ad0'
class TreeTest(utils.BareRepoTestCase):
def assertTreeEntryEqual(self, entry, sha, name, filemode):
self.assertEqual(entry.hex, sha)
self.assertEqual(entry.name, name)
self.assertEqual(entry._name, name.encode('utf-8'))
self.assertEqual(entry.filemode, filemode,
'0%o != 0%o' % (entry.filemode, filemode))
def test_read_tree(self):
tree = self.repo[TREE_SHA]
self.assertRaises(TypeError, lambda: tree[()])
self.assertRaisesWithArg(KeyError, 'abcd', lambda: tree['abcd'])
self.assertRaisesWithArg(IndexError, -4, lambda: tree[-4])
self.assertRaisesWithArg(IndexError, 3, lambda: tree[3])
self.assertEqual(3, len(tree))
sha = '7f129fd57e31e935c6d60a0c794efe4e6927664b'
self.assertTrue('a' in tree)
self.assertTreeEntryEqual(tree[0], sha, 'a', 0o0100644)
self.assertTreeEntryEqual(tree[-3], sha, 'a', 0o0100644)
self.assertTreeEntryEqual(tree['a'], sha, 'a', 0o0100644)
sha = '85f120ee4dac60d0719fd51731e4199aa5a37df6'
self.assertTrue('b' in tree)
self.assertTreeEntryEqual(tree[1], sha, 'b', 0o0100644)
self.assertTreeEntryEqual(tree[-2], sha, 'b', 0o0100644)
self.assertTreeEntryEqual(tree['b'], sha, 'b', 0o0100644)
sha = '297efb891a47de80be0cfe9c639e4b8c9b450989'
self.assertTreeEntryEqual(tree['c/d'], sha, 'd', 0o0100644)
self.assertRaisesWithArg(KeyError, 'ab/cd', lambda: tree['ab/cd'])
def test_equality(self):
tree_a = self.repo['18e2d2e9db075f9eb43bcb2daa65a2867d29a15e']
tree_b = self.repo['2ad1d3456c5c4a1c9e40aeeddb9cd20b409623c8']
self.assertNotEqual(tree_a['a'], tree_b['a'])
self.assertNotEqual(tree_a['a'], tree_b['b'])
self.assertEqual(tree_a['b'], tree_b['b'])
def test_sorting(self):
tree_a = self.repo['18e2d2e9db075f9eb43bcb2daa65a2867d29a15e']
self.assertEqual(list(tree_a), sorted(reversed(list(tree_a))))
self.assertNotEqual(list(tree_a), reversed(list(tree_a)))
def test_read_subtree(self):
tree = self.repo[TREE_SHA]
subtree_entry = tree['c']
self.assertTreeEntryEqual(subtree_entry, SUBTREE_SHA, 'c', 0o0040000)
self.assertEqual(subtree_entry.type, 'tree')
subtree = self.repo[subtree_entry.id]
self.assertEqual(1, len(subtree))
sha = '297efb891a47de80be0cfe9c639e4b8c9b450989'
self.assertTreeEntryEqual(subtree[0], sha, 'd', 0o0100644)
def test_new_tree(self):
repo = self.repo
b0 = repo.create_blob('1')
b1 = repo.create_blob('2')
st = repo.TreeBuilder()
st.insert('a', b0, 0o0100644)
subtree = repo[st.write()]
t = repo.TreeBuilder()
t.insert('x', b0, 0o0100644)
t.insert('y', b1, 0o0100755)
t.insert('z', subtree.id, GIT_FILEMODE_TREE)
tree = repo[t.write()]
self.assertTrue('x' in tree)
self.assertTrue('y' in tree)
self.assertTrue('z' in tree)
x = tree['x']
y = tree['y']
z = tree['z']
self.assertEqual(x.filemode, 0o0100644)
self.assertEqual(y.filemode, 0o0100755)
self.assertEqual(z.filemode, GIT_FILEMODE_TREE)
self.assertEqual(repo[x.id].id, b0)
self.assertEqual(repo[y.id].id, b1)
self.assertEqual(repo[z.id].id, subtree.id)
self.assertEqual(x.type, 'blob')
self.assertEqual(y.type, 'blob')
self.assertEqual(z.type, 'tree')
def test_modify_tree(self):
tree = self.repo[TREE_SHA]
        self.assertRaises(TypeError, operator.setitem, tree, 'c', tree['a'])
        self.assertRaises(TypeError, operator.delitem, tree, 'c')
def test_iterate_tree(self):
"""
        Testing that we're able to iterate over a Tree object and that the
        resulting sha strings are consistent with the sha strings we could
        get with other Tree access methods.
"""
tree = self.repo[TREE_SHA]
for tree_entry in tree:
self.assertEqual(tree_entry, tree[tree_entry.name])
def test_deep_contains(self):
tree = self.repo[TREE_SHA]
self.assertTrue('a' in tree)
self.assertTrue('c' in tree)
self.assertTrue('c/d' in tree)
self.assertFalse('c/e' in tree)
self.assertFalse('d' in tree)
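# --- Illustrative sketch (added for exposition; not part of the original
# test module). A self-contained TreeBuilder round trip against a throwaway
# bare repository, mirroring what test_new_tree exercises above. The helper
# name is ours.
def _treebuilder_demo():
    import tempfile
    import pygit2
    repo = pygit2.init_repository(tempfile.mkdtemp(), bare=True)
    blob_id = repo.create_blob(b'hello')
    builder = repo.TreeBuilder()
    builder.insert('hello.txt', blob_id, pygit2.GIT_FILEMODE_BLOB)
    tree = repo[builder.write()]
    assert tree['hello.txt'].id == blob_id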
if __name__ == '__main__':
unittest.main()
|
Sheeo/pygit2
|
test/test_tree.py
|
Python
|
gpl-2.0
| 5,992
|
from PySide.QtCore import *
from PySide.QtGui import *
import unittest
class MyModel (QAbstractListModel):
stupidLine = QLine(0, 0, 10, 10)
def rowCount(self, parent):
return 1
def data(self, index, role):
return self.stupidLine
class TestBug693(unittest.TestCase):
def testIt(self):
app = QApplication([])
model = MyModel()
view = QListView()
view.setModel(model)
view.show()
# This must NOT throw the exception:
# RuntimeError: Internal C++ object (PySide.QtCore.QLine) already deleted.
MyModel.stupidLine.isNull()
if __name__ == "__main__":
unittest.main()
|
enthought/pyside
|
tests/QtGui/bug_693.py
|
Python
|
lgpl-2.1
| 670
|
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from models import Project
class ProjectsTest(TestCase):
fixtures = ['test_data.json']
def test_project_listing(self):
"""
Verify that the project listing page contains all projects within the
page's context.
"""
response = self.client.get(reverse("projects:list"))
self.failUnlessEqual(response.status_code, 200)
try:
response.context['project_list']
except KeyError:
self.fail("Template context did not contain project_list object.")
for project in Project.objects.published():
self.assertTrue(project in response.context['project_list'])
def test_verify_author_detail_pages(self):
"""
        Verify that each project has a detail page and that the project is
        contained within the page's context.
"""
for project in Project.objects.all():
response = self.client.get(project.get_absolute_url())
if project.published():
self.assertTrue(response.status_code == 200)
try:
self.failUnlessEqual(response.context['project'], project)
except KeyError:
self.fail("Template context did not contain project object.")
else:
self.assertTrue(response.status_code == 404)
|
mazelife/django-belleville
|
belleville/projects/tests.py
|
Python
|
apache-2.0
| 1,492
|
# -*- coding: UTF-8 -*-
#
"""ServerStore tester"""
class GnrCustomWebPage(object):
py_requires = "gnrcomponents/testhandler:TestHandlerFull,storetester:StoreTester"
dojo_theme = 'claro'
def test_1_current_page(self, pane):
"""On current page """
self.common_form(pane, datapath='test_1')
def test_2_external_page(self, pane):
"""Set in external store"""
center = self.common_pages_container(pane, height='350px', background='whitesmoke',
datapath='test_2')
self.common_form(center)
def test_3_server_data(self, pane):
"""Server shared data """
center = self.common_pages_container(pane, height='350px', background='whitesmoke',
datapath='test_3')
center.data('.foo.bar', _serverpath='xx')
fb = center.formbuilder(cols=1, border_spacing='3px')
fb.textbox(value='^.foo.bar', lbl='Server store value')
fb.textbox(value='^.foo.baz', lbl='Value not in server subscribed path')
fb.button('Ping', action='genro.ping()')
|
poppogbr/genropy
|
packages/test15/webpages/tools/server_store.py
|
Python
|
lgpl-2.1
| 1,125
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.layout_tests.models.test_configuration import *
def make_mock_all_test_configurations_set():
all_test_configurations = set()
for version, architecture in (('snowleopard', 'x86'), ('xp', 'x86'), ('win7', 'x86'), ('vista', 'x86'), ('lucid', 'x86'), ('lucid', 'x86_64')):
for build_type in ('debug', 'release'):
all_test_configurations.add(TestConfiguration(version, architecture, build_type))
return all_test_configurations
MOCK_MACROS = {
'mac': ['snowleopard'],
'win': ['xp', 'vista', 'win7'],
'linux': ['lucid'],
}
class TestConfigurationTest(unittest.TestCase):
def test_items(self):
config = TestConfiguration('xp', 'x86', 'release')
result_config_dict = {}
for category, specifier in config.items():
result_config_dict[category] = specifier
self.assertEqual({'version': 'xp', 'architecture': 'x86', 'build_type': 'release'}, result_config_dict)
def test_keys(self):
config = TestConfiguration('xp', 'x86', 'release')
result_config_keys = []
for category in config.keys():
result_config_keys.append(category)
self.assertEqual(set(['version', 'architecture', 'build_type']), set(result_config_keys))
def test_str(self):
config = TestConfiguration('xp', 'x86', 'release')
self.assertEqual('<xp, x86, release>', str(config))
def test_repr(self):
config = TestConfiguration('xp', 'x86', 'release')
self.assertEqual("TestConfig(version='xp', architecture='x86', build_type='release')", repr(config))
def test_hash(self):
config_dict = {}
config_dict[TestConfiguration('xp', 'x86', 'release')] = True
self.assertIn(TestConfiguration('xp', 'x86', 'release'), config_dict)
self.assertTrue(config_dict[TestConfiguration('xp', 'x86', 'release')])
def query_unknown_key():
return config_dict[TestConfiguration('xp', 'x86', 'debug')]
self.assertRaises(KeyError, query_unknown_key)
self.assertIn(TestConfiguration('xp', 'x86', 'release'), config_dict)
self.assertNotIn(TestConfiguration('xp', 'x86', 'debug'), config_dict)
configs_list = [TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug'), TestConfiguration('xp', 'x86', 'debug')]
self.assertEqual(len(configs_list), 3)
self.assertEqual(len(set(configs_list)), 2)
def test_eq(self):
self.assertEqual(TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'release'))
self.assertNotEquals(TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug'))
def test_values(self):
config = TestConfiguration('xp', 'x86', 'release')
result_config_values = []
for value in config.values():
result_config_values.append(value)
self.assertEqual(set(['xp', 'x86', 'release']), set(result_config_values))
class SpecifierSorterTest(unittest.TestCase):
def __init__(self, testFunc):
self._all_test_configurations = make_mock_all_test_configurations_set()
unittest.TestCase.__init__(self, testFunc)
def test_init(self):
sorter = SpecifierSorter()
self.assertIsNone(sorter.category_for_specifier('control'))
sorter = SpecifierSorter(self._all_test_configurations)
self.assertEqual(sorter.category_for_specifier('xp'), 'version')
sorter = SpecifierSorter(self._all_test_configurations, MOCK_MACROS)
self.assertEqual(sorter.category_for_specifier('mac'), 'version')
def test_add_specifier(self):
sorter = SpecifierSorter()
self.assertIsNone(sorter.category_for_specifier('control'))
sorter.add_specifier('version', 'control')
self.assertEqual(sorter.category_for_specifier('control'), 'version')
sorter.add_specifier('version', 'one')
self.assertEqual(sorter.category_for_specifier('one'), 'version')
sorter.add_specifier('architecture', 'renaissance')
self.assertEqual(sorter.category_for_specifier('one'), 'version')
self.assertEqual(sorter.category_for_specifier('renaissance'), 'architecture')
def test_add_macros(self):
sorter = SpecifierSorter(self._all_test_configurations)
sorter.add_macros(MOCK_MACROS)
self.assertEqual(sorter.category_for_specifier('mac'), 'version')
self.assertEqual(sorter.category_for_specifier('win'), 'version')
self.assertEqual(sorter.category_for_specifier('x86'), 'architecture')
def test_category_priority(self):
sorter = SpecifierSorter(self._all_test_configurations)
self.assertEqual(sorter.category_priority('version'), 0)
self.assertEqual(sorter.category_priority('build_type'), 2)
def test_specifier_priority(self):
sorter = SpecifierSorter(self._all_test_configurations)
self.assertEqual(sorter.specifier_priority('x86'), 1)
self.assertEqual(sorter.specifier_priority('snowleopard'), 0)
def test_sort_specifiers(self):
sorter = SpecifierSorter(self._all_test_configurations, MOCK_MACROS)
self.assertEqual(sorter.sort_specifiers(set()), [])
self.assertEqual(sorter.sort_specifiers(set(['x86'])), ['x86'])
self.assertEqual(sorter.sort_specifiers(set(['x86', 'win7'])), ['win7', 'x86'])
self.assertEqual(sorter.sort_specifiers(set(['x86', 'debug', 'win7'])), ['win7', 'x86', 'debug'])
self.assertEqual(sorter.sort_specifiers(set(['snowleopard', 'x86', 'debug', 'win7'])), ['snowleopard', 'win7', 'x86', 'debug'])
self.assertEqual(sorter.sort_specifiers(set(['x86', 'mac', 'debug', 'win7'])), ['mac', 'win7', 'x86', 'debug'])
class TestConfigurationConverterTest(unittest.TestCase):
def __init__(self, testFunc):
self._all_test_configurations = make_mock_all_test_configurations_set()
unittest.TestCase.__init__(self, testFunc)
def test_symmetric_difference(self):
self.assertEqual(TestConfigurationConverter.symmetric_difference([set(['a', 'b']), set(['b', 'c'])]), set(['a', 'c']))
self.assertEqual(TestConfigurationConverter.symmetric_difference([set(['a', 'b']), set(['b', 'c']), set(['b', 'd'])]), set(['a', 'c', 'd']))
def test_to_config_set(self):
converter = TestConfigurationConverter(self._all_test_configurations)
self.assertEqual(converter.to_config_set(set()), self._all_test_configurations)
self.assertEqual(converter.to_config_set(set(['foo'])), set())
self.assertEqual(converter.to_config_set(set(['xp', 'foo'])), set())
errors = []
self.assertEqual(converter.to_config_set(set(['xp', 'foo']), errors), set())
self.assertEqual(errors, ["Unrecognized specifier 'foo'"])
self.assertEqual(converter.to_config_set(set(['xp', 'x86_64'])), set())
configs_to_match = set([
TestConfiguration('xp', 'x86', 'release'),
])
self.assertEqual(converter.to_config_set(set(['xp', 'release'])), configs_to_match)
configs_to_match = set([
TestConfiguration('snowleopard', 'x86', 'release'),
TestConfiguration('vista', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'release'),
TestConfiguration('xp', 'x86', 'release'),
TestConfiguration('lucid', 'x86', 'release'),
TestConfiguration('lucid', 'x86_64', 'release'),
])
self.assertEqual(converter.to_config_set(set(['release'])), configs_to_match)
configs_to_match = set([
TestConfiguration('lucid', 'x86_64', 'release'),
TestConfiguration('lucid', 'x86_64', 'debug'),
])
self.assertEqual(converter.to_config_set(set(['x86_64'])), configs_to_match)
configs_to_match = set([
TestConfiguration('lucid', 'x86_64', 'release'),
TestConfiguration('lucid', 'x86_64', 'debug'),
TestConfiguration('lucid', 'x86', 'release'),
TestConfiguration('lucid', 'x86', 'debug'),
TestConfiguration('snowleopard', 'x86', 'release'),
TestConfiguration('snowleopard', 'x86', 'debug'),
])
self.assertEqual(converter.to_config_set(set(['lucid', 'snowleopard'])), configs_to_match)
configs_to_match = set([
TestConfiguration('lucid', 'x86', 'release'),
TestConfiguration('lucid', 'x86', 'debug'),
TestConfiguration('snowleopard', 'x86', 'release'),
TestConfiguration('snowleopard', 'x86', 'debug'),
])
self.assertEqual(converter.to_config_set(set(['lucid', 'snowleopard', 'x86'])), configs_to_match)
configs_to_match = set([
TestConfiguration('lucid', 'x86_64', 'release'),
TestConfiguration('lucid', 'x86', 'release'),
TestConfiguration('snowleopard', 'x86', 'release'),
])
self.assertEqual(converter.to_config_set(set(['lucid', 'snowleopard', 'release'])), configs_to_match)
def test_macro_expansion(self):
converter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS)
configs_to_match = set([
TestConfiguration('xp', 'x86', 'release'),
TestConfiguration('vista', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'release'),
])
self.assertEqual(converter.to_config_set(set(['win', 'release'])), configs_to_match)
configs_to_match = set([
TestConfiguration('xp', 'x86', 'release'),
TestConfiguration('vista', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'release'),
TestConfiguration('lucid', 'x86', 'release'),
TestConfiguration('lucid', 'x86_64', 'release'),
])
self.assertEqual(converter.to_config_set(set(['win', 'lucid', 'release'])), configs_to_match)
configs_to_match = set([
TestConfiguration('xp', 'x86', 'release'),
TestConfiguration('vista', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'release'),
TestConfiguration('snowleopard', 'x86', 'release'),
])
self.assertEqual(converter.to_config_set(set(['win', 'mac', 'release'])), configs_to_match)
def test_to_specifier_lists(self):
converter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS)
self.assertEqual(converter.to_specifiers_list(set(self._all_test_configurations)), [[]])
self.assertEqual(converter.to_specifiers_list(set()), [])
configs_to_match = set([
TestConfiguration('xp', 'x86', 'release'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['release', 'xp'])])
configs_to_match = set([
TestConfiguration('xp', 'x86', 'release'),
TestConfiguration('xp', 'x86', 'debug'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['xp'])])
configs_to_match = set([
TestConfiguration('lucid', 'x86_64', 'debug'),
TestConfiguration('xp', 'x86', 'release'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['release', 'xp']), set(['debug', 'x86_64', 'linux'])])
configs_to_match = set([
TestConfiguration('xp', 'x86', 'release'),
TestConfiguration('xp', 'x86', 'release'),
TestConfiguration('lucid', 'x86_64', 'debug'),
TestConfiguration('lucid', 'x86', 'debug'),
TestConfiguration('lucid', 'x86_64', 'debug'),
TestConfiguration('lucid', 'x86', 'debug'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['release', 'xp']), set(['debug', 'linux'])])
configs_to_match = set([
TestConfiguration('xp', 'x86', 'release'),
TestConfiguration('snowleopard', 'x86', 'release'),
TestConfiguration('vista', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'release'),
TestConfiguration('lucid', 'x86', 'release'),
TestConfiguration('lucid', 'x86_64', 'release'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['release'])])
configs_to_match = set([
TestConfiguration('xp', 'x86', 'release'),
TestConfiguration('snowleopard', 'x86', 'release'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['xp', 'mac', 'release'])])
configs_to_match = set([
TestConfiguration('xp', 'x86', 'release'),
TestConfiguration('snowleopard', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'debug'),
TestConfiguration('lucid', 'x86', 'release'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['win7']), set(['release', 'linux', 'x86']), set(['release', 'xp', 'mac'])])
def test_macro_collapsing(self):
macros = {'foo': ['bar', 'baz'], 'people': ['bob', 'alice', 'john']}
specifiers_list = [set(['john', 'godzilla', 'bob', 'alice'])]
TestConfigurationConverter.collapse_macros(macros, specifiers_list)
self.assertEqual(specifiers_list, [set(['people', 'godzilla'])])
specifiers_list = [set(['john', 'godzilla', 'alice'])]
TestConfigurationConverter.collapse_macros(macros, specifiers_list)
self.assertEqual(specifiers_list, [set(['john', 'godzilla', 'alice', 'godzilla'])])
specifiers_list = [set(['bar', 'godzilla', 'baz', 'bob', 'alice', 'john'])]
TestConfigurationConverter.collapse_macros(macros, specifiers_list)
self.assertEqual(specifiers_list, [set(['foo', 'godzilla', 'people'])])
specifiers_list = [set(['bar', 'godzilla', 'baz', 'bob']), set(['bar', 'baz']), set(['people', 'alice', 'bob', 'john'])]
TestConfigurationConverter.collapse_macros(macros, specifiers_list)
self.assertEqual(specifiers_list, [set(['bob', 'foo', 'godzilla']), set(['foo']), set(['people'])])
def test_converter_macro_collapsing(self):
converter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS)
configs_to_match = set([
TestConfiguration('xp', 'x86', 'release'),
TestConfiguration('vista', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'release'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['win', 'release'])])
configs_to_match = set([
TestConfiguration('xp', 'x86', 'release'),
TestConfiguration('vista', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'release'),
TestConfiguration('lucid', 'x86', 'release'),
TestConfiguration('lucid', 'x86_64', 'release'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['win', 'linux', 'release'])])
configs_to_match = set([
TestConfiguration('xp', 'x86', 'release'),
TestConfiguration('vista', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'release'),
TestConfiguration('snowleopard', 'x86', 'release'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['win', 'mac', 'release'])])
configs_to_match = set([
TestConfiguration('xp', 'x86', 'release'),
TestConfiguration('vista', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'release'),
TestConfiguration('snowleopard', 'x86', 'release'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['win', 'mac', 'release'])])
configs_to_match = set([
TestConfiguration('xp', 'x86', 'release'),
TestConfiguration('vista', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'release'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['win', 'release'])])
def test_specifier_converter_access(self):
specifier_sorter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS).specifier_sorter()
self.assertEqual(specifier_sorter.category_for_specifier('snowleopard'), 'version')
self.assertEqual(specifier_sorter.category_for_specifier('mac'), 'version')
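# --- Illustrative sketch (added for exposition; not part of the original
# test module). Both converter directions in one round trip: a specifier set
# expands to concrete configurations, which collapse back to a minimal
# specifier list. The helper name is ours.
def _converter_round_trip_demo():
    converter = TestConfigurationConverter(
        make_mock_all_test_configurations_set(), MOCK_MACROS)
    configs = converter.to_config_set(set(['xp', 'release']))
    assert configs == set([TestConfiguration('xp', 'x86', 'release')])
    assert converter.to_specifiers_list(configs) == [set(['release', 'xp'])]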
|
weolar/miniblink49
|
third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/models/test_configuration_unittest.py
|
Python
|
apache-2.0
| 18,084
|
import numpy as np
from scipy import optimize
from numpy.testing import assert_allclose
from scipy.special import factorial, xlogy
from itertools import product
import pytest
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.dummy import DummyRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_log_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import mean_absolute_percentage_error
from sklearn.metrics import max_error
from sklearn.metrics import mean_pinball_loss
from sklearn.metrics import r2_score
from sklearn.metrics import mean_tweedie_deviance
from sklearn.metrics import d2_tweedie_score
from sklearn.metrics import make_scorer
from sklearn.metrics._regression import _check_reg_targets
from sklearn.exceptions import UndefinedMetricWarning
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
y_pred_2 = y_true - 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.0)
assert_almost_equal(
mean_squared_log_error(y_true, y_pred),
mean_squared_error(np.log(1 + y_true), np.log(1 + y_pred)),
)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.0)
assert_almost_equal(mean_pinball_loss(y_true, y_pred), 0.5)
assert_almost_equal(mean_pinball_loss(y_true, y_pred_2), 0.5)
assert_almost_equal(mean_pinball_loss(y_true, y_pred, alpha=0.4), 0.6)
assert_almost_equal(mean_pinball_loss(y_true, y_pred_2, alpha=0.4), 0.4)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.0)
mape = mean_absolute_percentage_error(y_true, y_pred)
assert np.isfinite(mape)
assert mape > 1e6
assert_almost_equal(max_error(y_true, y_pred), 1.0)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(r2_score(y_true, y_pred, force_finite=False), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.0)
assert_almost_equal(
explained_variance_score(y_true, y_pred, force_finite=False), 1.0
)
assert_almost_equal(
mean_tweedie_deviance(y_true, y_pred, power=0),
mean_squared_error(y_true, y_pred),
)
assert_almost_equal(
d2_tweedie_score(y_true, y_pred, power=0), r2_score(y_true, y_pred)
)
# Tweedie deviance needs positive y_pred, except for p=0,
# p>=2 needs positive y_true
# results evaluated by sympy
y_true = np.arange(1, 1 + n_samples)
y_pred = 2 * y_true
n = n_samples
assert_almost_equal(
mean_tweedie_deviance(y_true, y_pred, power=-1),
5 / 12 * n * (n**2 + 2 * n + 1),
)
assert_almost_equal(
mean_tweedie_deviance(y_true, y_pred, power=1), (n + 1) * (1 - np.log(2))
)
assert_almost_equal(
mean_tweedie_deviance(y_true, y_pred, power=2), 2 * np.log(2) - 1
)
assert_almost_equal(
mean_tweedie_deviance(y_true, y_pred, power=3 / 2),
((6 * np.sqrt(2) - 8) / n) * np.sqrt(y_true).sum(),
)
assert_almost_equal(
mean_tweedie_deviance(y_true, y_pred, power=3), np.sum(1 / y_true) / (4 * n)
)
dev_mean = 2 * np.mean(xlogy(y_true, 2 * y_true / (n + 1)))
assert_almost_equal(
d2_tweedie_score(y_true, y_pred, power=1),
1 - (n + 1) * (1 - np.log(2)) / dev_mean,
)
dev_mean = 2 * np.log((n + 1) / 2) - 2 / n * np.log(factorial(n))
assert_almost_equal(
d2_tweedie_score(y_true, y_pred, power=2), 1 - (2 * np.log(2) - 1) / dev_mean
)
def test_mean_squared_error_multioutput_raw_value_squared():
# non-regression test for
# https://github.com/scikit-learn/scikit-learn/pull/16323
mse1 = mean_squared_error([[1]], [[10]], multioutput="raw_values", squared=True)
mse2 = mean_squared_error([[1]], [[10]], multioutput="raw_values", squared=False)
assert np.sqrt(mse1) == pytest.approx(mse2)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1.0 / 3 + 2.0 / 3 + 2.0 / 3) / 4.0)
error = mean_squared_error(y_true, y_pred, squared=False)
assert_almost_equal(error, 0.454, decimal=2)
error = mean_squared_log_error(y_true, y_pred)
assert_almost_equal(error, 0.200, decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1.0 + 2.0 / 3) / 4.0)
error = mean_pinball_loss(y_true, y_pred)
assert_almost_equal(error, (1.0 + 2.0 / 3) / 8.0)
error = np.around(mean_absolute_percentage_error(y_true, y_pred), decimals=2)
assert np.isfinite(error)
assert error > 1e6
error = median_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1.0 + 1.0) / 4.0)
error = r2_score(y_true, y_pred, multioutput="variance_weighted")
assert_almost_equal(error, 1.0 - 5.0 / 2)
error = r2_score(y_true, y_pred, multioutput="uniform_average")
assert_almost_equal(error, -0.875)
# constant `y_true` with force_finite=True leads to 1. or 0.
yc = [5.0, 5.0]
error = r2_score(yc, [5.0, 5.0], multioutput="variance_weighted")
assert_almost_equal(error, 1.0)
error = r2_score(yc, [5.0, 5.1], multioutput="variance_weighted")
assert_almost_equal(error, 0.0)
# Setting force_finite=False results in the nan for 4th output propagating
error = r2_score(
y_true, y_pred, multioutput="variance_weighted", force_finite=False
)
assert_almost_equal(error, np.nan)
error = r2_score(y_true, y_pred, multioutput="uniform_average", force_finite=False)
assert_almost_equal(error, np.nan)
# Dropping the 4th output to check `force_finite=False` for nominal
y_true = y_true[:, :-1]
y_pred = y_pred[:, :-1]
error = r2_score(y_true, y_pred, multioutput="variance_weighted")
error2 = r2_score(
y_true, y_pred, multioutput="variance_weighted", force_finite=False
)
assert_almost_equal(error, error2)
error = r2_score(y_true, y_pred, multioutput="uniform_average")
error2 = r2_score(y_true, y_pred, multioutput="uniform_average", force_finite=False)
assert_almost_equal(error, error2)
# constant `y_true` with force_finite=False leads to NaN or -Inf.
error = r2_score(
yc, [5.0, 5.0], multioutput="variance_weighted", force_finite=False
)
assert_almost_equal(error, np.nan)
error = r2_score(
yc, [5.0, 6.0], multioutput="variance_weighted", force_finite=False
)
assert_almost_equal(error, -np.inf)
def test_regression_metrics_at_limits():
# Single-sample case
# Note: for r2 and d2_tweedie see also test_regression_single_sample
assert_almost_equal(mean_squared_error([0.0], [0.0]), 0.0)
assert_almost_equal(mean_squared_error([0.0], [0.0], squared=False), 0.0)
assert_almost_equal(mean_squared_log_error([0.0], [0.0]), 0.0)
assert_almost_equal(mean_absolute_error([0.0], [0.0]), 0.0)
assert_almost_equal(mean_pinball_loss([0.0], [0.0]), 0.0)
assert_almost_equal(mean_absolute_percentage_error([0.0], [0.0]), 0.0)
assert_almost_equal(median_absolute_error([0.0], [0.0]), 0.0)
assert_almost_equal(max_error([0.0], [0.0]), 0.0)
assert_almost_equal(explained_variance_score([0.0], [0.0]), 1.0)
# Perfect cases
assert_almost_equal(r2_score([0.0, 1], [0.0, 1]), 1.0)
# Non-finite cases
# R² and explained variance have a fix by default for non-finite cases
for s in (r2_score, explained_variance_score):
assert_almost_equal(s([0, 0], [1, -1]), 0.0)
assert_almost_equal(s([0, 0], [1, -1], force_finite=False), -np.inf)
assert_almost_equal(s([1, 1], [1, 1]), 1.0)
assert_almost_equal(s([1, 1], [1, 1], force_finite=False), np.nan)
msg = (
"Mean Squared Logarithmic Error cannot be used when targets "
"contain negative values."
)
with pytest.raises(ValueError, match=msg):
mean_squared_log_error([-1.0], [-1.0])
msg = (
"Mean Squared Logarithmic Error cannot be used when targets "
"contain negative values."
)
with pytest.raises(ValueError, match=msg):
mean_squared_log_error([1.0, 2.0, 3.0], [1.0, -2.0, 3.0])
msg = (
"Mean Squared Logarithmic Error cannot be used when targets "
"contain negative values."
)
with pytest.raises(ValueError, match=msg):
mean_squared_log_error([1.0, -2.0, 3.0], [1.0, 2.0, 3.0])
# Tweedie deviance error
power = -1.2
assert_allclose(
mean_tweedie_deviance([0], [1.0], power=power), 2 / (2 - power), rtol=1e-3
)
msg = "can only be used on strictly positive y_pred."
with pytest.raises(ValueError, match=msg):
mean_tweedie_deviance([0.0], [0.0], power=power)
with pytest.raises(ValueError, match=msg):
d2_tweedie_score([0.0] * 2, [0.0] * 2, power=power)
assert_almost_equal(mean_tweedie_deviance([0.0], [0.0], power=0), 0.0, 2)
power = 1.0
msg = "only be used on non-negative y and strictly positive y_pred."
with pytest.raises(ValueError, match=msg):
mean_tweedie_deviance([0.0], [0.0], power=power)
with pytest.raises(ValueError, match=msg):
d2_tweedie_score([0.0] * 2, [0.0] * 2, power=power)
power = 1.5
assert_allclose(mean_tweedie_deviance([0.0], [1.0], power=power), 2 / (2 - power))
msg = "only be used on non-negative y and strictly positive y_pred."
with pytest.raises(ValueError, match=msg):
mean_tweedie_deviance([0.0], [0.0], power=power)
with pytest.raises(ValueError, match=msg):
d2_tweedie_score([0.0] * 2, [0.0] * 2, power=power)
power = 2.0
assert_allclose(mean_tweedie_deviance([1.0], [1.0], power=power), 0.00, atol=1e-8)
msg = "can only be used on strictly positive y and y_pred."
with pytest.raises(ValueError, match=msg):
mean_tweedie_deviance([0.0], [0.0], power=power)
with pytest.raises(ValueError, match=msg):
d2_tweedie_score([0.0] * 2, [0.0] * 2, power=power)
power = 3.0
assert_allclose(mean_tweedie_deviance([1.0], [1.0], power=power), 0.00, atol=1e-8)
msg = "can only be used on strictly positive y and y_pred."
with pytest.raises(ValueError, match=msg):
mean_tweedie_deviance([0.0], [0.0], power=power)
with pytest.raises(ValueError, match=msg):
d2_tweedie_score([0.0] * 2, [0.0] * 2, power=power)
power = 0.5
with pytest.raises(ValueError, match="is only defined for power<=0 and power>=1"):
mean_tweedie_deviance([0.0], [0.0], power=power)
with pytest.raises(ValueError, match="is only defined for power<=0 and power>=1"):
d2_tweedie_score([0.0] * 2, [0.0] * 2, power=power)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES, repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(y1, y2, None)
assert type1 == y_type
if type1 == "continuous":
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
with pytest.raises(ValueError):
_check_reg_targets(y1, y2, None)
def test__check_reg_targets_exception():
invalid_multioutput = "this_value_is_not_valid"
expected_message = (
"Allowed 'multioutput' string values are.+You provided multioutput={!r}".format(
invalid_multioutput
)
)
with pytest.raises(ValueError, match=expected_message):
_check_reg_targets([1, 2, 3], [[1], [2], [3]], invalid_multioutput)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput="raw_values")
mae = mean_absolute_error(y_true, y_pred, multioutput="raw_values")
err_msg = (
"multioutput is expected to be 'raw_values' "
"or 'uniform_average' but we got 'variance_weighted' instead."
)
with pytest.raises(ValueError, match=err_msg):
mean_pinball_loss(y_true, y_pred, multioutput="variance_weighted")
pbl = mean_pinball_loss(y_true, y_pred, multioutput="raw_values")
mape = mean_absolute_percentage_error(y_true, y_pred, multioutput="raw_values")
r = r2_score(y_true, y_pred, multioutput="raw_values")
evs = explained_variance_score(y_true, y_pred, multioutput="raw_values")
evs2 = explained_variance_score(
y_true, y_pred, multioutput="raw_values", force_finite=False
)
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(pbl, [0.25 / 2, 0.625 / 2], decimal=2)
assert_array_almost_equal(mape, [0.0778, 0.2262], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs2, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]] * 4
y_pred = [[1, 1]] * 4
mse = mean_squared_error(y_true, y_pred, multioutput="raw_values")
mae = mean_absolute_error(y_true, y_pred, multioutput="raw_values")
pbl = mean_pinball_loss(y_true, y_pred, multioutput="raw_values")
r = r2_score(y_true, y_pred, multioutput="raw_values")
assert_array_almost_equal(mse, [1.0, 1.0], decimal=2)
assert_array_almost_equal(mae, [1.0, 1.0], decimal=2)
assert_array_almost_equal(pbl, [0.5, 0.5], decimal=2)
assert_array_almost_equal(r, [0.0, 0.0], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput="raw_values")
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert np.mean(r) == r2_score(
[[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput="uniform_average"
)
evs = explained_variance_score(
[[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput="raw_values"
)
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
evs2 = explained_variance_score(
[[0, -1], [0, 1]],
[[2, 2], [1, 1]],
multioutput="raw_values",
force_finite=False,
)
assert_array_almost_equal(evs2, [-np.inf, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [1, 2]]
y_pred = [[1, 4], [1, 1]]
r2 = r2_score(y_true, y_pred, multioutput="raw_values")
assert_array_almost_equal(r2, [1.0, -3.0], decimal=2)
assert np.mean(r2) == r2_score(y_true, y_pred, multioutput="uniform_average")
r22 = r2_score(y_true, y_pred, multioutput="raw_values", force_finite=False)
assert_array_almost_equal(r22, [np.nan, -3.0], decimal=2)
assert_almost_equal(
np.mean(r22),
r2_score(y_true, y_pred, multioutput="uniform_average", force_finite=False),
)
evs = explained_variance_score(y_true, y_pred, multioutput="raw_values")
assert_array_almost_equal(evs, [1.0, -3.0], decimal=2)
assert np.mean(evs) == explained_variance_score(y_true, y_pred)
evs2 = explained_variance_score(
y_true, y_pred, multioutput="raw_values", force_finite=False
)
assert_array_almost_equal(evs2, [np.nan, -3.0], decimal=2)
assert_almost_equal(
np.mean(evs2), explained_variance_score(y_true, y_pred, force_finite=False)
)
# Handling msle separately as it does not accept negative inputs.
y_true = np.array([[0.5, 1], [1, 2], [7, 6]])
y_pred = np.array([[0.5, 2], [1, 2.5], [8, 8]])
msle = mean_squared_log_error(y_true, y_pred, multioutput="raw_values")
msle2 = mean_squared_error(
np.log(1 + y_true), np.log(1 + y_pred), multioutput="raw_values"
)
assert_array_almost_equal(msle, msle2, decimal=2)
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
rmsew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6], squared=False)
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
mapew = mean_absolute_percentage_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw2 = explained_variance_score(
y_true, y_pred, multioutput=[0.4, 0.6], force_finite=False
)
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(rmsew, 0.59, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(mapew, 0.1668, decimal=2)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
assert_almost_equal(evsw2, 0.94, decimal=2)
# Handling msle separately as it does not accept negative inputs.
y_true = np.array([[0.5, 1], [1, 2], [7, 6]])
y_pred = np.array([[0.5, 2], [1, 2.5], [8, 8]])
msle = mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7])
msle2 = mean_squared_error(
np.log(1 + y_true), np.log(1 + y_pred), multioutput=[0.3, 0.7]
)
assert_almost_equal(msle, msle2, decimal=2)
@pytest.mark.parametrize("metric", [r2_score, d2_tweedie_score])
def test_regression_single_sample(metric):
y_true = [0]
y_pred = [1]
warning_msg = "not well-defined with less than two samples."
# Trigger the warning
with pytest.warns(UndefinedMetricWarning, match=warning_msg):
score = metric(y_true, y_pred)
assert np.isnan(score)
def test_tweedie_deviance_continuity():
n_samples = 100
y_true = np.random.RandomState(0).rand(n_samples) + 0.1
y_pred = np.random.RandomState(1).rand(n_samples) + 0.1
assert_allclose(
mean_tweedie_deviance(y_true, y_pred, power=0 - 1e-10),
mean_tweedie_deviance(y_true, y_pred, power=0),
)
    # As we get closer to the limit, with a 1e-12 difference the absolute
    # tolerance needed to pass the checks below increases. There are likely
    # numerical precision issues on the edges of the different definition
    # regions.
assert_allclose(
mean_tweedie_deviance(y_true, y_pred, power=1 + 1e-10),
mean_tweedie_deviance(y_true, y_pred, power=1),
atol=1e-6,
)
assert_allclose(
mean_tweedie_deviance(y_true, y_pred, power=2 - 1e-10),
mean_tweedie_deviance(y_true, y_pred, power=2),
atol=1e-6,
)
assert_allclose(
mean_tweedie_deviance(y_true, y_pred, power=2 + 1e-10),
mean_tweedie_deviance(y_true, y_pred, power=2),
atol=1e-6,
)
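# Illustrative sketch (not part of the original test suite): for power p
# outside {0, 1, 2} the unit Tweedie deviance has the closed form
#   d(y, mu) = 2 * (y**(2-p) / ((1-p)*(2-p)) - y * mu**(1-p) / (1-p)
#              + mu**(2-p) / (2-p)),
# and mean_tweedie_deviance is its average over the samples.
def _tweedie_deviance_reference(y_true, y_pred, power):
    p = power
    dev = 2 * (
        y_true ** (2 - p) / ((1 - p) * (2 - p))
        - y_true * y_pred ** (1 - p) / (1 - p)
        + y_pred ** (2 - p) / (2 - p)
    )
    return np.mean(dev)
def test_tweedie_deviance_reference_sketch():
    rng = np.random.RandomState(2)
    y_true = rng.rand(30) + 0.1
    y_pred = rng.rand(30) + 0.1
    for power in (-1.0, 1.5, 3.0):
        assert_allclose(
            mean_tweedie_deviance(y_true, y_pred, power=power),
            _tweedie_deviance_reference(y_true, y_pred, power),
        )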
def test_mean_absolute_percentage_error():
random_number_generator = np.random.RandomState(42)
y_true = random_number_generator.exponential(size=100)
y_pred = 1.2 * y_true
assert mean_absolute_percentage_error(y_true, y_pred) == pytest.approx(0.2)
@pytest.mark.parametrize(
"distribution", ["normal", "lognormal", "exponential", "uniform"]
)
@pytest.mark.parametrize("target_quantile", [0.05, 0.5, 0.75])
def test_mean_pinball_loss_on_constant_predictions(distribution, target_quantile):
if not hasattr(np, "quantile"):
pytest.skip(
"This test requires a more recent version of numpy "
"with support for np.quantile."
)
# Check that the pinball loss is minimized by the empirical quantile.
n_samples = 3000
rng = np.random.RandomState(42)
data = getattr(rng, distribution)(size=n_samples)
# Compute the best possible pinball loss for any constant predictor:
best_pred = np.quantile(data, target_quantile)
best_constant_pred = np.full(n_samples, fill_value=best_pred)
best_pbl = mean_pinball_loss(data, best_constant_pred, alpha=target_quantile)
# Evaluate the loss on a grid of quantiles
candidate_predictions = np.quantile(data, np.linspace(0, 1, 100))
for pred in candidate_predictions:
# Compute the pinball loss of a constant predictor:
constant_pred = np.full(n_samples, fill_value=pred)
pbl = mean_pinball_loss(data, constant_pred, alpha=target_quantile)
# Check that the loss of this constant predictor is greater or equal
# than the loss of using the optimal quantile (up to machine
# precision):
assert pbl >= best_pbl - np.finfo(best_pbl.dtype).eps
# Check that the value of the pinball loss matches the analytical
# formula.
expected_pbl = (pred - data[data < pred]).sum() * (1 - target_quantile) + (
data[data >= pred] - pred
).sum() * target_quantile
expected_pbl /= n_samples
assert_almost_equal(expected_pbl, pbl)
# Check that we can actually recover the target_quantile by minimizing the
# pinball loss w.r.t. the constant prediction quantile.
def objective_func(x):
constant_pred = np.full(n_samples, fill_value=x)
return mean_pinball_loss(data, constant_pred, alpha=target_quantile)
result = optimize.minimize(objective_func, data.mean(), method="Nelder-Mead")
assert result.success
# The minimum is not unique with limited data, hence the large tolerance.
assert result.x == pytest.approx(best_pred, rel=1e-2)
assert result.fun == pytest.approx(best_pbl)
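# Illustrative sketch (not part of the original test suite): the pinball
# loss checked above can also be written directly from its definition,
#   pbl = mean(alpha * max(y - q, 0) + (1 - alpha) * max(q - y, 0)),
# which is the same quantity as the analytical formula in the loop above.
def _pinball_loss_reference(y_true, y_pred, alpha):
    diff = np.asarray(y_true) - np.asarray(y_pred)
    return np.mean(
        alpha * np.maximum(diff, 0) + (1 - alpha) * np.maximum(-diff, 0)
    )
def test_pinball_loss_matches_reference_sketch():
    rng = np.random.RandomState(0)
    y_true = rng.normal(size=50)
    y_pred = rng.normal(size=50)
    for alpha in (0.05, 0.5, 0.9):
        assert_almost_equal(
            mean_pinball_loss(y_true, y_pred, alpha=alpha),
            _pinball_loss_reference(y_true, y_pred, alpha),
        )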
def test_dummy_quantile_parameter_tuning():
# Integration test to check that it is possible to use the pinball loss to
# tune the hyperparameter of a quantile regressor. This is conceptually
# similar to the previous test but using the scikit-learn estimator and
# scoring API instead.
n_samples = 1000
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, 5)) # Ignored
y = rng.exponential(size=n_samples)
all_quantiles = [0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95]
for alpha in all_quantiles:
neg_mean_pinball_loss = make_scorer(
mean_pinball_loss,
alpha=alpha,
greater_is_better=False,
)
regressor = DummyRegressor(strategy="quantile", quantile=0.25)
grid_search = GridSearchCV(
regressor,
param_grid=dict(quantile=all_quantiles),
scoring=neg_mean_pinball_loss,
).fit(X, y)
assert grid_search.best_params_["quantile"] == pytest.approx(alpha)
|
manhhomienbienthuy/scikit-learn
|
sklearn/metrics/tests/test_regression.py
|
Python
|
bsd-3-clause
| 23,340
|
from __init__ import *
path = os.environ["TDGU_PATH"] + '/'
fileoutp = open(path + 'pcatsubm.log', 'w')
cntr = 0
for name in os.listdir(path):
if name.endswith(".py"):
print name
fileobjt = open(path + name, 'r')
for line in fileobjt:
if line.startswith('def pcat_'):
#if cntr == 5:
# break
cntr += 1
namefunc = line[4:-1].split('(')[0]
cmnd = 'python $TDGU_PATH/%s %s' % (name, namefunc)
print cmnd
                retc = os.system(cmnd)
                # os.system() does not raise on failure; it returns the
                # exit status, so check the return code instead of
                # relying on an exception.
                if retc != 0:
                    fileoutp.write('%s failed with exit status %d.\n' % (namefunc, retc))
print
print
print
print
print
print
print
print
fileoutp.close()
|
tdaylan/tdgu
|
pcatsubm.py
|
Python
|
mit
| 1,007
|
from django.http import HttpResponse
from webpos.models import Category
def check(request):
try:
qs = Category.objects.all()
list(qs)
return HttpResponse("OK")
except Exception:
return HttpResponse("NO")
|
radome/OpenGenfri
|
pos/pos/views.py
|
Python
|
mit
| 245
|
from random import seed as seventythree
from math import floor as se
ven = sum
from random import random as ty
three = ty
tythree = range
def checkio ( ) :
seventythree( 73737373737373737373737373 )
return se( ven( ty( ) + three( ) for seven in tythree( 73 ) ) )
print( checkio( ) )
|
aureooms/checkio
|
home/the-best-number-ever.py
|
Python
|
agpl-3.0
| 296
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# base_addition - 2014-10-19 - ejb
from __future__ import print_function
import argparse
import sys
__version__ = '0.1'
'''
Revision history
0.1 Initial version
'''
DIGITS = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
debug = False
def parse_args():
parser = argparse.ArgumentParser(description="Add two numbers in a given base (default 10).")
parser.add_argument("-b", "--base", type=int, dest="base", choices=xrange(2, 36 + 1), default=10, help="base (2-36); default = %(default)s")
group = parser.add_mutually_exclusive_group()
group.add_argument("--addends", type=str, metavar="NUM", nargs='+', help="addends")
group.add_argument("--test", action="store_true", help="run test suite")
parser.add_argument('--version', action='version', version='%(prog)s v' + str(__version__))
parser.add_argument("--debug", "--verbose", action="store_true", dest="debug", help="enable debugging output")
return parser.parse_args()
def log_debug(debug_str):
if debug:
print("DEBUG (addition): ", debug_str)
def log_error(error_str):
# EJB: I originally had 'print >> sys.stderr, "ERROR: ", error_str' here
# and it was getting buffered, even followed by sys.stderr.flush().
sys.stderr.write("ERROR: {}.\n".format(error_str))
def run_tests():
assert(base_addition(debug=False, base=2, addends=[11, 1]) == '100')
assert(base_addition(debug=False, base=3, addends=[100020, 122]) == '100212')
assert(base_addition(debug=False, base=3, addends=[12, 21]) == '110')
assert(base_addition(debug=False, base=3, addends=[1101, 21]) == '1122')
assert(base_addition(debug=False, base=5, addends=[0, 0, 4, 1, 0, 0]) == '10')
assert(base_addition(debug=False, base=5, addends=[143, 43]) == '241')
assert(base_addition(debug=False, base=5, addends=[1324, 33, 2313]) == '4230')
assert(base_addition(debug=False, base=6, addends=[32001, 5414, 221]) == '42040')
assert(base_addition(debug=False, base=8, addends=[4, 4, 4, 4]) == '20')
assert(base_addition(debug=False, base=12, addends=['3B7', '25B9', '210A', 'B', '82']) == '4B93')
assert(base_addition(debug=False, base=17, addends=['4FEDCBA', '3146B5A']) == '8023703')
assert(base_addition(debug=False, base=20, addends=['177', '4B9', '33', '14G', '2']) == '76H')
assert(base_addition(debug=False, base=22, addends=['ED', 'GAIL']) == 'GBBC')
print ('All tests passed.')
def base_addition(debug, base, addends):
digits = DIGITS[0:base]
accumulator = 0 # Additive identity
# Validate that input digits are valid in given base
valid = True
for addend in addends:
for digit in str(addend).upper():
if not str(digit).upper() in digits:
valid = False
log_error('Invalid number {} for base {}'.format(addend, base))
break
if not valid:
sys.exit(-1)
    if len(addends) == 0:
        # Empty sum: return the additive identity (indexing addends[0]
        # here would raise IndexError).
        return '0'
    else:
        accumulator = str(addends.pop())
while len(addends) > 0:
x = str(accumulator).upper()
y = str(addends.pop()).upper()
log_debug('x input: {}: '.format(x))
log_debug('y input: {}: '.format(y))
x = x[::-1] # Reverse string so we can do right-to-left processing left-to-right
y = y[::-1]
log_debug('x reversed: {}'.format(x))
log_debug('y reversed: {}'.format(y))
# right-pad (because reversed) to make same number of digits, for simplicity
max_len = max(len(x), len(y))
x += (max_len - len(x)) * '0'
y += (max_len - len(y)) * '0'
log_debug('x padded: {}'.format(x))
log_debug('y padded: {}'.format(y))
out_str = ''
carry_in = 0
for i in range(max_len):
(carry_out, output) = divmod(digits.index(x[i]) + digits.index(y[i]), base)
output += carry_in
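            # Note: at most one carry is possible per column. If the first
            # divmod produced carry_out == 1, then output <= base - 2, so
            # adding carry_in cannot reach base; the re-divmod below only
            # fires when carry_out was 0, so overwriting it is safe.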
if output >= base:
(carry_out, output) = divmod(output, base)
assert(output < base)
out_str = '{}{}'.format(digits[output], out_str)
carry_in = carry_out
if carry_out:
out_str = '{}{}'.format(digits[carry_out], out_str)
accumulator = out_str
log_debug('Answer: {}'.format(accumulator))
return accumulator
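def _check_with_int_conversion(base, addends):
    """Illustrative cross-check (not part of the original script): add the
    numbers via Python ints and convert the sum back to the given base.
    Assumes every addend is already valid in `base`."""
    total = sum(int(str(a), base) for a in addends)
    if total == 0:
        return '0'
    out = ''
    while total:
        total, r = divmod(total, base)
        out = DIGITS[r] + out
    return out
# e.g. _check_with_int_conversion(12, ['3B7', '25B9', '210A', 'B', '82']) == '4B93'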
if __name__ == "__main__":
args = parse_args()
debug = args.debug
log_debug(args)
if args.test:
run_tests()
else:
base = args.base
addends = args.addends
answer = base_addition(debug=debug, base=base, addends=addends)
print('Answer: {}'.format(answer))
|
ebassett/base_addition
|
base_addition.py
|
Python
|
gpl-2.0
| 4,658
|
#!/usr/bin/env python
#
# Generate pnSeed[] from Pieter's DNS seeder
#
NSEEDS=600
import re
import sys
from subprocess import check_output
def main():
lines = sys.stdin.readlines()
ips = []
pattern = re.compile(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3}):14433")
for line in lines:
m = pattern.match(line)
if m is None:
continue
ip = 0
for i in range(0,4):
ip = ip + (int(m.group(i+1)) << (8*(i)))
if ip == 0:
continue
ips.append(ip)
for row in range(0, min(NSEEDS,len(ips)), 8):
print " " + ", ".join([ "0x%08x"%i for i in ips[row:row+8] ]) + ","
if __name__ == '__main__':
main()
|
bitmxittz/Bitmxittz
|
contrib/seeds/makeseeds.py
|
Python
|
mit
| 709
|
"""
accounts
FILE: forms.py
Created: 6/21/15 8:31 PM
"""
__author__ = 'Mark Scrimshire:@ekivemark'
from django import forms
from django.utils.safestring import mark_safe
from registration.forms import (RegistrationFormUniqueEmail,
RegistrationFormTermsOfService)
from accounts.models import User
class Email(forms.EmailField):
def clean(self, value):
value = value.lower()
        # Keep the value returned by EmailField.clean() (validation and
        # normalization) instead of discarding it.
        value = super(Email, self).clean(value)
try:
User.objects.get(email=value)
raise forms.ValidationError(mark_safe(
"This email is already registered. <br/>Use "
"<a href='/password/reset'>this forgot password</a> link "
"or on the <a href ='/accounts/login?next=/'>login page</a>."))
except User.DoesNotExist:
return value
class UserRegistrationForm(forms.ModelForm):
"""
A form for creating new users. Includes all the required
fields, plus a repeated password.
is_user set to True (Default in the User Model)
    is_developer set to False (Default in the User Model)
"""
    # email will no longer be used as the username
email = Email()
password1 = forms.CharField(widget=forms.PasswordInput(),
label="Password")
password2 = forms.CharField(widget=forms.PasswordInput(),
label="Repeat your password")
applicant_role = "User"
fields = ['username', 'email', 'password1', 'password2', 'applicant_role' ]
# is_user = forms.BooleanField(widget=forms.HiddenInput(), initial=True,)
# is_developer = forms.BooleanField(widget=forms.HiddenInput(), initial=False, required=False)
def clean_password(self):
if self.data['password1'] != self.data['password2']:
raise forms.ValidationError('Passwords are not the same')
return self.data['password1']
def save(self):
u = super(UserRegistrationForm, self).save(commit=False)
u.is_user = True
u.is_developer = False
u.save()
return u
class DeveloperRegistrationForm(forms.ModelForm):
"""
A form for creating new Developer users. Includes all the required
fields, plus a repeated password.
is_user should be set to false
is_developer should be set to True
"""
    # email will no longer be used as the username
email = Email()
password1 = forms.CharField(widget=forms.PasswordInput(),
label="Password")
password2 = forms.CharField(widget=forms.PasswordInput(),
label="Repeat your password")
applicant_role = "Developer"
fields = ['username', 'email', 'password1', 'password2', 'applicant_role' ]
# is_user = forms.BooleanField(widget=forms.HiddenInput(), initial=False)
# is_developer = forms.BooleanField(widget=forms.HiddenInput(), initial=True)
def clean_password(self):
if self.data['password1'] != self.data['password2']:
raise forms.ValidationError('Passwords are not the same')
return self.data['password1']
def save(self):
u = super(DeveloperRegistrationForm, self).save(commit=False)
u.is_user = False
u.is_developer = True
u.save()
return u
class RegistrationFormUserTOSAndEmail(UserRegistrationForm,
RegistrationFormUniqueEmail,
RegistrationFormTermsOfService,
):
pass
class RegistrationFormDeveloperTOSAndEmail(DeveloperRegistrationForm,
RegistrationFormUniqueEmail,
RegistrationFormTermsOfService,
):
pass
class RegistrationFormTOSAndEmail(
RegistrationFormUniqueEmail,
RegistrationFormTermsOfService,
):
pass
|
ekivemark/BlueButtonFHIR_API
|
accounts/forms/other.py
|
Python
|
apache-2.0
| 3,932
|
from itertools import chain
from bokeh.core.enums import NamedColor as Color, LineJoin, LineCap
# enums
FILL = ["fill_color", "fill_alpha"]
LINE = ["line_color", "line_width", "line_alpha", "line_join", "line_cap", "line_dash", "line_dash_offset"]
TEXT = ["text_font", "text_font_size", "text_font_style", "text_color", "text_alpha", "text_align", "text_baseline"]
ANGLE = ["angle", "angle_units"]
PROPS = ["name", "tags", "js_callbacks"]
GLYPH = ["visible"]
MARKER = ["x", "y", "size", "angle", "angle_units"]
def prefix(prefix, props):
return [prefix + p for p in props]
def check_properties_existence(model, *props):
expected = set(chain(PROPS, *props))
found = set(model.properties())
missing = expected.difference(found)
extra = found.difference(expected)
assert len(missing) == 0, "Properties missing: {0}".format(", ".join(sorted(missing)))
assert len(extra) == 0, "Extra properties: {0}".format(", ".join(sorted(extra)))
def check_fill_properties(model, prefix="", fill_color=Color.gray, fill_alpha=1.0):
assert getattr(model, prefix + "fill_color") == fill_color
assert getattr(model, prefix + "fill_alpha") == fill_alpha
def check_line_properties(model, prefix="", line_color=Color.black, line_width=1.0, line_alpha=1.0):
assert getattr(model, prefix + "line_color") == line_color
assert getattr(model, prefix + "line_width") == line_width
assert getattr(model, prefix + "line_alpha") == line_alpha
assert getattr(model, prefix + "line_join") == LineJoin.miter
assert getattr(model, prefix + "line_cap") == LineCap.butt
assert getattr(model, prefix + "line_dash") == []
assert getattr(model, prefix + "line_dash_offset") == 0
def check_text_properties(model, prefix="", font_size='12pt', baseline='bottom', font_style='normal', align="left"):
assert getattr(model, prefix + "text_font") == "helvetica"
assert getattr(model, prefix + "text_font_size") == {"value": font_size}
assert getattr(model, prefix + "text_font_style") == font_style
assert getattr(model, prefix + "text_color") == "#444444"
assert getattr(model, prefix + "text_alpha") == 1.0
assert getattr(model, prefix + "text_align") == align
assert getattr(model, prefix + "text_baseline") == baseline
def check_marker_properties(marker):
assert marker.x is None
assert marker.y is None
assert marker.size == 4
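# Illustrative sketch (not part of the original module):
# check_properties_existence() above is plain set arithmetic -- anything
# expected but not found is "missing", anything found but not expected is
# "extra".
def _diff_property_sets(expected_props, found_props):
    expected, found = set(expected_props), set(found_props)
    return sorted(expected - found), sorted(found - expected)
# e.g. _diff_property_sets(PROPS + FILL, PROPS + ["fill_color"])
# == (["fill_alpha"], [])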
|
draperjames/bokeh
|
bokeh/models/tests/utils/property_utils.py
|
Python
|
bsd-3-clause
| 2,396
|
# coding: latin-1
import quicksort2
# build the tree
racine = quicksort2.construit_arbre ()
# build the image of the graph
racine.image ("graph.txt", "graph.png")
# build the beginning of the tex file
package = """a4 amsmath amssymb subfigure float latexsym amsfonts
epic eepic makeidx multido varindex moreverb alltt fancyvrb fancyhdr
color eurosym tabularx placeins url shorttoc""".split ()
header = """\\documentclass[french,11pt]{article}\n\\usepackage[french]{babel}
\\usepackage[usenames]{color}\\usepackage{""" + \
         "}\n\\usepackage{".join (package) + \
         """}\\usepackage[small,normal]{caption2}\\urlstyle{sf}
\\usepackage[pdftex]{graphicx}\\usepackage[T1]{fontenc}
\\DefineVerbatimEnvironment{verbatimx}{Verbatim}{frame=single,
framerule=.1pt, framesep=1.5mm, fontsize=\\footnotesize,xleftmargin=0pt}
\\begin{document}\n"""
# create a tex file
f = open ("page.tex", "w")
f.write (header)
f.write ("\\title{Tri quicksort}\n") # define the title
f.write ("\\maketitle\n") # write the title
f.write ("\\tableofcontents\n") # table of contents
f.write ("\\section{liste triée}\n") # heading for the sorted list
s = str (racine) # get the sorted list
s = s.replace ("\n", "\\\\ \n") # \\ starts a new line in LaTeX
f.write ("\\begin{tabular}{|l|}\n")
f.write (s)
f.write ("\\end{tabular}\n")
f.write ("\\section{graphe}\n") # heading for the image
f.write ('\\includegraphics[height=5cm]{graph.png}\n') # image
f.write ("\\section{code du graphe}\n") # heading for the graph code
s = racine.chaine_graphe () # get the code of the graph
f.write ("\\begin{verbatimx}\n") # print it verbatim
f.write (s)
f.write ("\\end{verbatimx}\n")
f.write ("\\end{document}\n") # end
f.close ()
# compile the file twice so that the table of contents
# is taken into account
import os
os.system (r'"C:\Program Files\MiKTeX 2.7\miktex\bin\pdflatex" page.tex')
os.system (r'"C:\Program Files\MiKTeX 2.7\miktex\bin\pdflatex" page.tex')
# display the result with Adobe Reader
os.system (r'"C:\Program Files\Adobe\Reader 9.0\Reader\AcroRd32.exe" page.pdf')
|
sdpython/ensae_teaching_cs
|
_todo/programme/quicksort4.py
|
Python
|
mit
| 2,214
|
import sys
import traceback
from io import BytesIO
from unittest import TestCase
from wsgiref import simple_server
# If data is too large, socket will choke, so write chunks no larger than 32MB
# at a time. The rationale behind the 32MB can be found in #5596#comment:4.
MAX_SOCKET_CHUNK_SIZE = 32 * 1024 * 1024 # 32 MB
class ServerHandler(simple_server.ServerHandler):
error_status = "500 INTERNAL SERVER ERROR"
def write(self, data):
"""'write()' callable as specified by PEP 3333"""
assert isinstance(data, bytes), "write() argument must be bytestring"
if not self.status:
raise AssertionError("write() before start_response()")
elif not self.headers_sent:
# Before the first output, send the stored headers
self.bytes_sent = len(data) # make sure we know content-length
self.send_headers()
else:
self.bytes_sent += len(data)
# XXX check Content-Length and truncate if too many bytes written?
data = BytesIO(data)
for chunk in iter(lambda: data.read(MAX_SOCKET_CHUNK_SIZE), b''):
self._write(chunk)
self._flush()
def error_output(self, environ, start_response):
super(ServerHandler, self).error_output(environ, start_response)
return ['\n'.join(traceback.format_exception(*sys.exc_info()))]
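# Illustrative sketch (not part of the original module): write() above uses
# the two-argument iter(callable, sentinel) form, which keeps calling
# data.read(MAX_SOCKET_CHUNK_SIZE) until the read returns the sentinel b''.
def _read_in_chunks(stream, size=MAX_SOCKET_CHUNK_SIZE):
    """Yield successive reads of at most `size` bytes until EOF."""
    return iter(lambda: stream.read(size), b'')
# e.g. list(_read_in_chunks(BytesIO(b'abcdef'), size=4)) == [b'abcd', b'ef']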
class DummyHandler:
def log_request(self, *args, **kwargs):
pass
class FileWrapperHandler(ServerHandler):
def __init__(self, *args, **kwargs):
super(FileWrapperHandler, self).__init__(*args, **kwargs)
self.request_handler = DummyHandler()
self._used_sendfile = False
def sendfile(self):
self._used_sendfile = True
return True
def wsgi_app(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/plain')])
return [b'Hello World!']
def wsgi_app_file_wrapper(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/plain')])
return environ['wsgi.file_wrapper'](BytesIO(b'foo'))
class WSGIFileWrapperTests(TestCase):
"""
The wsgi.file_wrapper works for the builting server.
Tests for #9659: wsgi.file_wrapper in the builtin server.
We need to mock a couple of handlers and keep track of what
gets called when using a couple kinds of WSGI apps.
"""
def test_file_wrapper_uses_sendfile(self):
env = {'SERVER_PROTOCOL': 'HTTP/1.0'}
handler = FileWrapperHandler(None, BytesIO(), BytesIO(), env)
handler.run(wsgi_app_file_wrapper)
self.assertTrue(handler._used_sendfile)
self.assertEqual(handler.stdout.getvalue(), b'')
self.assertEqual(handler.stderr.getvalue(), b'')
def test_file_wrapper_no_sendfile(self):
env = {'SERVER_PROTOCOL': 'HTTP/1.0'}
handler = FileWrapperHandler(None, BytesIO(), BytesIO(), env)
handler.run(wsgi_app)
self.assertFalse(handler._used_sendfile)
self.assertEqual(handler.stdout.getvalue().splitlines()[-1], b'Hello World!')
self.assertEqual(handler.stderr.getvalue(), b'')
class WriteChunkCounterHandler(ServerHandler):
"""
Server handler that counts the number of chunks written after headers were
sent. Used to make sure large response body chunking works properly.
"""
def __init__(self, *args, **kwargs):
super(WriteChunkCounterHandler, self).__init__(*args, **kwargs)
self.request_handler = DummyHandler()
self.headers_written = False
self.write_chunk_counter = 0
def send_headers(self):
super(WriteChunkCounterHandler, self).send_headers()
self.headers_written = True
def _write(self, data):
if self.headers_written:
self.write_chunk_counter += 1
self.stdout.write(data)
def send_big_data_app(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/plain')])
# Return a blob of data that is 1.5 times the maximum chunk size.
return [b'x' * (MAX_SOCKET_CHUNK_SIZE + MAX_SOCKET_CHUNK_SIZE // 2)]
class ServerHandlerChunksProperly(TestCase):
"""
The ServerHandler chunks data properly.
Tests for #18972: The logic that performs the math to break data into
32MB (MAX_SOCKET_CHUNK_SIZE) chunks was flawed, BUT it didn't actually
cause any problems.
"""
def test_chunked_data(self):
env = {'SERVER_PROTOCOL': 'HTTP/1.0'}
handler = WriteChunkCounterHandler(None, BytesIO(), BytesIO(), env)
handler.run(send_big_data_app)
self.assertEqual(handler.write_chunk_counter, 2)
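# Illustrative sketch (not part of the original tests): the number of
# _write() calls after the headers is just the payload size divided by
# MAX_SOCKET_CHUNK_SIZE, rounded up -- hence 2 chunks for the 1.5x payload
# used in test_chunked_data above.
def expected_chunk_count(payload_size):
    """Ceiling division without floats."""
    return -(-payload_size // MAX_SOCKET_CHUNK_SIZE)
assert expected_chunk_count(MAX_SOCKET_CHUNK_SIZE + MAX_SOCKET_CHUNK_SIZE // 2) == 2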
|
twz915/django
|
tests/builtin_server/tests.py
|
Python
|
bsd-3-clause
| 4,631
|
"""Define constants for the Sensor.Community integration."""
from datetime import timedelta
ATTR_SENSOR_ID = "sensor_id"
CONF_SENSOR_ID = "sensor_id"
DEFAULT_SCAN_INTERVAL = timedelta(minutes=10)
DOMAIN = "luftdaten"
|
rohitranjan1991/home-assistant
|
homeassistant/components/luftdaten/const.py
|
Python
|
mit
| 221
|
""" netentry -- Network entry widgets for the GUI.
This module provides GUI widgets used to represent wired and wireless
entries in the GUI's network list, as well as any settings dialogs
contained within them.
"""
#
# Copyright (C) 2008-2009 Adam Blackburn
# Copyright (C) 2008-2009 Dan O'Reilly
# Copyright (C) 2009 Andrew Psaltis
# Copyright (C) 2011 David Paleino
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License Version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import gtk
import os
import wicd.misc as misc
import wicd.wpath as wpath
import wicd.dbusmanager as dbusmanager
from wicd.misc import noneToString, stringToNone, noneToBlankString, to_bool
from guiutil import error, LabelEntry, GreyLabel, LeftAlignedLabel
from guiutil import string_input, ProtectedLabelEntry, LabelCombo
from wicd.translations import language, _
# These get set when a NetworkEntry is instantiated.
daemon = None
wired = None
wireless = None
def setup_dbus():
""" Initialize DBus. """
global daemon, wireless, wired
daemon = dbusmanager.get_interface('daemon')
wireless = dbusmanager.get_interface('wireless')
wired = dbusmanager.get_interface('wired')
class AdvancedSettingsDialog(gtk.Dialog):
""" Advanced settings dialog. """
def __init__(self, network_name=None):
""" Build the base advanced settings dialog.
This class isn't used by itself, instead it is used as a parent for
the WiredSettingsDialog and WirelessSettingsDialog.
"""
# if no network name was passed, just use Properties as the title
if network_name:
title = '%s - %s' % (network_name, _('Properties'))
else:
title = _('Properties')
gtk.Dialog.__init__(
self,
title=title,
flags=gtk.DIALOG_MODAL,
buttons=(
gtk.STOCK_CANCEL,
gtk.RESPONSE_REJECT,
gtk.STOCK_OK,
gtk.RESPONSE_ACCEPT
)
)
self.set_default_size()
self.connect('show', lambda *a, **k: self.set_default_size())
# Set up the Advanced Settings Dialog.
self.txt_ip = LabelEntry(_('IP'))
self.txt_ip.entry.connect('focus-out-event', self.set_defaults)
self.txt_netmask = LabelEntry(_('Netmask'))
self.txt_gateway = LabelEntry(_('Gateway'))
self.txt_search_dom = LabelEntry(_('Search domain'))
self.txt_domain = LabelEntry(_('DNS domain'))
self.txt_dns_1 = LabelEntry(_('DNS server') + ' 1')
self.txt_dns_2 = LabelEntry(_('DNS server') + ' 2')
self.txt_dns_3 = LabelEntry(_('DNS server') + ' 3')
dhcp_hostname_hbox = gtk.HBox(False, 0)
self.chkbox_use_dhcp_hostname = gtk.CheckButton()
self.txt_dhcp_hostname = LabelEntry("DHCP Hostname")
dhcp_hostname_hbox.pack_start(
self.chkbox_use_dhcp_hostname, fill=False, expand=False)
dhcp_hostname_hbox.pack_start(self.txt_dhcp_hostname)
self.chkbox_static_ip = gtk.CheckButton(_('Use Static IPs'))
self.chkbox_static_dns = gtk.CheckButton(_('Use Static DNS'))
self.chkbox_global_dns = gtk.CheckButton(_('Use global DNS servers'))
self.hbox_dns = gtk.HBox(False, 0)
self.hbox_dns.pack_start(self.chkbox_static_dns)
self.hbox_dns.pack_start(self.chkbox_global_dns)
# Set up the script settings button
self.script_button = gtk.Button()
script_image = gtk.Image()
script_image.set_from_stock(gtk.STOCK_EXECUTE, 4)
script_image.set_padding(4, 0)
#self.script_button.set_alignment(.5, .5)
self.script_button.set_image(script_image)
self.script_button.set_label(_('Scripts'))
self.button_hbox = gtk.HBox(False, 2)
self.button_hbox.pack_start(
self.script_button, fill=False, expand=False)
self.button_hbox.show()
self.swindow = gtk.ScrolledWindow()
self.swindow.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
self.viewport = gtk.Viewport()
self.viewport.set_shadow_type(gtk.SHADOW_NONE)
self.cvbox = gtk.VBox()
self.viewport.add(self.cvbox)
self.swindow.add(self.viewport)
# pylint: disable-msg=E1101
self.vbox.pack_start(self.swindow)
assert(isinstance(self.cvbox, gtk.VBox))
self.cvbox.pack_start(self.chkbox_static_ip, fill=False, expand=False)
self.cvbox.pack_start(self.txt_ip, fill=False, expand=False)
self.cvbox.pack_start(self.txt_netmask, fill=False, expand=False)
self.cvbox.pack_start(self.txt_gateway, fill=False, expand=False)
self.cvbox.pack_start(self.hbox_dns, fill=False, expand=False)
self.cvbox.pack_start(self.txt_domain, fill=False, expand=False)
self.cvbox.pack_start(self.txt_search_dom, fill=False, expand=False)
self.cvbox.pack_start(self.txt_dns_1, fill=False, expand=False)
self.cvbox.pack_start(self.txt_dns_2, fill=False, expand=False)
self.cvbox.pack_start(self.txt_dns_3, fill=False, expand=False)
self.cvbox.pack_start(dhcp_hostname_hbox, fill=False, expand=False)
self.cvbox.pack_end(
self.button_hbox, fill=False, expand=False, padding=5)
# Connect the events to the actions
self.chkbox_static_ip.connect("toggled", self.toggle_ip_checkbox)
self.chkbox_static_dns.connect("toggled", self.toggle_dns_checkbox)
self.chkbox_global_dns.connect(
"toggled", self.toggle_global_dns_checkbox)
self.chkbox_use_dhcp_hostname.connect(
'toggled',
self.toggle_dhcp_hostname_checkbox
)
# Start with all disabled, then they will be enabled later.
self.chkbox_static_ip.set_active(False)
self.chkbox_static_dns.set_active(False)
def set_default_size(self):
""" Set default window size. """
width, height = self.get_size()
s_height = gtk.gdk.screen_height()
if s_height < 768:
height = s_height * .75
else:
height = 600
self.resize(int(width), int(height))
def set_defaults(self, widget=None, event=None):
""" Put some default values into entries to help the user out. """
self.txt_ip.set_text(self.txt_ip.get_text().strip())
ip = self.txt_ip.get_text() # For easy typing :)
netmask = self.txt_netmask
gateway = self.txt_gateway
if misc.IsValidIP(ip):
# Only do these things if it's IPv4
if misc.IsValidIPv4(ip):
# Make sure the gateway box is blank
if stringToNone(gateway.get_text()) is None:
# Fill it in with a .1 at the end
gateway.set_text(ip[:ip.rindex('.')] + '.1')
# Make sure the netmask is blank
if stringToNone(netmask.get_text()) is None:
# Fill in the most common one
if ip.startswith('172'):
netmask.set_text('255.240.0.0')
elif ip.startswith('10'):
netmask.set_text('255.0.0.0')
else:
# 192.168 and all other cases
netmask.set_text('255.255.255.0')
elif ip != '':
error(None, _('Invalid IP address entered.'))
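    def _suggested_netmask(self, ip):
        """ Illustrative helper (not part of the original wicd code):
        mirrors the classful defaults that set_defaults() above fills in
        for private address ranges.
        """
        if ip.startswith('172'):
            return '255.240.0.0'
        elif ip.startswith('10'):
            return '255.0.0.0'
        # 192.168 and all other cases
        return '255.255.255.0'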
def reset_static_checkboxes(self):
""" Enable the right stuff. """
if stringToNone(self.txt_ip.get_text()):
self.chkbox_static_ip.set_active(True)
self.chkbox_static_dns.set_active(True)
self.chkbox_static_dns.set_sensitive(False)
else:
self.chkbox_static_ip.set_active(False)
self.chkbox_static_dns.set_sensitive(True)
if stringToNone(self.txt_dns_1.get_text()) or \
self.chkbox_global_dns.get_active():
self.chkbox_static_dns.set_active(True)
else:
self.chkbox_static_dns.set_active(False)
# This will properly disable unused boxes.
self.toggle_ip_checkbox()
self.toggle_dns_checkbox()
self.toggle_global_dns_checkbox()
def toggle_ip_checkbox(self, widget=None):
"""Toggle entries/checkboxes based on the static IP checkbox. """
# Should disable the static IP text boxes, and also enable the DNS
# checkbox when disabled and disable when enabled.
if self.chkbox_static_ip.get_active():
self.chkbox_static_dns.set_active(True)
self.chkbox_static_dns.set_sensitive(False)
else:
self.chkbox_static_dns.set_sensitive(True)
self.txt_ip.set_sensitive(self.chkbox_static_ip.get_active())
self.txt_netmask.set_sensitive(self.chkbox_static_ip.get_active())
self.txt_gateway.set_sensitive(self.chkbox_static_ip.get_active())
def toggle_dns_checkbox(self, widget=None):
""" Toggle entries and checkboxes based on the static dns checkbox. """
# Should disable the static DNS boxes
if self.chkbox_static_ip.get_active():
self.chkbox_static_dns.set_active(True)
self.chkbox_static_dns.set_sensitive(False)
self.chkbox_global_dns.set_sensitive(self.chkbox_static_dns.
get_active())
l = [self.txt_dns_1, self.txt_dns_2, self.txt_dns_3, self.txt_domain,
self.txt_search_dom]
if self.chkbox_static_dns.get_active():
# If global dns is on, don't use local dns
for w in l:
w.set_sensitive(not self.chkbox_global_dns.get_active())
else:
for w in l:
w.set_sensitive(False)
self.chkbox_global_dns.set_active(False)
def toggle_dhcp_hostname_checkbox(self, widget=None):
""" Set widget sensitivity. """
self.txt_dhcp_hostname.set_sensitive(
self.chkbox_use_dhcp_hostname.get_active())
def toggle_global_dns_checkbox(self, widget=None):
""" Set the DNS entries' sensitivity based on the Global checkbox. """
global_dns_active = daemon.GetUseGlobalDNS()
if not global_dns_active and self.chkbox_global_dns.get_active():
error(
None,
_('Global DNS has not been enabled in general preferences.')
)
self.chkbox_global_dns.set_active(False)
if daemon.GetUseGlobalDNS() and self.chkbox_static_dns.get_active():
for w in [self.txt_dns_1, self.txt_dns_2, self.txt_dns_3,
self.txt_domain, self.txt_search_dom]:
w.set_sensitive(not self.chkbox_global_dns.get_active())
def toggle_encryption(self, widget=None):
""" Toggle the encryption combobox based on the encryption checkbox. """
active = self.chkbox_encryption.get_active()
self.vbox_encrypt_info.set_sensitive(active)
self.combo_encryption.set_sensitive(active)
def destroy_called(self, *args):
""" Clean up everything. """
super(AdvancedSettingsDialog, self).destroy()
self.destroy()
del self
def save_settings(self, networkid=None):
""" Save settings common to wired and wireless settings dialogs. """
if self.chkbox_static_ip.get_active():
self.set_net_prop("ip", noneToString(self.txt_ip.get_text()))
self.set_net_prop(
"netmask", noneToString(self.txt_netmask.get_text()))
self.set_net_prop(
"gateway", noneToString(self.txt_gateway.get_text()))
else:
self.set_net_prop("ip", '')
self.set_net_prop("netmask", '')
self.set_net_prop("gateway", '')
if self.chkbox_static_dns.get_active() and \
not self.chkbox_global_dns.get_active():
self.set_net_prop('use_static_dns', True)
self.set_net_prop('use_global_dns', False)
self.set_net_prop(
'dns_domain', noneToString(self.txt_domain.get_text()))
self.set_net_prop(
"search_domain", noneToString(self.txt_search_dom.get_text()))
self.set_net_prop("dns1", noneToString(self.txt_dns_1.get_text()))
self.set_net_prop("dns2", noneToString(self.txt_dns_2.get_text()))
self.set_net_prop("dns3", noneToString(self.txt_dns_3.get_text()))
elif self.chkbox_static_dns.get_active() and \
self.chkbox_global_dns.get_active():
self.set_net_prop('use_static_dns', True)
self.set_net_prop('use_global_dns', True)
else:
self.set_net_prop('use_static_dns', False)
self.set_net_prop('use_global_dns', False)
self.set_net_prop('dns_domain', '')
self.set_net_prop("search_domain", '')
self.set_net_prop("dns1", '')
self.set_net_prop("dns2", '')
self.set_net_prop("dns3", '')
self.set_net_prop('usedhcphostname',
self.chkbox_use_dhcp_hostname.get_active())
self.set_net_prop(
"dhcphostname",noneToString(self.txt_dhcp_hostname.get_text()))
def change_encrypt_method(self, widget=None):
""" Load all the entries for a given encryption method. """
for z in self.vbox_encrypt_info:
z.destroy() # Remove stuff in there already
ID = self.combo_encryption.get_active()
methods = self.encrypt_types
self.encryption_info = {}
# If nothing is selected, select the first entry.
if ID == -1:
self.combo_encryption.set_active(0)
ID = 0
for type_ in ['required', 'optional']:
fields = methods[ID][type_]
for field in fields:
try:
field_text = language[field[1].lower().replace(' ', '_')]
except KeyError:
field_text = field[1].replace(' ', '_')
if field in methods[ID]['protected']:
box = ProtectedLabelEntry(field_text)
else:
box = LabelEntry(field_text)
self.vbox_encrypt_info.pack_start(box)
# Add the data to a dict, so that the information
# can be easily accessed by giving the name of the wanted
# data.
self.encryption_info[field[0]] = [box, type_]
if self.wired:
box.entry.set_text(noneToBlankString(
wired.GetWiredProperty(field[0])))
else:
box.entry.set_text(noneToBlankString(
wireless.GetWirelessProperty(self.networkID, field[0])))
self.vbox_encrypt_info.show_all()
class WiredSettingsDialog(AdvancedSettingsDialog):
""" Wired settings dialog. """
def __init__(self, name):
""" Build the wired settings dialog. """
AdvancedSettingsDialog.__init__(self, _('Wired Network'))
# So we can test if we are wired or wireless (for
# change_encrypt_method())
self.wired = True
## This section is largely copied from WirelessSettingsDialog, but with
## some changes
# Set up encryption stuff
self.combo_encryption = gtk.combo_box_new_text()
self.chkbox_encryption = gtk.CheckButton(_('Use Encryption'))
# Make the vbox to hold the encryption stuff.
self.vbox_encrypt_info = gtk.VBox(False, 0)
self.chkbox_encryption.set_active(
bool(wired.GetWiredProperty('encryption_enabled')))
self.combo_encryption.set_sensitive(False)
self.encrypt_types = misc.LoadEncryptionMethods(wired=True)
# Build the encryption menu
for x, enc_type in enumerate(self.encrypt_types):
self.combo_encryption.append_text(enc_type['name'])
self.combo_encryption.set_active(0)
self.change_encrypt_method()
self.toggle_encryption()
self.cvbox.pack_start(self.chkbox_encryption, False, False)
self.cvbox.pack_start(self.combo_encryption, False, False)
self.cvbox.pack_start(self.vbox_encrypt_info, False, False)
# Connect signals.
self.chkbox_encryption.connect("toggled", self.toggle_encryption)
self.combo_encryption.connect("changed", self.change_encrypt_method)
self.des = self.connect("destroy", self.destroy_called)
self.script_button.connect("clicked", self.edit_scripts)
self.prof_name = name
def set_net_prop(self, option, value):
""" Sets the given option to the given value for this network. """
wired.SetWiredProperty(option, value)
def edit_scripts(self, widget=None, event=None):
""" Launch the script editting dialog. """
profile = self.prof_name
cmdend = [os.path.join(wpath.gtk, "configscript.py"), profile, "wired"]
if os.getuid() != 0:
cmdbase = misc.get_sudo_cmd(
_('You must enter your password to configure scripts'),
prog_num=daemon.GetSudoApp()
)
if not cmdbase:
error(None,
_('Could not find a graphical sudo program. '
'The script editor could not be launched. '
"You'll have to edit scripts directly your configuration "
"file.")
)
return
cmdbase.extend(cmdend)
misc.LaunchAndWait(cmdbase)
else:
misc.LaunchAndWait(cmdend)
def set_values(self):
""" Fill in the Gtk.Entry objects with the correct values. """
self.txt_ip.set_text(self.format_entry("ip"))
self.txt_netmask.set_text(self.format_entry("netmask"))
self.txt_gateway.set_text(self.format_entry("gateway"))
self.txt_dns_1.set_text(self.format_entry("dns1"))
self.txt_dns_2.set_text(self.format_entry("dns2"))
self.txt_dns_3.set_text(self.format_entry("dns3"))
self.txt_domain.set_text(self.format_entry("dns_domain"))
self.txt_search_dom.set_text(self.format_entry("search_domain"))
self.chkbox_global_dns.set_active(
bool(wired.GetWiredProperty("use_global_dns")))
dhcphname = wired.GetWiredProperty("dhcphostname")
if dhcphname is None:
dhcphname = os.uname()[1]
self.txt_dhcp_hostname.set_text(dhcphname)
self.reset_static_checkboxes()
self.chkbox_encryption.set_active(
bool(wired.GetWiredProperty('encryption_enabled')))
self.change_encrypt_method()
self.toggle_encryption()
def save_settings(self, networkid=None):
""" Save settings to disk. """
# Check encryption info
encrypt_info = self.encryption_info
self.set_net_prop(
"encryption_enabled",
self.chkbox_encryption.get_active()
)
if self.chkbox_encryption.get_active():
print "setting encryption info..."
encrypt_methods = self.encrypt_types
self.set_net_prop(
"enctype",
encrypt_methods[self.combo_encryption.get_active()]['type']
)
# Make sure all required fields are filled in.
for entry_info in encrypt_info.itervalues():
if entry_info[0].entry.get_text() == "" and \
entry_info[1] == 'required':
error(
self,
"%s (%s)" % (
_('Required encryption information is missing.'),
entry_info[0].label.get_label()
)
)
return False
# Now save all the entries.
for entry_key, entry_info in encrypt_info.iteritems():
self.set_net_prop(entry_key,
noneToString(entry_info[0].entry.get_text()))
else:
print "no encryption specified..."
self.set_net_prop("enctype", "None")
AdvancedSettingsDialog.save_settings(self)
wired.SaveWiredNetworkProfile(self.prof_name)
return True
def format_entry(self, label):
""" Helper method to fetch and format wired properties. """
return noneToBlankString(wired.GetWiredProperty(label))
def destroy_called(self, *args):
""" Clean up everything. """
self.disconnect(self.des)
super(WiredSettingsDialog, self).destroy_called()
self.destroy()
del self
class WirelessSettingsDialog(AdvancedSettingsDialog):
""" Wireless settings dialog. """
def __init__(self, networkID):
""" Build the wireless settings dialog. """
AdvancedSettingsDialog.__init__(
self, wireless.GetWirelessProperty(networkID, 'essid'))
# So we can test if we are wired or wireless (for
# change_encrypt_method())
self.wired = False
# Set up encryption stuff
self.networkID = networkID
self.combo_encryption = gtk.combo_box_new_text()
self.chkbox_encryption = gtk.CheckButton(_('Use Encryption'))
self.chkbox_global_settings = gtk.CheckButton(
_('Use these settings for all networks sharing this essid'))
rate_vbox = gtk.VBox(False, 0)
self.combo_rate = LabelCombo(_('Wireless bitrate'))
rates = gtk.ListStore(str)
self.bitrates = wireless.GetAvailableBitrates()
self.bitrates.append('auto')
for br in self.bitrates:
rates.append((br,))
self.combo_rate.set_model(rates)
self.chkbox_lower_rate = gtk.CheckButton(_('Allow lower bitrates'))
rate_vbox.pack_start(self.combo_rate)
rate_vbox.pack_start(self.chkbox_lower_rate)
# Make the vbox to hold the encryption stuff.
self.vbox_encrypt_info = gtk.VBox(False, 0)
self.toggle_encryption()
self.chkbox_encryption.set_active(False)
self.combo_encryption.set_sensitive(False)
self.encrypt_types = misc.LoadEncryptionMethods()
information_button = gtk.Button(stock=gtk.STOCK_INFO)
self.button_hbox.pack_start(information_button, False, False)
information_button.connect(
'clicked',
lambda *a, **k: WirelessInformationDialog(networkID, self)
)
information_button.show()
# Build the encryption menu
activeID = -1 # Set the menu to this item when we are done
for x, enc_type in enumerate(self.encrypt_types):
self.combo_encryption.append_text(enc_type['name'])
if enc_type['type'] == \
wireless.GetWirelessProperty(networkID, "enctype"):
activeID = x
self.combo_encryption.set_active(activeID)
if activeID != -1:
self.chkbox_encryption.set_active(True)
self.combo_encryption.set_sensitive(True)
self.vbox_encrypt_info.set_sensitive(True)
else:
self.combo_encryption.set_active(0)
self.change_encrypt_method()
self.cvbox.pack_start(rate_vbox, False, False)
self.cvbox.pack_start(self.chkbox_global_settings, False, False)
self.cvbox.pack_start(self.chkbox_encryption, False, False)
self.cvbox.pack_start(self.combo_encryption, False, False)
self.cvbox.pack_start(self.vbox_encrypt_info, False, False)
# Connect signals.
self.chkbox_encryption.connect("toggled", self.toggle_encryption)
self.combo_encryption.connect("changed", self.change_encrypt_method)
self.script_button.connect("clicked", self.edit_scripts)
self.des = self.connect("destroy", self.destroy_called)
def destroy_called(self, *args):
""" Clean up everything. """
self.disconnect(self.des)
super(WirelessSettingsDialog, self).destroy_called()
self.destroy()
del self
def edit_scripts(self, widget=None, event=None):
""" Launch the script editting dialog. """
cmdend = [os.path.join(wpath.gtk, "configscript.py"),
str(self.networkID), "wireless"]
if os.getuid() != 0:
cmdbase = misc.get_sudo_cmd(
_('You must enter your password to configure scripts'),
prog_num=daemon.GetSudoApp()
)
if not cmdbase:
error(
None,
_('Could not find a graphical sudo program. '
'The script editor could not be launched. '
"You'll have to edit scripts directly your "
"configuration file.")
)
return
cmdbase.extend(cmdend)
misc.LaunchAndWait(cmdbase)
else:
misc.LaunchAndWait(cmdend)
def set_net_prop(self, option, value):
""" Sets the given option to the given value for this network. """
wireless.SetWirelessProperty(self.networkID, option, value)
def set_values(self):
""" Set the various network settings to the right values. """
networkID = self.networkID
self.txt_ip.set_text(self.format_entry(networkID, "ip"))
self.txt_netmask.set_text(self.format_entry(networkID, "netmask"))
self.txt_gateway.set_text(self.format_entry(networkID, "gateway"))
self.chkbox_global_dns.set_active(
bool(wireless.GetWirelessProperty(networkID, 'use_global_dns')))
self.chkbox_static_dns.set_active(
bool(wireless.GetWirelessProperty(networkID, 'use_static_dns')))
self.txt_dns_1.set_text(self.format_entry(networkID, "dns1"))
self.txt_dns_2.set_text(self.format_entry(networkID, "dns2"))
self.txt_dns_3.set_text(self.format_entry(networkID, "dns3"))
self.txt_domain.set_text(self.format_entry(networkID, "dns_domain"))
self.txt_search_dom.set_text(
self.format_entry(networkID, "search_domain"))
self.reset_static_checkboxes()
self.chkbox_encryption.set_active(
bool(wireless.GetWirelessProperty(networkID, 'encryption')))
self.chkbox_global_settings.set_active(
bool(
wireless.GetWirelessProperty(networkID, 'use_settings_globally')
)
)
self.chkbox_use_dhcp_hostname.set_active(
bool(wireless.GetWirelessProperty(networkID, 'usedhcphostname')))
dhcphname = wireless.GetWirelessProperty(networkID, "dhcphostname")
if dhcphname is None:
dhcphname = os.uname()[1]
self.txt_dhcp_hostname.set_text(dhcphname)
self.toggle_dhcp_hostname_checkbox()
# TODO: get the value from the config
chosen_bitrate = wireless.GetWirelessProperty(networkID, 'bitrate')
if chosen_bitrate not in self.bitrates:
chosen_bitrate = 'auto'
self.combo_rate.set_active(self.bitrates.index(chosen_bitrate))
self.chkbox_lower_rate.set_active(
bool(
wireless.GetWirelessProperty(networkID, 'allow_lower_bitrates')
)
)
activeID = -1 # Set the menu to this item when we are done
user_enctype = wireless.GetWirelessProperty(networkID, "enctype")
for x, enc_type in enumerate(self.encrypt_types):
if enc_type['type'] == user_enctype:
activeID = x
self.combo_encryption.set_active(activeID)
if activeID != -1:
self.chkbox_encryption.set_active(True)
self.combo_encryption.set_sensitive(True)
self.vbox_encrypt_info.set_sensitive(True)
else:
self.combo_encryption.set_active(0)
self.change_encrypt_method()
def save_settings(self, networkid=None):
# Check encryption info
encrypt_info = self.encryption_info
if self.chkbox_encryption.get_active():
print "setting encryption info..."
encrypt_methods = self.encrypt_types
self.set_net_prop(
"enctype",
encrypt_methods[self.combo_encryption.get_active()]['type']
)
# Make sure all required fields are filled in.
for entry_info in encrypt_info.itervalues():
if entry_info[0].entry.get_text() == "" and \
entry_info[1] == 'required':
error(
self,
"%s (%s)" % (
_('Required encryption information is missing.'),
entry_info[0].label.get_label()
)
)
return False
# Now save all the entries.
for entry_key, entry_info in encrypt_info.iteritems():
self.set_net_prop(entry_key,
noneToString(entry_info[0].entry.get_text()))
elif not self.chkbox_encryption.get_active() and \
wireless.GetWirelessProperty(networkid, "encryption"):
# Encrypt checkbox is off, but the network needs it.
error(self, _('This network requires encryption to be enabled.'))
return False
else:
print "no encryption specified..."
self.set_net_prop("enctype", "None")
AdvancedSettingsDialog.save_settings(self)
if self.chkbox_global_settings.get_active():
self.set_net_prop('use_settings_globally', True)
else:
self.set_net_prop('use_settings_globally', False)
wireless.RemoveGlobalEssidEntry(networkid)
if self.combo_rate.get_active() == -1:
self.set_net_prop('bitrate', 'auto')
else:
self.set_net_prop(
'bitrate',
self.bitrates[self.combo_rate.get_active()]
)
self.set_net_prop(
'allow_lower_bitrates',
bool(self.chkbox_lower_rate.get_active())
)
wireless.SaveWirelessNetworkProfile(networkid)
return True
def format_entry(self, networkid, label):
""" Helper method for fetching/formatting wireless properties. """
return noneToBlankString(wireless.GetWirelessProperty(networkid, label))
class NetworkEntry(gtk.HBox):
""" Network entry. """
def __init__(self):
""" Base network entry class.
Provides gtk objects used by both the WiredNetworkEntry and
WirelessNetworkEntry classes.
"""
setup_dbus()
gtk.HBox.__init__(self, False, 2)
self.image = gtk.Image()
self.pack_start(self.image, False, False)
# Create an HBox to hold the buttons
self.buttons_hbox = gtk.HBox(False, 6)
# Set up the Connect button
self.connect_button = gtk.Button(stock=gtk.STOCK_CONNECT)
self.connect_hbox = gtk.HBox(False, 2)
self.connect_hbox.pack_start(self.connect_button, False, False)
self.connect_hbox.show()
# Set up the Disconnect button
self.disconnect_button = gtk.Button(stock=gtk.STOCK_DISCONNECT)
self.connect_hbox.pack_start(self.disconnect_button, False, False)
# Create a label to hold the name of the entry
self.name_label = gtk.Label()
self.name_label.set_alignment(0, 0.5)
# Set up the VBox that goes in the gtk.Expander
self.expander_vbox = gtk.VBox(False, 1)
self.expander_vbox.show()
self.pack_end(self.expander_vbox)
# Set up the advanced settings button
self.advanced_button = gtk.Button()
self.advanced_image = gtk.Image()
self.advanced_image.set_from_stock(gtk.STOCK_EDIT, 4)
self.advanced_image.set_padding(4, 0)
self.advanced_button.set_alignment(.5, .5)
self.advanced_button.set_label(_('Properties'))
self.advanced_button.set_image(self.advanced_image)
self.buttons_hbox.pack_start(self.connect_hbox, False, False)
self.buttons_hbox.pack_start(self.advanced_button, False, False)
self.vbox_top = gtk.VBox(False, 0)
self.expander_vbox.pack_start(self.name_label)
self.expander_vbox.pack_start(self.vbox_top)
self.expander_vbox.pack_start(self.buttons_hbox)
def destroy_called(self, *args):
""" Clean up everything. """
super(NetworkEntry, self).destroy()
self.destroy()
del self
class WiredNetworkEntry(NetworkEntry):
""" Wired network entry. """
def __init__(self):
""" Load the wired network entry. """
NetworkEntry.__init__(self)
# Center the picture and pad it a bit
self.image.set_padding(0, 0)
self.image.set_alignment(.5, .5)
self.image.set_size_request(60, -1)
self.image.set_from_icon_name("wired-gui", gtk.ICON_SIZE_DND)
self.image.show()
self.connect_button.show()
self.name_label.set_use_markup(True)
self.name_label.set_label("<b>" + _('Wired Network') + "</b>")
self.is_full_gui = True
self.button_add = gtk.Button(stock=gtk.STOCK_ADD)
self.button_delete = gtk.Button(stock=gtk.STOCK_DELETE)
self.profile_help = gtk.Label(
_('To connect to a wired network, you must create a network '
'profile. To create a network profile, type a name that describes '
'this network, and press Add.')
)
self.chkbox_default_profile = gtk.CheckButton(
_('Use as default profile (overwrites any previous default)'))
self.combo_profile_names = gtk.combo_box_new_text()
# Format the profile help label.
self.profile_help.set_justify(gtk.JUSTIFY_LEFT)
self.profile_help.set_line_wrap(True)
# Pack the various VBox objects.
self.hbox_temp = gtk.HBox(False, 0)
self.hbox_def = gtk.HBox(False, 0)
self.vbox_top.pack_start(self.profile_help, True, True)
self.vbox_top.pack_start(self.hbox_def)
self.vbox_top.pack_start(self.hbox_temp)
self.hbox_temp.pack_start(self.combo_profile_names, True, True)
self.hbox_temp.pack_start(self.button_add, False, False)
self.hbox_temp.pack_start(self.button_delete, False, False)
self.hbox_def.pack_start(self.chkbox_default_profile, False, False)
# Connect events
self.button_add.connect("clicked", self.add_profile)
self.button_delete.connect("clicked", self.remove_profile)
self.chkbox_default_profile.connect("toggled",
self.toggle_default_profile)
self.combo_profile_names.connect("changed", self.change_profile)
# Build profile list.
self.profile_list = wired.GetWiredProfileList()
default_prof = wired.GetDefaultWiredNetwork()
if self.profile_list:
starting_index = 0
for x, prof in enumerate(self.profile_list):
self.combo_profile_names.append_text(prof)
if default_prof == prof:
starting_index = x
self.combo_profile_names.set_active(starting_index)
else:
print "no wired profiles found"
self.profile_help.show()
self.advanced_dialog = \
WiredSettingsDialog(self.combo_profile_names.get_active_text())
# Show everything, but hide the profile help label.
self.show_all()
self.profile_help.hide()
# Toggle the default profile checkbox to the correct state.
if to_bool(wired.GetWiredProperty("default")):
self.chkbox_default_profile.set_active(True)
else:
self.chkbox_default_profile.set_active(False)
self.check_enable()
self.wireddis = self.connect("destroy", self.destroy_called)
def destroy_called(self, *args):
""" Clean up everything. """
self.disconnect(self.wireddis)
self.advanced_dialog.destroy_called()
del self.advanced_dialog
super(WiredNetworkEntry, self).destroy_called()
self.destroy()
del self
def save_wired_settings(self):
""" Save wired network settings. """
return self.advanced_dialog.save_settings()
def check_enable(self):
""" Disable objects if the profile list is empty. """
profile_list = wired.GetWiredProfileList()
if not profile_list:
self.button_delete.set_sensitive(False)
self.connect_button.set_sensitive(False)
self.advanced_button.set_sensitive(False)
def update_connect_button(self, state, apbssid=None):
""" Update the connection/disconnect button for this entry. """
if state == misc.WIRED:
self.disconnect_button.show()
self.connect_button.hide()
else:
self.disconnect_button.hide()
self.connect_button.show()
def add_profile(self, widget):
""" Add a profile to the profile list. """
response = string_input(
"Enter a profile name", "The profile name will not be used by the "
"computer. It allows you to easily distinguish between different "
"network profiles.",
"Profile name:"
).strip()
# if response is "" or None
if not response:
error(None, "Invalid profile name", block=True)
return False
profile_name = response
profile_list = wired.GetWiredProfileList()
if profile_list:
if profile_name in profile_list:
return False
self.profile_help.hide()
wired.CreateWiredNetworkProfile(profile_name, False)
self.combo_profile_names.prepend_text(profile_name)
self.combo_profile_names.set_active(0)
self.advanced_dialog.prof_name = profile_name
if self.is_full_gui:
self.button_delete.set_sensitive(True)
self.connect_button.set_sensitive(True)
self.advanced_button.set_sensitive(True)
def remove_profile(self, widget):
""" Remove a profile from the profile list. """
print "removing profile"
profile_name = self.combo_profile_names.get_active_text()
wired.DeleteWiredNetworkProfile(profile_name)
self.combo_profile_names.remove_text(self.combo_profile_names.
get_active())
self.combo_profile_names.set_active(0)
self.advanced_dialog.prof_name = \
self.combo_profile_names.get_active_text()
if not wired.GetWiredProfileList():
self.profile_help.show()
entry = self.combo_profile_names.child
entry.set_text("")
if self.is_full_gui:
self.button_delete.set_sensitive(False)
self.advanced_button.set_sensitive(False)
self.connect_button.set_sensitive(False)
else:
self.profile_help.hide()
def toggle_default_profile(self, widget):
""" Change the default profile. """
if self.chkbox_default_profile.get_active():
# Make sure there is only one default profile at a time
wired.UnsetWiredDefault()
wired.SetWiredProperty("default",
self.chkbox_default_profile.get_active())
wired.SaveWiredNetworkProfile(
self.combo_profile_names.get_active_text())
def change_profile(self, widget):
""" Called when a new profile is chosen from the list. """
# Make sure the name doesn't change every time someone types something
if self.combo_profile_names.get_active() > -1:
if not self.is_full_gui:
return
profile_name = self.combo_profile_names.get_active_text()
wired.ReadWiredNetworkProfile(profile_name)
if hasattr(self, 'advanced_dialog'):
self.advanced_dialog.prof_name = profile_name
self.advanced_dialog.set_values()
is_default = wired.GetWiredProperty("default")
self.chkbox_default_profile.set_active(to_bool(is_default))
def format_entry(self, label):
""" Help method for fetching/formatting wired properties. """
return noneToBlankString(wired.GetWiredProperty(label))
class WirelessNetworkEntry(NetworkEntry):
""" Wireless network entry. """
def __init__(self, networkID):
""" Build the wireless network entry. """
NetworkEntry.__init__(self)
self.networkID = networkID
self.image.set_padding(0, 0)
self.image.set_alignment(.5, .5)
self.image.set_size_request(60, -1)
self.image.show()
self.essid = noneToBlankString(
wireless.GetWirelessProperty(networkID, "essid"))
self.lbl_strength = GreyLabel()
self.lbl_encryption = GreyLabel()
self.lbl_channel = GreyLabel()
print "ESSID : " + self.essid
self.chkbox_autoconnect = gtk.CheckButton(
_('Automatically connect to this network'))
self.chkbox_neverconnect = gtk.CheckButton(
_('Never connect to this network'))
self.set_signal_strength(
wireless.GetWirelessProperty(networkID, 'quality'),
wireless.GetWirelessProperty(networkID, 'strength')
)
self.set_encryption(
wireless.GetWirelessProperty(networkID, 'encryption'),
wireless.GetWirelessProperty(networkID, 'encryption_method')
)
self.set_channel(wireless.GetWirelessProperty(networkID, 'channel'))
self.name_label.set_use_markup(True)
self.name_label.set_label(
"<b>%s</b> %s %s %s" % (
self._escape(self.essid),
self.lbl_strength.get_label(),
self.lbl_encryption.get_label(),
self.lbl_channel.get_label(),
)
)
# Add the wireless network specific parts to the NetworkEntry
# VBox objects.
self.vbox_top.pack_start(self.chkbox_autoconnect, False, False)
self.vbox_top.pack_start(self.chkbox_neverconnect, False, False)
if to_bool(self.format_entry(networkID, "automatic")):
self.chkbox_autoconnect.set_active(True)
else:
self.chkbox_autoconnect.set_active(False)
if to_bool(self.format_entry(networkID, "never")):
self.chkbox_autoconnect.set_sensitive(False)
self.connect_button.set_sensitive(False)
self.chkbox_neverconnect.set_active(True)
else:
self.chkbox_neverconnect.set_active(False)
# Connect signals.
self.chkbox_autoconnect.connect("toggled", self.update_autoconnect)
self.chkbox_neverconnect.connect("toggled", self.update_neverconnect)
# Show everything
self.show_all()
self.advanced_dialog = WirelessSettingsDialog(networkID)
self.wifides = self.connect("destroy", self.destroy_called)
def _escape(self, val):
""" Escapes special characters so they're displayed correctly. """
return val.replace("&", "&"). \
replace("<", "<"). \
replace(">", ">"). \
replace("'", "'"). \
replace('"', """)
def save_wireless_settings(self, networkid):
""" Save wireless network settings. """
return self.advanced_dialog.save_settings(networkid)
def update_autoconnect(self, widget=None):
""" Called when the autoconnect checkbox is toggled. """
wireless.SetWirelessProperty(
self.networkID,
"automatic",
noneToString(self.chkbox_autoconnect.get_active())
)
wireless.SaveWirelessNetworkProperty(self.networkID, "automatic")
def update_neverconnect(self, widget=None):
""" Called when the neverconnect checkbox is toggled. """
wireless.SetWirelessProperty(
self.networkID,
"never",
noneToString(self.chkbox_neverconnect.get_active())
)
wireless.SaveWirelessNetworkProperty(self.networkID, "never")
if self.chkbox_neverconnect.get_active():
self.chkbox_autoconnect.set_sensitive(False)
self.connect_button.set_sensitive(False)
else:
self.chkbox_autoconnect.set_sensitive(True)
self.connect_button.set_sensitive(True)
def destroy_called(self, *args):
""" Clean up everything. """
self.disconnect(self.wifides)
self.advanced_dialog.destroy_called()
del self.advanced_dialog
super(WirelessNetworkEntry, self).destroy_called()
self.destroy()
del self
def update_connect_button(self, state, apbssid):
""" Update the connection/disconnect button for this entry. """
if to_bool(self.format_entry(self.networkID, "never")):
self.connect_button.set_sensitive(False)
if not apbssid:
apbssid = wireless.GetApBssid()
if state == misc.WIRELESS and \
apbssid == wireless.GetWirelessProperty(self.networkID, "bssid"):
self.disconnect_button.show()
self.connect_button.hide()
else:
self.disconnect_button.hide()
self.connect_button.show()
def set_signal_strength(self, strength, dbm_strength):
""" Set the signal strength displayed in the WirelessNetworkEntry. """
if strength:
strength = int(strength)
else:
strength = -1
if dbm_strength:
dbm_strength = int(dbm_strength)
else:
dbm_strength = -100
display_type = daemon.GetSignalDisplayType()
if daemon.GetWPADriver() == 'ralink legacy' or display_type == 1:
# Use the -xx dBm signal strength to display a signal icon
# I'm not sure how accurately the dBm strength is being
# "converted" to strength bars, so suggestions from people
# for a better way would be welcome.
if dbm_strength >= -60:
signal_img = 'signal-100'
elif dbm_strength >= -70:
signal_img = 'signal-75'
elif dbm_strength >= -80:
signal_img = 'signal-50'
else:
signal_img = 'signal-25'
ending = "dBm"
disp_strength = str(dbm_strength)
else:
# Uses normal link quality, should be fine in most cases
if strength > 75:
signal_img = 'signal-100'
elif strength > 50:
signal_img = 'signal-75'
elif strength > 25:
signal_img = 'signal-50'
else:
signal_img = 'signal-25'
ending = "%"
disp_strength = str(strength)
self.image.set_from_icon_name(signal_img, gtk.ICON_SIZE_DND)
self.lbl_strength.set_label(disp_strength + ending)
self.image.show()
def set_encryption(self, on, ttype):
""" Set the encryption value for the WirelessNetworkEntry. """
if on and ttype:
self.lbl_encryption.set_label(str(ttype))
if on and not ttype:
self.lbl_encryption.set_label(_('Secured'))
if not on:
self.lbl_encryption.set_label(_('Unsecured'))
def set_channel(self, channel):
""" Set the channel value for the WirelessNetworkEntry. """
self.lbl_channel.set_label(_('Channel') + ' ' + str(channel))
def format_entry(self, networkid, label):
""" Helper method for fetching/formatting wireless properties. """
return noneToBlankString(wireless.GetWirelessProperty(networkid, label))
class WirelessInformationDialog(gtk.Dialog):
""" Wireless information dialog. """
def __init__(self, networkID, parent):
gtk.Dialog.__init__(self, parent=parent)
# Make the combo box.
self.lbl_strength = gtk.Label()
self.lbl_strength.set_alignment(0, 0.5)
self.lbl_encryption = gtk.Label()
self.lbl_encryption.set_alignment(0, 0.5)
self.lbl_mac = gtk.Label()
self.lbl_mac.set_alignment(0, 0.5)
self.lbl_channel = gtk.Label()
self.lbl_channel.set_alignment(0, 0.5)
self.lbl_mode = gtk.Label()
self.lbl_mode.set_alignment(0, 0.5)
self.hbox_status = gtk.HBox(False, 5)
# Set the values of the network info labels.
self.set_signal_strength(
wireless.GetWirelessProperty(networkID, 'quality'),
wireless.GetWirelessProperty(networkID, 'strength')
)
self.set_mac_address(wireless.GetWirelessProperty(networkID, 'bssid'))
self.set_mode(wireless.GetWirelessProperty(networkID, 'mode'))
self.set_channel(wireless.GetWirelessProperty(networkID, 'channel'))
self.set_encryption(
wireless.GetWirelessProperty(networkID, 'encryption'),
wireless.GetWirelessProperty(networkID, 'encryption_method')
)
self.set_title('Network Information')
vbox = self.vbox
self.set_has_separator(False)
table = gtk.Table(5, 2)
table.set_col_spacings(12)
# pylint: disable-msg=E1101
vbox.pack_start(table)
# Pack the network status HBox.
table.attach(LeftAlignedLabel('Signal strength:'), 0, 1, 0, 1)
table.attach(self.lbl_strength, 1, 2, 0, 1)
table.attach(LeftAlignedLabel('Encryption type:'), 0, 1, 1, 2)
table.attach(self.lbl_encryption, 1, 2, 1, 2)
table.attach(LeftAlignedLabel('Access point address:'), 0, 1, 2, 3)
table.attach(self.lbl_mac, 1, 2, 2, 3)
table.attach(LeftAlignedLabel('Mode:'), 0, 1, 3, 4)
table.attach(self.lbl_mode, 1, 2, 3, 4)
table.attach(LeftAlignedLabel('Channel:'), 0, 1, 4, 5)
table.attach(self.lbl_channel, 1, 2, 4, 5)
# pylint: disable-msg=E1101
vbox.show_all()
self.add_button(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE)
self.show()
self.run()
self.destroy()
def set_signal_strength(self, strength, dbm_strength):
""" Set the signal strength displayed in the WirelessNetworkEntry. """
if strength is not None:
strength = int(strength)
else:
strength = -1
if dbm_strength is not None:
dbm_strength = int(dbm_strength)
else:
dbm_strength = -100
display_type = daemon.GetSignalDisplayType()
if daemon.GetWPADriver() == 'ralink legacy' or display_type == 1:
# Use the -xx dBm signal strength to display a signal icon
# I'm not sure how accurately the dBm strength is being
# "converted" to strength bars, so suggestions from people
# for a better way would be welcome.
if dbm_strength >= -60:
signal_img = 'signal-100.png'
elif dbm_strength >= -70:
signal_img = 'signal-75.png'
elif dbm_strength >= -80:
signal_img = 'signal-50.png'
else:
signal_img = 'signal-25.png'
ending = "dBm"
disp_strength = str(dbm_strength)
else:
# Uses normal link quality, should be fine in most cases
if strength > 75:
signal_img = 'signal-100.png'
elif strength > 50:
signal_img = 'signal-75.png'
elif strength > 25:
signal_img = 'signal-50.png'
else:
signal_img = 'signal-25.png'
ending = "%"
disp_strength = str(strength)
self.lbl_strength.set_label(disp_strength + ending)
def set_mac_address(self, address):
""" Set the MAC address for the WirelessNetworkEntry. """
self.lbl_mac.set_label(str(address))
def set_encryption(self, on, ttype):
""" Set the encryption value for the WirelessNetworkEntry. """
if on and ttype:
self.lbl_encryption.set_label(str(ttype))
if on and not ttype:
self.lbl_encryption.set_label(_('Secured'))
if not on:
self.lbl_encryption.set_label(_('Unsecured'))
def set_channel(self, channel):
""" Set the channel value for the WirelessNetworkEntry. """
self.lbl_channel.set_label(_('Channel') + ' ' + str(channel))
def set_mode(self, mode):
""" Set the mode value for the WirelessNetworkEntry. """
self.lbl_mode.set_label(str(mode))
def format_entry(self, networkid, label):
""" Helper method for fetching/formatting wireless properties. """
return noneToBlankString(wireless.GetWirelessProperty(networkid, label))
| johnboiles/wicd | gtk/netentry.py | Python | gpl-2.0 | 53,597 |
from .core import ( # noqa
AssetID,
AssetIDPlusDay,
EPOCH,
ExplodingObject,
FakeDataPortal,
FetcherDataPortal,
MockDailyBarReader,
OpenPrice,
RecordBatchBlotter,
add_security_data,
all_pairs_matching_predicate,
all_subindices,
assert_single_position,
assert_timestamp_equal,
check_allclose,
check_arrays,
chrange,
create_daily_df_for_asset,
create_data_portal,
create_data_portal_from_trade_history,
create_empty_splits_mergers_frame,
create_minute_bar_data,
create_minute_df_for_asset,
drain_zipline,
empty_asset_finder,
empty_assets_db,
make_alternating_boolean_array,
make_cascading_boolean_array,
make_test_handler,
make_trade_data_for_asset_info,
parameter_space,
patch_os_environment,
patch_read_csv,
permute_rows,
powerset,
prices_generating_returns,
product_upper_triangle,
read_compressed,
seconds_to_timestamp,
security_list_copy,
simulate_minutes_for_day,
str_to_seconds,
subtest,
temp_pipeline_engine,
test_resource_path,
tmp_asset_finder,
tmp_assets_db,
tmp_bcolz_equity_minute_bar_reader,
tmp_dir,
to_series,
to_utc,
trades_by_sid_to_dfs,
write_bcolz_minute_data,
write_compressed,
)
from .fixtures import ZiplineTestCase # noqa
| quantopian/zipline | zipline/testing/__init__.py | Python | apache-2.0 | 1,361 |
"Fits an example function"
from __future__ import division
from numpy import logspace, log, log10, random
from gpfit.fit import fit
# fixed initial guess for fitting
random.seed(33404)
u = logspace(0, log10(3), 101)
w = (u**2 + 3) / (u + 1)**2
x = log(u)
y = log(w)
K = 3
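# K is the number of terms in each fitted function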
cMA, errorMA = fit(x, y, K, "MA")
cSMA, errorSMA = fit(x, y, K, "SMA")
cISMA, errorISMA = fit(x, y, K, "ISMA")
print("MA RMS Error: %.5g" % errorMA)
print("SMA RMS Error: %.5g" % errorSMA)
print("ISMA RMS Error: %.5g" % errorISMA)
| convexopt/gpfit | docs/source/examples/hoburgabbeel_ex6_1.py | Python | mit | 507 |
"""
Generate CPU features tables from CCompilerOpt
"""
from os import sys, path
from numpy.distutils.ccompiler_opt import CCompilerOpt
class FakeCCompilerOpt(CCompilerOpt):
# disable caching, no need for it
conf_nocache = True
def __init__(self, arch, cc, *args, **kwargs):
self.fake_info = (arch, cc, '')
CCompilerOpt.__init__(self, None, **kwargs)
def dist_compile(self, sources, flags, **kwargs):
return sources
def dist_info(self):
return self.fake_info
@staticmethod
def dist_log(*args, stderr=False):
# avoid printing
pass
def feature_test(self, name, force_flags=None, macros=[]):
# To speed up
return True
class Features:
def __init__(self, arch, cc):
self.copt = FakeCCompilerOpt(arch, cc, cpu_baseline="max")
def names(self):
return self.copt.cpu_baseline_names()
def serialize(self, features_names):
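# Build (feature, implied features, group members) tuples in sorted order.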
result = []
for f in self.copt.feature_sorted(features_names):
gather = self.copt.feature_supported.get(f, {}).get("group", [])
implies = self.copt.feature_sorted(self.copt.feature_implies(f))
result.append((f, implies, gather))
return result
def table(self, **kwargs):
return self.gen_table(self.serialize(self.names()), **kwargs)
def table_diff(self, vs, **kwargs):
fnames = set(self.names())
fnames_vs = set(vs.names())
common = fnames.intersection(fnames_vs)
extra = fnames.difference(fnames_vs)
notavl = fnames_vs.difference(fnames)
iextra = {}
inotavl = {}
idiff = set()
for f in common:
implies = self.copt.feature_implies(f)
implies_vs = vs.copt.feature_implies(f)
e = implies.difference(implies_vs)
i = implies_vs.difference(implies)
if not i and not e:
continue
if e:
iextra[f] = e
if i:
inotavl[f] = i
idiff.add(f)
def fbold(f):
if f in extra:
return f':enabled:`{f}`'
if f in notavl:
return f':disabled:`{f}`'
return f
def fbold_implies(f, i):
if i in iextra.get(f, {}):
return f':enabled:`{i}`'
if f in notavl or i in inotavl.get(f, {}):
return f':disabled:`{i}`'
return i
diff_all = self.serialize(idiff.union(extra))
diff_all += vs.serialize(notavl)
content = self.gen_table(
diff_all, fstyle=fbold, fstyle_implies=fbold_implies, **kwargs
)
return content
def gen_table(self, serialized_features, fstyle=None, fstyle_implies=None,
**kwargs):
if fstyle is None:
fstyle = lambda ft: f'``{ft}``'
if fstyle_implies is None:
fstyle_implies = lambda origin, ft: fstyle(ft)
rows = []
have_gather = False
for f, implies, gather in serialized_features:
if gather:
have_gather = True
name = fstyle(f)
implies = ' '.join([fstyle_implies(f, i) for i in implies])
gather = ' '.join([fstyle_implies(f, i) for i in gather])
rows.append((name, implies, gather))
if not rows:
return ''
fields = ["Name", "Implies", "Gathers"]
if not have_gather:
del fields[2]
rows = [(name, implies) for name, implies, _ in rows]
return self.gen_rst_table(fields, rows, **kwargs)
def gen_rst_table(self, field_names, rows, tab_size=4):
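# Render a reStructuredText "simple table": '=' borders sized to the
# widest cell per column, a header row, and a left margin of tab_size.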
assert(not rows or len(field_names) == len(rows[0]))
rows.append(field_names)
fld_len = len(field_names)
cls_len = [max(len(c[i]) for c in rows) for i in range(fld_len)]
del rows[-1]
cformat = ' '.join('{:<%d}' % i for i in cls_len)
border = cformat.format(*['='*i for i in cls_len])
rows = [cformat.format(*row) for row in rows]
# header
rows = [border, cformat.format(*field_names), border] + rows
# footer
rows += [border]
# add left margin
rows = [(' ' * tab_size) + r for r in rows]
return '\n'.join(rows)
def wrapper_section(title, content, tab_size=4):
tab = ' '*tab_size
if content:
return (
f"{title}\n{'~'*len(title)}"
f"\n.. table::\n{tab}:align: left\n\n"
f"{content}\n\n"
)
return ''
def wrapper_tab(title, table, tab_size=4):
tab = ' '*tab_size
if table:
return ('\n' + tab).join((
'.. tab:: ' + title,
tab + '.. table::',
tab + ':align: left',
table + '\n\n'
))
return ''
if __name__ == '__main__':
pretty_names = {
"PPC64": "IBM/POWER big-endian",
"PPC64LE": "IBM/POWER little-endian",
"S390X": "IBM/ZSYSTEM(S390X)",
"ARMHF": "ARMv7/A32",
"AARCH64": "ARMv8/A64",
"ICC": "Intel Compiler",
# "ICCW": "Intel Compiler msvc-like",
"MSVC": "Microsoft Visual C/C++"
}
gen_path = path.join(
path.dirname(path.realpath(__file__)), "generated_tables"
)
with open(path.join(gen_path, 'cpu_features.inc'), 'wt') as fd:
fd.write(f'.. generated via {__file__}\n\n')
for arch in (
("x86", "PPC64", "PPC64LE", "ARMHF", "AARCH64", "S390X")
):
title = "On " + pretty_names.get(arch, arch)
table = Features(arch, 'gcc').table()
fd.write(wrapper_section(title, table))
with open(path.join(gen_path, 'compilers-diff.inc'), 'wt') as fd:
fd.write(f'.. generated via {__file__}\n\n')
for arch, cc_names in (
("x86", ("clang", "ICC", "MSVC")),
("PPC64", ("clang",)),
("PPC64LE", ("clang",)),
("ARMHF", ("clang",)),
("AARCH64", ("clang",)),
("S390X", ("clang",))
):
arch_pname = pretty_names.get(arch, arch)
for cc in cc_names:
title = f"On {arch_pname}::{pretty_names.get(cc, cc)}"
table = Features(arch, cc).table_diff(Features(arch, "gcc"))
fd.write(wrapper_section(title, table))
| anntzer/numpy | doc/source/reference/simd/gen_features.py | Python | bsd-3-clause | 6,371 |
import sys
name = sys.argv[1]
handle = open(name, 'r')
text = handle.read()
print(name, 'is', len(text), 'bytes')
| crookedreyes/py4e-specialization | code3/argfile.py | Python | lgpl-2.1 | 115 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTetoolkit(PythonPackage):
"""TEToolkit is a software package that utilizes both unambiguously
(uniquely) and ambiguously (multi-) mapped reads to perform
differential enrichment analyses from high throughput sequencing
experiments."""
homepage = "http://hammelllab.labsites.cshl.edu/software"
url = "https://pypi.io/packages/source/T/TEToolkit/TEToolkit-1.5.1.tar.gz"
version('2.0.3', sha256='1d0f5928b30c6cd9dbef8e092ae0c11e9e707faf92a19af8eed3e360da7d4e46')
version('1.5.1', sha256='22c13ca45bccc89e9d9bf48d59ae6db1fa4c634def64fc56ba9bffd23aa689ac')
depends_on('py-setuptools')
depends_on('python@2.7:', type=('build', 'run'))
depends_on('py-pysam', type=('build', 'run'))
depends_on('r-deseq', when='@:1.5.1', type=('build', 'run'))
depends_on('r-deseq2', when='@2.0.0:', type=('build', 'run'))
| iulian787/spack | var/spack/repos/builtin/packages/py-tetoolkit/package.py | Python | lgpl-2.1 | 1,095 |
"""Support for Toon binary sensors."""
from datetime import timedelta
import logging
from typing import Any
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from . import (ToonEntity, ToonDisplayDeviceEntity, ToonBoilerDeviceEntity,
ToonBoilerModuleDeviceEntity)
from .const import DATA_TOON_CLIENT, DOMAIN
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=5)
SCAN_INTERVAL = timedelta(seconds=300)
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry,
async_add_entities) -> None:
"""Set up a Toon binary sensor based on a config entry."""
toon = hass.data[DATA_TOON_CLIENT][entry.entry_id]
sensors = [
ToonBoilerModuleBinarySensor(toon, 'thermostat_info',
'boiler_connected', None,
'Boiler Module Connection',
'mdi:check-network-outline',
'connectivity'),
ToonDisplayBinarySensor(toon, 'thermostat_info', 'active_state', 4,
"Toon Holiday Mode", 'mdi:airport', None),
ToonDisplayBinarySensor(toon, 'thermostat_info', 'next_program', None,
"Toon Program", 'mdi:calendar-clock', None),
]
if toon.thermostat_info.have_ot_boiler:
sensors.extend([
ToonBoilerBinarySensor(toon, 'thermostat_info',
'ot_communication_error', '0',
"OpenTherm Connection",
'mdi:check-network-outline',
'connectivity'),
ToonBoilerBinarySensor(toon, 'thermostat_info', 'error_found', 255,
"Boiler Status", 'mdi:alert', 'problem',
inverted=True),
ToonBoilerBinarySensor(toon, 'thermostat_info', 'burner_info',
None, "Boiler Burner", 'mdi:fire', None),
ToonBoilerBinarySensor(toon, 'thermostat_info', 'burner_info', '2',
"Hot Tap Water", 'mdi:water-pump', None),
ToonBoilerBinarySensor(toon, 'thermostat_info', 'burner_info', '3',
"Boiler Preheating", 'mdi:fire', None),
])
async_add_entities(sensors)
class ToonBinarySensor(ToonEntity, BinarySensorDevice):
"""Defines an Toon binary sensor."""
def __init__(self, toon, section: str, measurement: str, on_value: Any,
name: str, icon: str, device_class: str,
inverted: bool = False) -> None:
"""Initialize the Toon sensor."""
self._state = inverted
self._device_class = device_class
self.section = section
self.measurement = measurement
self.on_value = on_value
self.inverted = inverted
super().__init__(toon, name, icon)
@property
def unique_id(self) -> str:
"""Return the unique ID for this binary sensor."""
return '_'.join([DOMAIN, self.toon.agreement.id, 'binary_sensor',
self.section, self.measurement, str(self.on_value)])
@property
def device_class(self) -> str:
"""Return the device class."""
return self._device_class
@property
def is_on(self) -> bool:
"""Return the status of the binary sensor."""
if self.on_value is not None:
value = self._state == self.on_value
elif self._state is None:
value = False
else:
value = bool(max(0, int(self._state)))
if self.inverted:
return not value
return value
def update(self) -> None:
"""Get the latest data from the binary sensor."""
section = getattr(self.toon, self.section)
self._state = getattr(section, self.measurement)
class ToonBoilerBinarySensor(ToonBinarySensor, ToonBoilerDeviceEntity):
"""Defines a Boiler binary sensor."""
pass
class ToonDisplayBinarySensor(ToonBinarySensor, ToonDisplayDeviceEntity):
"""Defines a Toon Display binary sensor."""
pass
class ToonBoilerModuleBinarySensor(ToonBinarySensor,
ToonBoilerModuleDeviceEntity):
"""Defines a Boiler module binary sensor."""
pass
| MartinHjelmare/home-assistant | homeassistant/components/toon/binary_sensor.py | Python | apache-2.0 | 4,548 |
#!/usr/bin/python
"""
Cartesian configuration format file parser.
Filter syntax:
* ``,`` means ``OR``
* ``..`` means ``AND``
* ``.`` means ``IMMEDIATELY-FOLLOWED-BY``
* ``(xx=yy)`` where ``xx=VARIANT_NAME`` and ``yy=VARIANT_VALUE``
Example:
::
qcow2..(guest_os=Fedora).14, RHEL.6..raw..boot, smp2..qcow2..migrate..ide
means match all dicts whose names have:
::
(qcow2 AND ((guest_os=Fedora) IMMEDIATELY-FOLLOWED-BY 14)) OR
((RHEL IMMEDIATELY-FOLLOWED-BY 6) AND raw AND boot) OR
(smp2 AND qcow2 AND migrate AND ide)
Note:
* ``qcow2..Fedora.14`` is equivalent to ``Fedora.14..qcow2``.
* ``qcow2..Fedora.14`` is not equivalent to ``qcow2..14.Fedora``.
* ``ide, scsi`` is equivalent to ``scsi, ide``.
Filters can be used in 3 ways:
::
only <filter>
no <filter>
<filter>:
The last one starts a conditional block.
Formal definition: Regexps come from `python <http://docs.python.org/2/library/re.html>`__.
They're not deterministic, but are more readable for people. Spaces between
terminals and nonterminals are only there to make the definitions easier to read.
The base of the definitions come verbatim as follows:
::
E = {\\n, #, :, "-", =, +=, <=, ~=, ?=, ?+=, ?<=, !, < , del, @, variants, include, only, no, name, value}
N = {S, DEL, FILTER, FILTER_NAME, FILTER_GROUP, PN_FILTER_GROUP, STAT, VARIANT, VAR-TYPE, VAR-NAME, VAR-NAME-F, VAR, COMMENT, TEXT, DEPS, DEPS-NAME-F, META-DATA, IDENTIFIER}``
I = I^n | n in N // indentation from start of line
// where n is indentation length.
I = I^n+x | n,x in N // indentation with shift
start symbol = S
end symbol = eps
S -> I^0+x STATV | eps
I^n STATV
I^n STATV
I^n STATV -> I^n STATV \\n I^n STATV | I^n STAT | I^n variants VARIANT
I^n STAT -> I^n STAT \\n I^n STAT | I^n COMMENT | I^n include INC
I^n STAT -> I^n del DEL | I^n FILTER
DEL -> name \\n
I^n STAT -> I^n name = VALUE | I^n name += VALUE | I^n name <= VALUE | I^n name ~= VALUE
I^n STAT -> I^n name ?= VALUE | I^n name ?+= VALUE | I^n name ?<= VALUE
VALUE -> TEXT \\n | 'TEXT' \\n | "TEXT" \\n
COMMENT_BLOCK -> #TEXT | //TEXT
COMMENT -> COMMENT_BLOCK\\n
COMMENT -> COMMENT_BLOCK\\n
TEXT = [^\\n] TEXT //python format regexp
I^n variants VAR #comments: add possibility for comment
I^n+x VAR-NAME: DEPS
I^n+x+x2 STATV
I^n VAR-NAME:
IDENTIFIER -> [A-Za-z0-9][A-Za-z0-9_-]*
VARIANT -> VAR COMMENT_BLOCK\\n I^n+x VAR-NAME
VAR -> VAR-TYPE: | VAR-TYPE META-DATA: | : // Named | unnamed variant
VAR-TYPE -> IDENTIFIER
variants _name_ [xxx] [zzz=yyy] [uuu]:
META-DATA -> [IDENTIFIER] | [IDENTIFIER=TEXT] | META-DATA META-DATA
I^n VAR-NAME -> I^n VAR-NAME \\n I^n VAR-NAME | I^n VAR-NAME-N \\n I^n+x STATV
VAR-NAME-N -> - @VAR-NAME-F: DEPS | - VAR-NAME-F: DEPS
VAR-NAME-F -> [a-zA-Z0-9\\._-]+ // Python regexp
DEPS -> DEPS-NAME-F | DEPS-NAME-F,DEPS
DEPS-NAME-F -> [a-zA-Z0-9\\._- ]+ // Python regexp
INC -> name \\n
FILTER_GROUP: STAT
STAT
I^n STAT -> I^n PN_FILTER_GROUP | I^n ! PN_FILTER_GROUP
PN_FILTER_GROUP -> FILTER_GROUP: \\n I^n+x STAT
PN_FILTER_GROUP -> FILTER_GROUP: STAT \\n I^n+x STAT
only FILTER_GROUP
no FILTER_GROUP
FILTER -> only FILTER_GROUP \\n | no FILTER_GROUP \\n
FILTER_GROUP -> FILTER_NAME
FILTER_GROUP -> FILTER_GROUP..FILTER_GROUP
FILTER_GROUP -> FILTER_GROUP,FILTER_GROUP
FILTER_NAME -> FILTER_NAME.FILTER_NAME
FILTER_NAME -> VAR-NAME-F | (VAR-NAME-F=VAR-NAME-F)
:copyright: Red Hat 2008-2013
"""
import os
import collections
import optparse
import logging
import re
import sys
_reserved_keys = set(("name", "shortname", "dep", "_short_name_map_file", "_name_map_file"))
num_failed_cases = 5
class ParserError(Exception):
def __init__(self, msg, line=None, filename=None, linenum=None):
Exception.__init__(self)
self.msg = msg
self.line = line
self.filename = filename
self.linenum = linenum
def __str__(self):
if self.line:
return "%s: %r (%s:%s)" % (self.msg, self.line,
self.filename, self.linenum)
else:
return "%s (%s:%s)" % (self.msg, self.filename, self.linenum)
class LexerError(ParserError):
pass
class MissingIncludeError(Exception):
def __init__(self, line, filename, linenum):
Exception.__init__(self)
self.line = line
self.filename = filename
self.linenum = linenum
def __str__(self):
return ("%r (%s:%s): file does not exist or it's not a regular "
"file" % (self.line, self.filename, self.linenum))
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
def enum(iterator, start_pos=0):
for i in iterator:
yield start_pos, i
start_pos += 1
else:
enum = enumerate
def _match_adjacent(block, ctx, ctx_set):
"""
Try to match as many adjacent blocks as possible against the context.
:return: Count of matched blocks.
"""
if block[0] not in ctx_set:
return 0
if len(block) == 1:
return 1 # First match and length is 1.
if block[1] not in ctx_set:
return int(ctx[-1] == block[0]) # Check match with last from ctx.
k = 0
i = ctx.index(block[0])
while i < len(ctx): # Try to match all of blocks.
if k > 0 and ctx[i] != block[k]: # Block does not match
i -= k - 1
k = 0 # Start from first block in next ctx.
if ctx[i] == block[k]:
k += 1
if k >= len(block): # match all of blocks
break
if block[k] not in ctx_set: # block is not in the whole ctx.
break
i += 1
return k
def _might_match_adjacent(block, ctx, ctx_set, descendant_labels):
matched = _match_adjacent(block, ctx, ctx_set)
for elem in block[matched:]: # Try to find rest of blocks in subtree
if elem not in descendant_labels:
# print "Can't match %s, ctx %s" % (block, ctx)
return False
return True
# Filter must inherit from object (otherwise type() won't work)
class Filter(object):
__slots__ = ["filter"]
def __init__(self, lfilter):
self.filter = lfilter
# print self.filter
def match(self, ctx, ctx_set):
for word in self.filter: # Go through ,
for block in word: # Go through ..
if _match_adjacent(block, ctx, ctx_set) != len(block):
break
else:
# print "Filter pass: %s ctx: %s" % (self.filter, ctx)
return True # All match
return False
def might_match(self, ctx, ctx_set, descendant_labels):
# There is some possibility of a match in child blocks.
for word in self.filter:
for block in word:
if not _might_match_adjacent(block, ctx, ctx_set,
descendant_labels):
break
else:
return True
# print "Filter not pass: %s ctx: %s" % (self.filter, ctx)
return False
class NoOnlyFilter(Filter):
__slots__ = ("line")
def __init__(self, lfilter, line):
super(NoOnlyFilter, self).__init__(lfilter)
self.line = line
def __eq__(self, o):
if isinstance(o, self.__class__):
if self.filter == o.filter:
return True
return False
class OnlyFilter(NoOnlyFilter):
# pylint: disable=W0613
def is_irrelevant(self, ctx, ctx_set, descendant_labels):
# Matched in this tree.
return self.match(ctx, ctx_set)
def requires_action(self, ctx, ctx_set, descendant_labels):
# Impossible to match in this tree.
return not self.might_match(ctx, ctx_set, descendant_labels)
def might_pass(self, failed_ctx, failed_ctx_set, ctx, ctx_set,
descendant_labels):
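# A pass is possible only when some block matches the new context
# better than it matched the previously failed context.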
for word in self.filter:
for block in word:
if (_match_adjacent(block, ctx, ctx_set) >
_match_adjacent(block, failed_ctx, failed_ctx_set)):
return self.might_match(ctx, ctx_set, descendant_labels)
return False
def __str__(self):
return "Only %s" % (self.filter)
def __repr__(self):
return "Only %s" % (self.filter)
class NoFilter(NoOnlyFilter):
def is_irrelevant(self, ctx, ctx_set, descendant_labels):
return not self.might_match(ctx, ctx_set, descendant_labels)
# pylint: disable=W0613
def requires_action(self, ctx, ctx_set, descendant_labels):
return self.match(ctx, ctx_set)
# pylint: disable=W0613
def might_pass(self, failed_ctx, failed_ctx_set, ctx, ctx_set,
descendant_labels):
for word in self.filter:
for block in word:
if (_match_adjacent(block, ctx, ctx_set) <
_match_adjacent(block, failed_ctx, failed_ctx_set)):
return not self.match(ctx, ctx_set)
return False
def __str__(self):
return "No %s" % (self.filter)
def __repr__(self):
return "No %s" % (self.filter)
class JoinFilter(NoOnlyFilter):
def __str__(self):
return "Join %s" % (self.filter)
def __repr__(self):
return "Join %s" % (self.filter)
class BlockFilter(object):
__slots__ = ["blocked"]
def __init__(self, blocked):
self.blocked = blocked
def apply_to_dict(self, d):
pass
class Condition(NoFilter):
__slots__ = ["content"]
# pylint: disable=W0231
def __init__(self, lfilter, line):
super(Condition, self).__init__(lfilter, line)
self.content = []
def __str__(self):
return "Condition %s:%s" % (self.filter, self.content)
def __repr__(self):
return "Condition %s:%s" % (self.filter, self.content)
class NegativeCondition(OnlyFilter):
__slots__ = ["content"]
# pylint: disable=W0231
def __init__(self, lfilter, line):
super(NegativeCondition, self).__init__(lfilter, line)
self.content = []
def __str__(self):
return "NotCond %s:%s" % (self.filter, self.content)
def __repr__(self):
return "NotCond %s:%s" % (self.filter, self.content)
class StrReader(object):
"""
Preprocess an input string for easy reading.
"""
def __init__(self, s):
"""
Initialize the reader.
:param s: The string to parse.
"""
self.filename = "<string>"
self._lines = []
self._line_index = 0
self._stored_line = None
for linenum, line in enumerate(s.splitlines()):
line = line.rstrip().expandtabs()
stripped_line = line.lstrip()
indent = len(line) - len(stripped_line)
if (not stripped_line or
stripped_line.startswith("#") or
stripped_line.startswith("//")):
continue
self._lines.append((stripped_line, indent, linenum + 1))
def get_next_line(self, prev_indent):
"""
Get the next line in the current block.
:param prev_indent: The indentation level of the previous block.
:return: (line, indent, linenum), where indent is the line's
indentation level. If no line is available, (None, -1, -1) is
returned.
"""
if self._stored_line:
ret = self._stored_line
self._stored_line = None
return ret
if self._line_index >= len(self._lines):
return None, -1, -1
line, indent, linenum = self._lines[self._line_index]
if indent <= prev_indent:
return None, indent, linenum
self._line_index += 1
return line, indent, linenum
def set_next_line(self, line, indent, linenum):
"""
Make the next call to get_next_line() return the given line instead of
the real next line.
"""
line = line.strip()
if line:
self._stored_line = line, indent, linenum
class FileReader(StrReader):
"""
Preprocess an input file for easy reading.
"""
def __init__(self, filename):
"""
Initialize the reader.
:param filename: The name of the input file.
"""
with open(filename) as f:
StrReader.__init__(self, f.read())
self.filename = filename
class Label(object):
__slots__ = ["name", "var_name", "long_name", "hash_val", "hash_var"]
def __init__(self, name, next_name=None):
if next_name is None:
self.name = name
self.var_name = None
else:
self.name = next_name
self.var_name = name
if self.var_name is None:
self.long_name = "%s" % (self.name)
else:
self.long_name = "(%s=%s)" % (self.var_name, self.name)
self.hash_val = self.hash_name()
self.hash_var = None
if self.var_name:
self.hash_var = self.hash_variant()
def __str__(self):
return self.long_name
def __repr__(self):
return self.long_name
def __eq__(self, o):
"""
The comparison is asymmetric due to optimization.
"""
if o.var_name:
if self.long_name == o.long_name:
return True
else:
if self.name == o.name:
return True
return False
def __ne__(self, o):
"""
The comparison is asymmetric due to optimization.
"""
if o.var_name:
if self.long_name != o.long_name:
return True
else:
if self.name != o.name:
return True
return False
def __hash__(self):
return self.hash_val
def hash_name(self):
return sum([i + 1 * ord(x) for i, x in enumerate(self.name)])
def hash_variant(self):
return sum([i + 1 * ord(x) for i, x in enumerate(str(self))])
class Node(object):
__slots__ = ["var_name", "name", "filename", "dep", "content", "children",
"labels", "append_to_shortname", "failed_cases", "default",
"q_dict"]
def __init__(self):
self.var_name = []
self.name = []
self.filename = ""
self.dep = []
self.content = []
self.children = []
self.labels = set()
self.append_to_shortname = False
self.failed_cases = collections.deque()
self.default = False
def dump(self, indent, recurse=False):
print("%s%s" % (" " * indent, self.name))
print("%s%s" % (" " * indent, self.var_name))
print("%s%s" % (" " * indent, self))
print("%s%s" % (" " * indent, self.content))
print("%s%s" % (" " * indent, self.failed_cases))
if recurse:
for child in self.children:
child.dump(indent + 3, recurse)
match_substitute = re.compile(r"\$\{(.+?)\}")
def _substitution(value, d):
"""
Optimization only: string.Template substitution is quite an expensive operation.
:param value: String that may contain ${name} placeholders for substitution.
:param d: Dictionary whose values are substituted into the string.
:return: Substituted string
"""
if "$" in value:
start = 0
st = ""
try:
match = match_substitute.search(value, start)
while match:
val = eval(match.group(1), None, d)
st += value[start:match.start()] + str(val)
start = match.end()
match = match_substitute.search(value, start)
except Exception:
pass
st += value[start:len(value)]
return st
else:
return value
class Token(object):
__slots__ = []
identifier = ""
def __str__(self):
return self.identifier
def __repr__(self):
return "'%s'" % self.identifier
def __ne__(self, o):
"""
The comparison is asymmetric due to optimization.
"""
if o.identifier != self.identifier:
return True
return False
class LIndent(Token):
__slots__ = ["length"]
identifier = "indent"
def __init__(self, length):
self.length = length
def __str__(self):
return "%s %s" % (self.identifier, self.length)
def __repr__(self):
return "%s %s" % (self.identifier, self.length)
class LEndL(Token):
__slots__ = []
identifier = "endl"
class LEndBlock(LIndent):
__slots__ = []
pass
class LIdentifier(str):
__slots__ = []
identifier = "Identifier re([A-Za-z0-9][A-Za-z0-9_-]*)"
def __str__(self):
return super(LIdentifier, self).__str__()
def __repr__(self):
return "'%s'" % self
def checkChar(self, chars):
for t in self:
if not (t in chars):
raise ParserError("Wrong char %s in %s" % (t, self))
return self
def checkAlpha(self):
"""
Check if the string contains only alphabetic characters.
"""
if not self.isalpha():
raise ParserError("Some chars are not alpha in %s" % (self))
return self
def checkNumbers(self):
"""
Check if the string contains only digits.
"""
if not self.isdigit():
raise ParserError("Some chars are not digits in %s" % (self))
return self
def checkCharAlpha(self, chars):
"""
Check if the string contains only alphabetic characters or the given chars.
"""
for t in self:
if not (t in chars or t.isalpha()):
raise ParserError("Char %s is not alpha or one of special "
"chars [%s] in %s" % (t, chars, self))
return self
def checkCharAlphaNum(self, chars):
"""
Check if the string contains only alphanumeric characters or the given chars.
"""
for t in self:
if not (t in chars or t.isalnum()):
raise ParserError("Char %s is not alphanum or one of special "
"chars [%s] in %s" % (t, chars, self))
return self
def checkCharNumeric(self, chars):
"""
Check if the string contains only digits or the given chars.
"""
for t in self:
if not (t in chars or t.isdigit()):
raise ParserError("Char %s is not a digit or one of special "
"chars [%s] in %s" % (t, chars, self))
return self
class LWhite(LIdentifier):
__slots__ = []
identifier = "WhiteSpace re(\\s)"
class LString(LIdentifier):
__slots__ = []
identifier = "String re(.+)"
class LColon(Token):
__slots__ = []
identifier = ":"
class LVariants(Token):
__slots__ = []
identifier = "variants"
class LDot(Token):
__slots__ = []
identifier = "."
class LVariant(Token):
__slots__ = []
identifier = "-"
class LDefault(Token):
__slots__ = []
identifier = "@"
class LOnly(Token):
__slots__ = []
identifier = "only"
class LSuffix(Token):
__slots__ = []
identifier = "suffix"
class LJoin(Token):
__slots__ = []
identifier = "join"
class LNo(Token):
__slots__ = []
identifier = "no"
class LCond(Token):
__slots__ = []
identifier = ""
class LNotCond(Token):
__slots__ = []
identifier = "!"
class LOr(Token):
__slots__ = []
identifier = ","
class LAnd(Token):
__slots__ = []
identifier = ".."
class LCoc(Token):
__slots__ = []
identifier = "."
class LComa(Token):
__slots__ = []
identifier = ","
class LLBracket(Token):
__slots__ = []
identifier = "["
class LRBracket(Token):
__slots__ = []
identifier = "]"
class LLRBracket(Token):
__slots__ = []
identifier = "("
class LRRBracket(Token):
__slots__ = []
identifier = ")"
class LRegExpStart(Token):
__slots__ = []
identifier = "${"
class LRegExpStop(Token):
__slots__ = []
identifier = "}"
class LInclude(Token):
__slots__ = []
identifier = "include"
class LOperators(Token):
__slots__ = ["name", "value"]
identifier = ""
function = None
def set_operands(self, name, value):
# pylint: disable=W0201
self.name = str(name)
# pylint: disable=W0201
self.value = str(value)
return self
class LSet(LOperators):
__slots__ = []
identifier = "="
def apply_to_dict(self, d):
"""
:param d: Dictionary for apply value
"""
if self.name not in _reserved_keys:
d[self.name] = _substitution(self.value, d)
class LAppend(LOperators):
__slots__ = []
identifier = "+="
def apply_to_dict(self, d):
if self.name not in _reserved_keys:
d[self.name] = d.get(self.name, "") + _substitution(self.value, d)
class LPrepend(LOperators):
__slots__ = []
identifier = "<="
def apply_to_dict(self, d):
if self.name not in _reserved_keys:
d[self.name] = _substitution(self.value, d) + d.get(self.name, "")
class LLazySet(LOperators):
__slots__ = []
identifier = "~="
def apply_to_dict(self, d):
if self.name not in _reserved_keys and self.name not in d:
d[self.name] = _substitution(self.value, d)
class LRegExpSet(LOperators):
__slots__ = []
identifier = "?="
def apply_to_dict(self, d):
exp = re.compile("%s$" % self.name)
value = _substitution(self.value, d)
for key in d:
keystr = "".join(key) if isinstance(key, tuple) else key
if key not in _reserved_keys and exp.match(keystr):
d[key] = value
class LRegExpAppend(LOperators):
__slots__ = []
identifier = "?+="
def apply_to_dict(self, d):
exp = re.compile("%s$" % self.name)
value = _substitution(self.value, d)
for key in d:
keystr = "".join(key) if isinstance(key, tuple) else key
if key not in _reserved_keys and exp.match(keystr):
d[key] += value
class LRegExpPrepend(LOperators):
__slots__ = []
identifier = "?<="
def apply_to_dict(self, d):
exp = re.compile("%s$" % self.name)
value = _substitution(self.value, d)
for key in d:
keystr = "".join(key) if isinstance(key, tuple) else key
if key not in _reserved_keys and exp.match(keystr):
d[key] = value + d[key]
class LDel(LOperators):
__slots__ = []
identifier = "del"
def apply_to_dict(self, d):
exp = re.compile("%s$" % self.name)
keys_to_del = collections.deque()
for key in d:
if key not in _reserved_keys and exp.match(key):
keys_to_del.append(key)
for key in keys_to_del:
del d[key]
class LApplyPreDict(LOperators):
__slots__ = []
identifier = "apply_pre_dict"
def set_operands(self, name, value):
self.name = name # pylint: disable=W0201,E0237
self.value = value # pylint: disable=W0201,E0237
return self
def apply_to_dict(self, d):
d.update(self.value)
def __str__(self):
return "Apply_pre_dict: %s" % self.value
def __repr__(self):
return "Apply_pre_dict: %s" % self.value
class LUpdateFileMap(LOperators):
__slots__ = ["shortname", "dest"]
identifier = "update_file_map"
def set_operands(self, filename, name, dest="_name_map_file"):
# pylint: disable=W0201
self.name = name
# pylint: disable=W0201
if filename == "<string>":
self.shortname = filename
else:
self.shortname = os.path.basename(filename)
self.dest = dest
return self
def apply_to_dict(self, d):
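# Chain variant names per source file: the new name is prepended,
# dot-separated, to any existing mapping for this file.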
dest = self.dest
if dest not in d:
d[dest] = {}
if self.shortname in d[dest]:
old_name = d[dest][self.shortname]
d[dest][self.shortname] = "%s.%s" % (self.name, old_name)
else:
d[dest][self.shortname] = self.name
class Suffix(LOperators):
__slots__ = []
identifier = "apply_suffix"
def __str__(self):
return "Suffix: %s" % (self.value)
def __repr__(self):
return "Suffix %s" % (self.value)
def __eq__(self, o):
if isinstance(o, self.__class__):
if self.value == o.value:
return True
def apply_to_dict(self, d):
for key in d.copy():
if key not in _reserved_keys:
# Store key as a tuple: (key, suffix1, suffix2, suffix3,....)
# This allows us to manipulate later on suffixes
# Add suffix to the key, remove the old key
new_key = (key if isinstance(key, tuple) else (key,)) + (self.value,)
d[new_key] = d.pop(key)
spec_iden = "_-"
spec_oper = "+<?~"
tokens_map = {"-": LVariant,
".": LDot,
":": LColon,
"@": LDefault,
",": LComa,
"[": LLBracket,
"]": LRBracket,
"(": LLRBracket,
")": LRRBracket,
"!": LNotCond}
tokens_oper = {"": LSet,
"~": LLazySet,
"+": LAppend,
"<": LPrepend,
"?": LRegExpSet,
"?+": LRegExpAppend,
"?<": LRegExpPrepend,
}
tokens_oper_re = [r"\=", r"\+\=", r"\<\=", r"\~\=", r"\?\=", r"\?\+\=", r"\?\<\="]
_ops_exp = re.compile(r"|".join(tokens_oper_re))
class Lexer(object):
def __init__(self, reader):
self.reader = reader
self.filename = reader.filename
self.line = None
self.linenum = 0
self.ignore_white = False
self.rest_as_string = False
self.match_func_index = 0
self.generator = self.get_lexer()
self.prev_indent = 0
self.fast = False
def set_prev_indent(self, prev_indent):
self.prev_indent = prev_indent
def set_fast(self):
self.fast = True
def set_strict(self):
self.fast = False
def match(self, line, pos):
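# Tokenize one line: keyword prefixes first, then either the fast
# regexp path for plain "name = value" lines or a char-by-char scan.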
l0 = line[0]
chars = ""
m = None
cind = 0
if l0 == "v":
if line.startswith("variants:"):
yield LVariants()
yield LColon()
pos = 9
elif line.startswith("variants "):
yield LVariants()
pos = 8
elif l0 == "-":
yield LVariant()
pos = 1
elif l0 == "o":
if line.startswith("only "):
yield LOnly()
pos = 4
while line[pos].isspace():
pos += 1
elif l0 == "n":
if line.startswith("no "):
yield LNo()
pos = 2
while line[pos].isspace():
pos += 1
elif l0 == "i":
if line.startswith("include "):
yield LInclude()
pos = 7
elif l0 == "d":
if line.startswith("del "):
yield LDel()
pos = 3
while line[pos].isspace():
pos += 1
elif l0 == "s":
if line.startswith("suffix "):
yield LSuffix()
pos = 6
while line[pos].isspace():
pos += 1
elif l0 == "j":
if line.startswith("join "):
yield LJoin()
pos = 4
while line[pos].isspace():
pos += 1
if self.fast and pos == 0: # due to regexp
cind = line[pos:].find(":")
m = _ops_exp.search(line[pos:])
oper = ""
token = None
if self.rest_as_string:
self.rest_as_string = False
yield LString(line[pos:].lstrip())
elif self.fast and m and (cind < 0 or cind > m.end()):
chars = ""
yield LIdentifier(line[:m.start()].rstrip())
yield tokens_oper[m.group()[:-1]]()
yield LString(line[m.end():].lstrip())
else:
li = enum(line[pos:], pos)
for pos, char in li:
if char.isalnum() or char in spec_iden: # alfanum+_-
chars += char
elif char in spec_oper: # <+?=~
if chars:
yield LIdentifier(chars)
oper = ""
chars = ""
oper += char
else:
if chars:
yield LIdentifier(chars)
chars = ""
if char.isspace(): # Whitespace
for pos, char in li:
if not char.isspace():
if not self.ignore_white:
yield LWhite()
break
if char.isalnum() or char in spec_iden:
chars += char
elif char == "=":
if oper in tokens_oper:
yield tokens_oper[oper]()
else:
raise LexerError("Unexpected character %s on"
" pos %s" % (char, pos),
self.line, self.filename,
self.linenum)
oper = ""
elif char in tokens_map:
token = tokens_map[char]()
elif char == "\"":
chars = ""
pos, char = next(li)
while char != "\"":
chars += char
pos, char = next(li)
yield LString(chars)
elif char == "#":
break
elif char in spec_oper:
oper += char
else:
raise LexerError("Unexpected character %s on"
" pos %s. Special chars are allowed"
" only in variable assignation"
" statement" % (char, pos), line,
self.filename, self.linenum)
if token is not None:
yield token
token = None
if self.rest_as_string:
self.rest_as_string = False
yield LString(line[pos + 1:].lstrip())
break
if chars:
yield LIdentifier(chars)
chars = ""
yield LEndL()
def get_lexer(self):
cr = self.reader
indent = 0
while True:
(self.line, indent,
self.linenum) = cr.get_next_line(self.prev_indent)
if not self.line:
yield LEndBlock(indent)
continue
yield LIndent(indent)
for token in self.match(self.line, 0):
yield token
def get_until_gen(self, end_tokens=None):
if end_tokens is None:
end_tokens = [LEndL]
token = next(self.generator)
while type(token) not in end_tokens:
yield token
token = next(self.generator)
yield token
def get_until(self, end_tokens=None):
if end_tokens is None:
end_tokens = [LEndL]
return [x for x in self.get_until_gen(end_tokens)]
def flush_until(self, end_tokens=None):
if end_tokens is None:
end_tokens = [LEndL]
for _ in self.get_until_gen(end_tokens):
pass
def get_until_check(self, lType, end_tokens=None):
"""
Read tokens from iterator until get end_tokens or type of token not
match ltype
:param lType: List of allowed tokens
:param end_tokens: List of tokens for end reading
:return: List of readed tokens.
"""
if end_tokens is None:
end_tokens = [LEndL]
tokens = []
lType = lType + end_tokens
for token in self.get_until_gen(end_tokens):
if type(token) in lType:
tokens.append(token)
else:
raise ParserError("Expected %s got %s" % (lType, type(token)),
self.line, self.filename, self.linenum)
return tokens
def get_until_no_white(self, end_tokens=None):
"""
Read tokens from iterator until get one of end_tokens and strip LWhite
:param end_tokens: List of tokens for end reading
:return: List of readed tokens.
"""
if end_tokens is None:
end_tokens = [LEndL]
return [x for x in self.get_until_gen(end_tokens) if type(x) != LWhite]
def rest_line_gen(self):
token = next(self.generator)
while type(token) != LEndL:
yield token
token = next(self.generator)
def rest_line(self):
return [x for x in self.rest_line_gen()]
def rest_line_no_white(self):
return [x for x in self.rest_line_gen() if type(x) != LWhite]
def rest_line_as_LString(self):
self.rest_as_string = True
lstr = next(self.generator)
next(self.generator)
return lstr
def get_next_check(self, lType):
token = next(self.generator)
if type(token) in lType:
return type(token), token
else:
raise ParserError("Expected %s got ['%s']=[%s]" %
([x.identifier for x in lType],
token.identifier, token),
self.line, self.filename, self.linenum)
def get_next_check_nw(self, lType):
token = next(self.generator)
while type(token) == LWhite:
token = next(self.generator)
if type(token) in lType:
return type(token), token
else:
raise ParserError("Expected %s got ['%s']" %
([x.identifier for x in lType],
token.identifier),
self.line, self.filename, self.linenum)
def check_token(self, token, lType):
if type(token) in lType:
return type(token), token
else:
raise ParserError("Expected %s got ['%s']" %
([x.identifier for x in lType],
token.identifier),
self.line, self.filename, self.linenum)
def next_nw(gener):
token = next(gener)
while type(token) == LWhite:
token = next(gener)
return token
def cmd_tokens(tokens1, tokens2):
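    # Element-wise comparison of two token lists; True when every pair of
    # corresponding tokens matches (zip() stops at the shorter list).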
for x, y in list(zip(tokens1, tokens2)):
if x != y:
return False
else:
return True
def apply_predict(lexer, node, pre_dict):
predict = LApplyPreDict().set_operands(None, pre_dict)
node.content += [(lexer.filename, lexer.linenum, predict)]
return {}
def parse_filter(lexer, tokens):
"""
:return: Parsed filter
"""
or_filters = []
tokens = iter(tokens + [LEndL()])
typet, token = lexer.check_token(next(tokens), [LIdentifier, LLRBracket,
LEndL, LWhite])
and_filter = []
con_filter = []
dots = 1
while typet not in [LEndL]:
if typet in [LIdentifier, LLRBracket]: # join identifier
if typet == LLRBracket: # (xxx=ttt)
_, ident = lexer.check_token(next_nw(tokens),
[LIdentifier]) # (iden
typet, _ = lexer.check_token(next_nw(tokens),
[LSet, LRRBracket]) # =
if typet == LRRBracket: # (xxx)
token = Label(str(ident))
elif typet == LSet: # (xxx = yyyy)
_, value = lexer.check_token(next_nw(tokens),
[LIdentifier, LString])
lexer.check_token(next_nw(tokens), [LRRBracket])
token = Label(str(ident), str(value))
else:
token = Label(token)
if dots == 1:
con_filter.append(token)
elif dots == 2:
and_filter.append(con_filter)
con_filter = [token]
elif dots == 0 or dots > 2:
raise ParserError("Syntax Error expected \".\" between"
" Identifier.", lexer.line, lexer.filename,
lexer.linenum)
dots = 0
elif typet == LDot: # xxx.xxxx or xxx..xxxx
dots += 1
elif typet in [LComa, LWhite]:
if dots > 0:
raise ParserError("Syntax Error expected identifier between"
" \".\" and \",\".", lexer.line,
lexer.filename, lexer.linenum)
if and_filter:
if con_filter:
and_filter.append(con_filter)
con_filter = []
or_filters.append(and_filter)
and_filter = []
elif con_filter:
or_filters.append([con_filter])
con_filter = []
elif typet == LIdentifier:
or_filters.append([[Label(token)]])
else:
raise ParserError("Syntax Error expected \",\" between"
" Identifier.", lexer.line, lexer.filename,
lexer.linenum)
dots = 1
token = next(tokens)
while type(token) == LWhite:
token = next(tokens)
typet, token = lexer.check_token(token, [LIdentifier,
LComa, LDot,
LLRBracket, LEndL])
continue
typet, token = lexer.check_token(next(tokens), [LIdentifier, LComa,
LDot, LLRBracket,
LEndL, LWhite])
if and_filter:
if con_filter:
and_filter.append(con_filter)
con_filter = []
or_filters.append(and_filter)
and_filter = []
if con_filter:
or_filters.append([con_filter])
con_filter = []
return or_filters
class Parser(object):
# pylint: disable=W0102
def __init__(self, filename=None, defaults=False, expand_defaults=[],
debug=False):
self.node = Node()
self.debug = debug
self.defaults = defaults
self.expand_defaults = [LIdentifier(x) for x in expand_defaults]
self.filename = filename
if self.filename:
self.parse_file(self.filename)
self.only_filters = []
self.no_filters = []
self.assignments = []
        # get_dicts() is a recursive generator: it can invoke itself, and it
        # can also be called from outside to get the list of dicts.
        # The top-level generator has to be marked somehow so that variable
        # processing, suffix handling, duplicate dropping, etc. happen only
        # once, which is safe only in the top-level get_dicts().
        # The parent generator resets this flag.
def _debug(self, s, *args):
if self.debug:
logging.debug(s, *args)
def _warn(self, s, *args):
logging.warn(s, *args)
def parse_file(self, filename):
"""
Parse a file.
:param filename: Path of the configuration file.
"""
self.node.filename = filename
self.node = self._parse(Lexer(FileReader(filename)), self.node)
self.filename = filename
def parse_string(self, s):
"""
Parse a string.
:param s: String to parse.
"""
self.node.filename = StrReader("").filename
self.node = self._parse(Lexer(StrReader(s)), self.node)
def only_filter(self, variant):
"""
        Apply an 'only' filter programmatically and keep track of it.
Equivalent to parse a "only variant" line.
:param variant: String with the variant name.
"""
string = "only %s" % variant
self.only_filters.append(string)
self.parse_string(string)
def no_filter(self, variant):
"""
        Apply a 'no' filter programmatically and keep track of it.
Equivalent to parse a "no variant" line.
:param variant: String with the variant name.
"""
string = "no %s" % variant
        self.no_filters.append(string)
self.parse_string(string)
def assign(self, key, value):
"""
        Apply an assignment programmatically and keep track of it.
        Equivalent to parsing a "key = value" line.
        :param key: String with the key name.
        :param value: String with the value.
"""
string = "%s = %s" % (key, value)
self.assignments.append(string)
self.parse_string(string)
def _parse(self, lexer, node=None, prev_indent=-1):
if not node:
node = self.node
block_allowed = [LVariants, LIdentifier, LOnly,
LNo, LInclude, LDel, LNotCond, LSuffix, LJoin]
variants_allowed = [LVariant]
identifier_allowed = [LSet, LAppend, LPrepend, LLazySet,
LRegExpSet, LRegExpAppend,
LRegExpPrepend, LColon,
LEndL]
        variants_allowed_in = [LLBracket, LColon, LIdentifier, LEndL]
indent_allowed = [LIndent, LEndBlock]
allowed = block_allowed
var_indent = 0
var_name = ""
# meta contains variants meta-data
meta = {}
# pre_dict contains block of operation without collision with
# others block or operation. Increase speed almost twice.
pre_dict = {}
lexer.set_fast()
# Suffix should be applied as the last operator in the dictionary
# Reasons:
        # 1. Avoid stacking multiple suffix operators
        # 2. Make it affect all elements in the current block
suffix = None
try:
while True:
lexer.set_prev_indent(prev_indent)
typet, token = lexer.get_next_check(indent_allowed)
if typet == LEndBlock:
if pre_dict:
# flush pre_dict to node content.
pre_dict = apply_predict(lexer, node, pre_dict)
if suffix:
# Node has suffix, apply it to all elements
node.content.append(suffix)
return node
indent = token.length
typet, token = lexer.get_next_check(allowed)
if typet == LIdentifier:
# Parse:
# identifier .....
identifier = lexer.get_until_no_white(identifier_allowed)
if isinstance(identifier[-1], LOperators): # operand = <=
# Parse:
# identifier = xxx
# identifier <= xxx
# identifier ?= xxx
# etc..
op = identifier[-1]
if (len(identifier) == 1):
identifier = token
else:
identifier = [token] + identifier[:-1]
identifier = "".join([str(x) for x in identifier])
_, value = lexer.get_next_check([LString])
if value and (value[0] == value[-1] == '"' or
value[0] == value[-1] == "'"):
value = value[1:-1]
op.set_operands(identifier, value)
d_nin_val = "$" not in value
if type(op) == LSet and d_nin_val: # Optimization
op.apply_to_dict(pre_dict)
else:
if pre_dict:
# flush pre_dict to node content.
# If block already contain xxx = yyyy
# then operation xxx +=, <=, .... are safe.
if op.name in pre_dict and d_nin_val:
op.apply_to_dict(pre_dict)
lexer.get_next_check([LEndL])
continue
else:
pre_dict = apply_predict(lexer, node,
pre_dict)
node.content += [(lexer.filename,
lexer.linenum,
op)]
lexer.get_next_check([LEndL])
elif type(identifier[-1]) == LColon: # condition:
# Parse:
# xxx.yyy.(aaa=bbb):
identifier = [token] + identifier[:-1]
cfilter = parse_filter(lexer, identifier + [LEndL()])
next_line = lexer.rest_line_as_LString()
if next_line != "":
lexer.reader.set_next_line(next_line, indent + 1,
lexer.linenum)
cond = Condition(cfilter, lexer.line)
self._parse(lexer, cond, prev_indent=indent)
pre_dict = apply_predict(lexer, node, pre_dict)
node.content += [(lexer.filename, lexer.linenum, cond)]
else:
raise ParserError("Syntax ERROR expected \":\" or"
" operand", lexer.line,
lexer.filename, lexer.linenum)
elif typet == LVariant:
# Parse
# - var1: depend1, depend2
# block1
# - var2:
# block2
if pre_dict:
pre_dict = apply_predict(lexer, node, pre_dict)
already_default = False
is_default = False
meta_with_default = False
if "default" in meta:
meta_with_default = True
                    meta_in_expand_defaults = False
                    if var_name not in self.expand_defaults:
                        meta_in_expand_defaults = True
node4 = Node()
while True:
lexer.set_prev_indent(var_indent)
# Get token from lexer and check syntax.
typet, token = lexer.get_next_check_nw([LIdentifier,
LDefault,
LIndent,
LEndBlock])
if typet == LEndBlock:
break
if typet == LIndent:
lexer.get_next_check_nw([LVariant])
typet, token = lexer.get_next_check_nw(
[LIdentifier,
LDefault])
if typet == LDefault: # @
is_default = True
name = lexer.get_until_check([LIdentifier, LDot],
[LColon])
                        else:  # identifier
is_default = False
name = [token] + lexer.get_until_check(
[LIdentifier, LDot],
[LColon])
if len(name) == 2:
name = [name[0]]
raw_name = name
else:
raw_name = [x for x in name[:-1]]
name = [x for x in name[:-1]
if type(x) == LIdentifier]
token = next(lexer.generator)
while type(token) == LWhite:
token = next(lexer.generator)
tokens = None
if type(token) != LEndL:
tokens = [token] + lexer.get_until([LEndL])
deps = parse_filter(lexer, tokens)
else:
deps = []
# Prepare data for dict generator.
node2 = Node()
node2.children = [node]
node2.labels = node.labels
if var_name:
op = LSet().set_operands(var_name,
".".join([str(n) for n in name]))
node2.content += [(lexer.filename,
lexer.linenum,
op)]
node3 = self._parse(lexer, node2, prev_indent=indent)
if var_name:
node3.var_name = var_name
node3.name = [Label(var_name, str(n))
for n in name]
else:
node3.name = [Label(str(n)) for n in name]
# Update mapping name to file
node3.dep = deps
if meta_with_default:
for wd in meta["default"]:
if cmd_tokens(wd, raw_name):
is_default = True
meta["default"].remove(wd)
if (is_default and not already_default and
                                meta_in_expand_defaults):
node3.default = True
already_default = True
node3.append_to_shortname = not is_default
op = LUpdateFileMap()
op.set_operands(lexer.filename,
".".join(str(x)
for x in node3.name))
node3.content += [(lexer.filename,
lexer.linenum,
op)]
op = LUpdateFileMap()
op.set_operands(lexer.filename,
".".join(str(x.name)
for x in node3.name),
"_short_name_map_file")
node3.content += [(lexer.filename,
lexer.linenum,
op)]
if node3.default and self.defaults:
# Move default variant in front of rest
# of all variants.
# Speed optimization.
node4.children.insert(0, node3)
else:
node4.children += [node3]
node4.labels.update(node3.labels)
node4.labels.update(node3.name)
if "default" in meta and meta["default"]:
raise ParserError("Missing default variant %s" %
(meta["default"]), lexer.line,
lexer.filename, lexer.linenum)
allowed = block_allowed
node = node4
elif typet == LVariants: # _name_ [meta1=xxx] [yyy] [xxx]
# Parse
# variants _name_ [meta1] [meta2]:
if type(node) in [Condition, NegativeCondition]:
raise ParserError("'variants' is not allowed inside a "
"conditional block", lexer.line,
lexer.reader.filename, lexer.linenum)
lexer.set_strict()
tokens = lexer.get_until_no_white([LLBracket, LColon,
LIdentifier, LEndL])
vtypet = type(tokens[-1])
var_name = ""
meta.clear()
# [meta1=xxx] [yyy] [xxx]
while vtypet not in [LColon, LEndL]:
if vtypet == LIdentifier:
if var_name != "":
raise ParserError("Syntax ERROR expected"
" \"[\" or \":\"",
lexer.line, lexer.filename,
lexer.linenum)
var_name = tokens[0]
elif vtypet == LLBracket: # [
_, ident = lexer.get_next_check_nw([LIdentifier])
typet, _ = lexer.get_next_check_nw([LSet,
LRBracket])
if typet == LRBracket: # [xxx]
if ident not in meta:
meta[ident] = []
meta[ident].append(True)
elif typet == LSet: # [xxx = yyyy]
tokens = lexer.get_until_no_white([LRBracket,
LEndL])
if type(tokens[-1]) == LRBracket:
if ident not in meta:
meta[ident] = []
meta[ident].append(tokens[:-1])
else:
raise ParserError("Syntax ERROR"
" expected \"]\"",
lexer.line,
lexer.filename,
lexer.linenum)
                        tokens = lexer.get_next_check_nw(variants_allowed_in)
vtypet = type(tokens[-1])
if "default" in meta:
for wd in meta["default"]:
if type(wd) != list:
raise ParserError("Syntax ERROR expected "
"[default=xxx]",
lexer.line,
lexer.filename,
lexer.linenum)
if vtypet == LEndL:
raise ParserError("Syntax ERROR expected \":\"",
lexer.line, lexer.filename,
lexer.linenum)
lexer.get_next_check_nw([LEndL])
allowed = variants_allowed
var_indent = indent
elif typet in [LNo, LOnly]:
# Parse:
# only/no (filter=text)..aaa.bbb, xxxx
lfilter = parse_filter(lexer, lexer.rest_line())
pre_dict = apply_predict(lexer, node, pre_dict)
if typet == LOnly:
node.content += [(lexer.filename, lexer.linenum,
OnlyFilter(lfilter, lexer.line))]
else: # LNo
node.content += [(lexer.filename, lexer.linenum,
NoFilter(lfilter, lexer.line))]
elif typet == LJoin:
# Parse:
# join (filter=text)..aaa.bbb, xxxx
# syntax is the same as for No/Only filters
lfilter = parse_filter(lexer, lexer.rest_line())
pre_dict = apply_predict(lexer, node, pre_dict)
node.content += [(lexer.filename, lexer.linenum, JoinFilter(lfilter, lexer.line))]
elif typet == LSuffix:
# Parse:
# suffix SUFFIX
if pre_dict:
pre_dict = apply_predict(lexer, node, pre_dict)
token_type, token_val = lexer.get_next_check([LIdentifier])
lexer.get_next_check([LEndL])
suffix_operator = Suffix().set_operands(None, token_val)
                    # The suffix is applied after all other elements in the current node are processed:
suffix = (lexer.filename, lexer.linenum, suffix_operator)
elif typet == LInclude:
# Parse:
                    # include <file path> (relative paths resolve against the including file)
path = lexer.rest_line_as_LString()
filename = os.path.expanduser(path)
if (isinstance(lexer.reader, FileReader) and
not os.path.isabs(filename)):
filename = os.path.join(
os.path.dirname(lexer.filename),
filename)
if not os.path.isfile(filename):
raise MissingIncludeError(lexer.line, lexer.filename,
lexer.linenum)
pre_dict = apply_predict(lexer, node, pre_dict)
lch = Lexer(FileReader(filename))
node = self._parse(lch, node, -1)
lexer.set_prev_indent(prev_indent)
elif typet == LDel:
# Parse:
# del operand
_, to_del = lexer.get_next_check_nw([LIdentifier])
lexer.get_next_check_nw([LEndL])
token.set_operands(to_del, None)
pre_dict = apply_predict(lexer, node, pre_dict)
node.content += [(lexer.filename, lexer.linenum,
token)]
elif typet == LNotCond:
# Parse:
# !xxx.yyy.(aaa=bbb): vvv
lfilter = parse_filter(lexer,
lexer.get_until_no_white(
[LColon, LEndL])[:-1])
next_line = lexer.rest_line_as_LString()
if next_line != "":
lexer.reader.set_next_line(next_line, indent + 1,
lexer.linenum)
cond = NegativeCondition(lfilter, lexer.line)
self._parse(lexer, cond, prev_indent=indent)
lexer.set_prev_indent(prev_indent)
pre_dict = apply_predict(lexer, node, pre_dict)
node.content += [(lexer.filename, lexer.linenum, cond)]
else:
raise ParserError("Syntax ERROR expected", lexer.line,
lexer.filename, lexer.linenum)
except Exception:
self._debug("%s %s: %s" % (lexer.filename, lexer.linenum,
lexer.line))
raise
def drop_suffixes(self, d):
"""
        Merge suffixes for the same variable, or drop unnecessary suffixes.
This step can be done safely only by the top level generator, before
outputting the dictionary to outside world.
"""
# dictionary `d' is going to change, keep its original copy
d_orig = d.copy()
for key in d_orig:
if key in _reserved_keys:
continue
if not isinstance(key, tuple):
continue
try:
# This file was invoked through cmdline
options.skipdups
skipdups = options.skipdups
except NameError:
# This file was invoked as Python module
skipdups = True
if skipdups:
# Drop vars with suffixes matches general var val
# Example: if a_x == 1, and a == 1. Drop: a_x, leave a
gen_var_name = key[0]
if gen_var_name in d_orig and d_orig[gen_var_name] == d_orig[key]:
# Drop gen_var_name, use general key with same value
d.pop(key)
continue
can_drop_all_suffixes_for_this_key = True
for k in d_orig:
gen_name = k[0] if isinstance(k, tuple) else k
if gen_var_name == gen_name:
if d_orig[key] != d_orig[k]:
can_drop_all_suffixes_for_this_key = False
break
if skipdups and can_drop_all_suffixes_for_this_key:
new_key = key[0]
else:
# merge suffixes, preserve reverse order of suffixes
new_key = key[:1] + key[1:][::-1]
new_key = ''.join((map(str, new_key)))
d[new_key] = d.pop(key)
def get_dicts(self, node=None, ctx=[], content=[], shortname=[], dep=[]):
"""
        Process 'join' entries and unpack the join filters for this node.
        :param ctx: node labels/names
        :param content: previous content in plain form
        :returns: a dict generator
1) join filter_1 filter_2 ....
multiplies all dictionaries as:
all_variants_match_filter_1 * all_variants_match_filter_2 * ....
2) join only_one_filter
== only only_one_filter
3) join filter_1 filter_1
also works and transforms to:
all_variants_match_filter_1 * all_variants_match_filter_1
Example:
join a
join a
Transforms into:
join a a
"""
node = node or self.node
# Keep track to know who is a parent generator
parent = False
if self.parent_generator:
# I am parent of the all
parent = True
# No one else is
self.parent_generator = False
# Node is a current block. It has content, its contents: node.content
# Content without joins
new_content = []
# All joins in current node
joins = []
for t in node.content:
filename, linenum, obj = t
if not isinstance(obj, JoinFilter):
new_content.append(t)
continue
# Accumulate all joins at one node
joins += [t]
if not joins:
# Return generator
for d in self.get_dicts_plain(node, ctx, content, shortname, dep):
if parent:
self.drop_suffixes(d)
yield d
else:
# Rewrite all separate joins in one node as many `only'
onlys = []
for j in joins:
filename, linenum, obj = j
for word in obj.filter:
f = OnlyFilter([word], str(word))
onlys += [(filename, linenum, f)]
old_content = node.content[:]
node.content = new_content
for d in self.multiply_join(onlys, node, ctx, content, shortname, dep):
if parent:
self.drop_suffixes(d)
yield d
node.content = old_content[:]
def mk_name(self, n1, n2):
"""Make name for test. Case: two dics were merged"""
common_prefix = n1[:[x[0] == x[1] for x in list(zip(n1, n2))].index(0)]
cp = ".".join(common_prefix.split('.')[:-1])
p1 = re.sub(r"^"+cp, "", n1)
p2 = re.sub(r"^"+cp, "", n2)
if cp:
name = cp + p1 + p2
else:
name = p1 + "." + p2
return name
def multiply_join(self, onlys, node=None, ctx=[], content=[], shortname=[], dep=[]):
"""
        Multiply all joins and return dictionaries one by one.
        Each `join' behaves like an `only' filter.
        This function is a recursive generator.
"""
# Current join/only
only = onlys[:1]
remains = onlys[1:]
content_orig = node.content[:]
node.content += only
if not remains:
for d in self.get_dicts_plain(node, ctx, content, shortname, dep):
yield d
else:
for d1 in self.get_dicts_plain(node, ctx, content, shortname, dep):
# Current frame multiply by all variants from bottom
node.content = content_orig
for d2 in self.multiply_join(remains, node, ctx, content, shortname, dep):
d = d1.copy()
d.update(d2)
d["name"] = self.mk_name(d1["name"], d2["name"])
d["shortname"] = self.mk_name(d1["shortname"], d2["shortname"])
yield d
def get_dicts_plain(self, node=None, ctx=[], content=[], shortname=[], dep=[]):
"""
Generate dictionaries from the code parsed so far. This should
be called after parsing something.
:return: A dict generator.
"""
def process_content(content, failed_filters):
# 1. Check that the filters in content are OK with the current
# context (ctx).
# 2. Move the parts of content that are still relevant into
# new_content and unpack conditional blocks if appropriate.
# For example, if an 'only' statement fully matches ctx, it
# becomes irrelevant and is not appended to new_content.
# If a conditional block fully matches, its contents are
# unpacked into new_content.
# 3. Move failed filters into failed_filters, so that next time we
# reach this node or one of its ancestors, we'll check those
# filters first.
blocked_filters = []
for t in content:
filename, linenum, obj = t
if isinstance(obj, LOperators):
new_content.append(t)
continue
# obj is an OnlyFilter/NoFilter/Condition/NegativeCondition
if obj.requires_action(ctx, ctx_set, labels):
# This filter requires action now
if type(obj) is OnlyFilter or type(obj) is NoFilter:
if obj not in blocked_filters:
self._debug(" filter did not pass: %r (%s:%s)",
obj.line, filename, linenum)
failed_filters.append(t)
return False
else:
continue
else:
self._debug(" conditional block matches:"
" %r (%s:%s)", obj.line, filename, linenum)
# Check and unpack the content inside this Condition
# object (note: the failed filters should go into
# new_internal_filters because we don't expect them to
# come from outside this node, even if the Condition
# itself was external)
if not process_content(obj.content,
new_internal_filters):
failed_filters.append(t)
return False
continue
elif obj.is_irrelevant(ctx, ctx_set, labels):
# This filter is no longer relevant and can be removed
continue
else:
# Keep the filter and check it again later
new_content.append(t)
return True
def might_pass(failed_ctx,
failed_ctx_set,
failed_external_filters,
failed_internal_filters):
all_content = content + node.content
for t in failed_external_filters + failed_internal_filters:
if t not in all_content:
return True
for t in failed_external_filters:
_, _, external_filter = t
if not external_filter.might_pass(failed_ctx,
failed_ctx_set,
ctx, ctx_set,
labels):
return False
for t in failed_internal_filters:
if t not in node.content:
return True
for t in failed_internal_filters:
_, _, internal_filter = t
if not internal_filter.might_pass(failed_ctx,
failed_ctx_set,
ctx, ctx_set,
labels):
return False
return True
def add_failed_case():
node.failed_cases.appendleft((ctx, ctx_set,
new_external_filters,
new_internal_filters))
if len(node.failed_cases) > num_failed_cases:
node.failed_cases.pop()
node = node or self.node
# if self.debug: #Print dict on which is working now.
# node.dump(0)
# Update dep
for d in node.dep:
for dd in d:
dep = dep + [".".join([str(label) for label in ctx + dd])]
# Update ctx
ctx = ctx + node.name
ctx_set = set(ctx)
labels = node.labels
# Get the current name
name = ".".join([str(label) for label in ctx])
if node.name:
self._debug("checking out %r", name)
# Check previously failed filters
for i, failed_case in enumerate(node.failed_cases):
if not might_pass(*failed_case):
self._debug("\n* this subtree has failed before %s\n"
" content: %s\n"
" failcase:%s\n",
name, content + node.content, failed_case)
del node.failed_cases[i]
node.failed_cases.appendleft(failed_case)
return
# Check content and unpack it into new_content
new_content = []
new_external_filters = []
new_internal_filters = []
if (not process_content(node.content, new_internal_filters) or
not process_content(content, new_external_filters)):
add_failed_case()
self._debug("Failed_cases %s", node.failed_cases)
return
# Update shortname
if node.append_to_shortname:
shortname = shortname + node.name
# Recurse into children
count = 0
if self.defaults and node.var_name not in self.expand_defaults:
for n in node.children:
for d in self.get_dicts(n, ctx, new_content, shortname, dep):
count += 1
yield d
if n.default and count:
break
else:
for n in node.children:
for d in self.get_dicts(n, ctx, new_content, shortname, dep):
count += 1
yield d
# Reached leaf?
if not node.children:
self._debug(" reached leaf, returning it")
d = {"name": name, "dep": dep,
"shortname": ".".join([str(sn.name) for sn in shortname])}
for _, _, op in new_content:
op.apply_to_dict(d)
postfix_parse(d)
yield d
def print_dicts_default(options, dicts):
"""Print dictionaries in the default mode"""
for count, dic in enumerate(dicts):
if options.fullname:
print("dict %4d: %s" % (count + 1, dic["name"]))
else:
print("dict %4d: %s" % (count + 1, dic["shortname"]))
if options.contents:
keys = list(dic.keys())
keys.sort()
for key in keys:
print(" %s = %s" % (key, dic[key]))
# pylint: disable=W0613
def print_dicts_repr(options, dicts):
import pprint
print("[")
for dic in dicts:
print("%s," % (pprint.pformat(dic)))
print("]")
def print_dicts(options, dicts):
if options.repr_mode:
print_dicts_repr(options, dicts)
else:
print_dicts_default(options, dicts)
def convert_data_size(size, default_suffix='B'):
"""
Convert data size from human readable units to an int of arbitrary size.
:param size: Human readable data size representation (string).
    :param default_suffix: Default suffix used to represent the data size.
:return: Int with data size in the appropriate order of magnitude.
"""
orders = {'B': 1,
'K': 1024,
'M': 1024 * 1024,
'G': 1024 * 1024 * 1024,
'T': 1024 * 1024 * 1024 * 1024,
}
order = re.findall("([BbKkMmGgTt])", size[-1])
if not order:
        size += default_suffix
        order = [default_suffix]
return int(float(size[0:-1]) * orders[order[0].upper()])
def compare_string(str1, str2):
"""
Compare two int string and return -1, 0, 1.
It can compare two memory value even in sufix
:param str1: The first string
:param str2: The second string
:Return: Rteurn -1, when str1< str2
0, when str1 = str2
1, when str1> str2
"""
order1 = re.findall("([BbKkMmGgTt])", str1)
order2 = re.findall("([BbKkMmGgTt])", str2)
if order1 or order2:
value1 = convert_data_size(str1, "M")
value2 = convert_data_size(str2, "M")
else:
value1 = int(str1)
value2 = int(str2)
if value1 < value2:
return -1
elif value1 == value2:
return 0
else:
return 1
def postfix_parse(dic):
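    # Resolve "<key>_max" / "<key>_min" / "<key>_fixed" postfixes into the
    # plain key, e.g. {"mem": "4G", "mem_max": "2G"} caps "mem" at "2G".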
tmp_dict = {}
for key in dic:
# Bypass the case that use tuple as key value
if isinstance(key, tuple):
continue
if key.endswith("_max"):
tmp_key = key.split("_max")[0]
if (tmp_key not in dic or
compare_string(dic[tmp_key], dic[key]) > 0):
tmp_dict[tmp_key] = dic[key]
elif key.endswith("_min"):
tmp_key = key.split("_min")[0]
if (tmp_key not in dic or
compare_string(dic[tmp_key], dic[key]) < 0):
tmp_dict[tmp_key] = dic[key]
elif key.endswith("_fixed"):
tmp_key = key.split("_fixed")[0]
tmp_dict[tmp_key] = dic[key]
for key in tmp_dict:
dic[key] = tmp_dict[key]
if __name__ == "__main__":
parser = optparse.OptionParser('usage: %prog [options] filename '
'[extra code] ...\n\nExample:\n\n '
'%prog tests.cfg "only my_set" "no qcow2"')
parser.add_option("-v", "--verbose", dest="debug", action="store_true",
help="include debug messages in console output")
parser.add_option("-f", "--fullname", dest="fullname", action="store_true",
help="show full dict names instead of short names")
parser.add_option("-c", "--contents", dest="contents", action="store_true",
help="show dict contents")
parser.add_option("-r", "--repr", dest="repr_mode", action="store_true",
help="output parsing results Python format")
parser.add_option("-d", "--defaults", dest="defaults", action="store_true",
help="use only default variant of variants if there"
" is some")
parser.add_option("-e", "--expand", dest="expand", type="string",
help="list of vartiant which should be expanded when"
" defaults is enabled. \"name, name, name\"")
parser.add_option("-s", "--skip-dups", dest="skipdups", default=True, action="store_false",
help="Don't drop variables with different suffixes and same val")
options, args = parser.parse_args()
if not args:
parser.error("filename required")
if options.debug:
logging.basicConfig(level=logging.DEBUG)
expand = []
if options.expand:
expand = [x.strip() for x in options.expand.split(",")]
c = Parser(args[0], defaults=options.defaults, expand_defaults=expand,
debug=options.debug)
for s in args[1:]:
c.parse_string(s)
if options.debug:
c.node.dump(0, True)
dicts = c.get_dicts()
print_dicts(options, dicts)
|
balamuruhans/avocado-vt
|
virttest/cartesian_config.py
|
Python
|
gpl-2.0
| 78,979
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import jsonschema
import mock
from rally.deployment.engines import devstack
from tests.unit import test
SAMPLE_CONFIG = {
"type": "DevstackEngine",
"provider": {
"name": "ExistingServers",
"credentials": [{"user": "root", "host": "example.com"}],
},
"localrc": {
"ADMIN_PASSWORD": "secret",
},
}
DEVSTACK_REPO = "https://git.openstack.org/openstack-dev/devstack"
class DevstackEngineTestCase(test.TestCase):
def setUp(self):
super(DevstackEngineTestCase, self).setUp()
self.deployment = {
"uuid": "de641026-dbe3-4abe-844a-ffef930a600a",
"config": SAMPLE_CONFIG,
}
self.engine = devstack.DevstackEngine(self.deployment)
def test_invalid_config(self):
self.deployment = SAMPLE_CONFIG.copy()
self.deployment["config"] = {"type": 42}
engine = devstack.DevstackEngine(self.deployment)
self.assertRaises(jsonschema.ValidationError,
engine.validate)
def test_construct(self):
self.assertEqual(self.engine.localrc["ADMIN_PASSWORD"], "secret")
@mock.patch("rally.deployment.engines.devstack.open", create=True)
def test_prepare_server(self, mock_open):
mock_open.return_value = "fake_file"
server = mock.Mock()
server.password = "secret"
self.engine.prepare_server(server)
calls = [
mock.call("/bin/sh -e", stdin="fake_file"),
mock.call("chpasswd", stdin="rally:secret"),
]
self.assertEqual(calls, server.ssh.run.mock_calls)
filename = mock_open.mock_calls[0][1][0]
self.assertTrue(filename.endswith("rally/deployment/engines/"
"devstack/install.sh"))
self.assertEqual([mock.call(filename, "rb")], mock_open.mock_calls)
@mock.patch("rally.deployment.engine.Engine.get_provider")
@mock.patch("rally.deployment.engines.devstack.get_updated_server")
@mock.patch("rally.deployment.engines.devstack.get_script")
@mock.patch("rally.deployment.serverprovider.provider.Server")
@mock.patch("rally.deployment.engines.devstack.objects.Endpoint")
def test_deploy(self, mock_endpoint, mock_server, mock_get_script,
mock_get_updated_server, mock_engine_get_provider):
mock_engine_get_provider.return_value = fake_provider = (
mock.Mock()
)
server = mock.Mock(host="host")
mock_endpoint.return_value = "fake_endpoint"
mock_get_updated_server.return_value = ds_server = mock.Mock()
mock_get_script.return_value = "fake_script"
server.get_credentials.return_value = "fake_credentials"
fake_provider.create_servers.return_value = [server]
with mock.patch.object(self.engine, "deployment") as mock_deployment:
endpoints = self.engine.deploy()
self.assertEqual({"admin": "fake_endpoint"}, endpoints)
mock_endpoint.assert_called_once_with(
"http://host:5000/v2.0/", "admin", "secret", "admin", "admin")
mock_deployment.add_resource.assert_called_once_with(
info="fake_credentials",
provider_name="DevstackEngine",
type="credentials")
repo = "https://git.openstack.org/openstack-dev/devstack"
cmd = "/bin/sh -e -s %s master" % repo
server.ssh.run.assert_called_once_with(cmd, stdin="fake_script")
ds_calls = [
mock.call.ssh.run("cat > ~/devstack/localrc", stdin=mock.ANY),
mock.call.ssh.run("~/devstack/stack.sh")
]
self.assertEqual(ds_calls, ds_server.mock_calls)
localrc = ds_server.mock_calls[0][2]["stdin"]
self.assertIn("ADMIN_PASSWORD=secret", localrc)
|
afaheem88/rally
|
tests/unit/deployment/engines/test_devstack.py
|
Python
|
apache-2.0
| 4,402
|
#! /usr/bin/env python
""" Create files for jmod unit test """
import nmrglue.fileio.pipe as pipe
import nmrglue.process.pipe_proc as p
d, a = pipe.read("time_complex.fid")
d, a = p.jmod(d, a, cos=True, j=10.0, lb=5.0, c=1.0)
pipe.write("jmod1.glue", d, a, overwrite=True)
d, a = pipe.read("time_complex.fid")
d, a = p.jmod(d, a, sin=True, j=18.0, lb=1.4, c=0.5, start=100, size=800,
one=True)
pipe.write("jmod2.glue", d, a, overwrite=True)
|
atomman/nmrglue
|
tests/pipe_proc_tests/jmod.py
|
Python
|
bsd-3-clause
| 458
|
import theano
import theano.tensor as T
import numpy as np
import copy
import os
import datetime
import cPickle as pickle
from Loss import nll_multiclass_3d,categorical_crossentropy
from Initializations import glorot_uniform,zero,alloc_zeros_matrix,norm_weight,glorot_normal
from Utils import Progbar,ndim_tensor,make_batches,slice_X,seq_to_text
from Optimizers import SGD,RMSprop,Adagrad,Adadelta,Adam
from Layers import dropout_layer
import Callbacks as cbks
mode = theano.Mode(linker='cvm', optimizer='fast_run')  # run the compiled graph via the C VM linker
class ENC_DEC(object):
def __init__(self,n_in,n_hidden,n_decoder,n_out,
n_epochs=400,n_batch=16,maxlen=20,n_words_x=10000,n_words_y=10000,dim_word=100,
snapshot=100,sample_Freq=100,val_Freq=100,shared_emb=False,L1_reg=0,L2_reg=0):
self.n_in=int(n_in)
self.n_hidden=int(n_hidden)
self.n_decoder=int(n_decoder)
self.n_out=int(n_out)
self.n_batch=int(n_batch)
self.shared_emb=shared_emb
self.n_epochs=n_epochs
self.maxlen= int(maxlen)
self.dim_word=dim_word
self.n_words_x=n_words_x
self.n_words_y=n_words_y
self.x = T.matrix(name = 'x', dtype = 'int32')
self.y = T.matrix(name = 'y', dtype = 'int32')
self.x_mask = T.matrix(name = 'x_mask', dtype = 'float32')
self.y_mask = T.matrix(name = 'y_mask', dtype = 'float32')
#self.x_emb = T.tensor3(name = 'x', dtype = 'float32')
#self.y_emb = T.tensor3(name = 'y', dtype = 'float32')
self.W_hy = glorot_uniform((self.n_out,self.n_words_y))
self.b_hy = zero((self.n_words_y,))
self.W_hi = glorot_uniform((self.n_hidden,self.n_decoder))
self.b_hi = zero((n_decoder,))
self.Wemb=glorot_normal((self.n_words_x,self.dim_word))
self.x_emb=self.Wemb[self.x]
self.y_emb=self.Wemb[self.y]
if not self.shared_emb:
self.Wemb_dec=glorot_normal((self.n_words_y,self.dim_word))
self.y_emb=self.Wemb_dec[self.y]
self.layers = []
self.params=[]
self.errors=[]
self.val_errors=[]
self.snapshot=int(snapshot)
self.sample_Freq=int(sample_Freq)
self.val_Freq=int(val_Freq)
self.L1_reg=L1_reg
self.L2_reg=L2_reg
self.L1= 0
self.L2_sqr= 0
def add(self,layer):
self.layers.append(layer)
if len(self.layers) > 1:
self.layers[-1].set_previous(self.layers[-2])
else:
self.set_input()
self.set_mask()
self.params+=layer.params
self.L1 += layer.L1
self.L2_sqr += layer.L2_sqr
def set_params(self,**params):
return
def __getstate__(self):
""" Return state sequence."""
params = self.params # parameters set in constructor
weights = [p.get_value() for p in self.params]
error=self.errors
state = (params, weights,error)
return state
def _set_weights(self, weights):
""" Set fittable parameters from weights sequence.
Parameters must be in the order defined by self.params:
W, W_in, W_out, h0, bh, by
"""
i = iter(weights)
for param in self.params:
param.set_value(i.next())
def __setstate__(self, state):
""" Set parameters from state sequence.
Parameters must be in the order defined by self.params:
W, W_in, W_out, h0, bh, by
"""
params, weights,error = state
#self.set_params(**params)
#self.ready()
self._set_weights(weights)
self.errors=error
def save(self, fpath='temp/', fname=None):
""" Save a pickled representation of Model state. """
fpathstart, fpathext = os.path.splitext(fpath)
if fpathext == '.pkl':
# User supplied an absolute path to a pickle file
fpath, fname = os.path.split(fpath)
elif fname is None:
# Generate filename based on date
date_obj = datetime.datetime.now()
date_str = date_obj.strftime('%Y-%m-%d-%H:%M:%S')
class_name = self.__class__.__name__
fname = '%s.%s.pkl' % (class_name, date_str)
fabspath = os.path.join(fpath, fname)
print("Saving to %s ..." % fabspath)
file = open(fabspath, 'wb')
state = self.__getstate__()
pickle.dump(state, file, protocol=pickle.HIGHEST_PROTOCOL)
file.close()
def load(self, path):
""" Load model parameters from path. """
print("Loading from %s ..." % path)
file = open(path, 'rb')
state = pickle.load(file)
self.__setstate__(state)
file.close()
def set_mask(self):
self.layers[0].x_mask = self.x_mask
def set_input(self):
for l in self.layers:
if hasattr(l, 'input'):
self.layers[0].input = self.x_emb
break
def get_input(self,train):
if not hasattr(self.layers[0], 'input'):
self.set_input()
return self.layers[0].get_input(train)
def get_output(self,train):
## calculate initial state
ctx=self.layers[-1].get_input(train)
ctx_mean = (ctx * self.x_mask[:,:,None]).sum(1) / self.x_mask.sum(1)[:,None]
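        # ctx_mean: mean encoder state per sample over the unmasked timesteps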
#ctx_mean = dropout_layer(ctx_mean, train)
init_state=T.tanh(T.dot(ctx_mean, self.W_hi) + self.b_hi)
proj=self.layers[-1].get_output(self.y_emb,self.y_mask,init_state,train)
        ### final prediction formula
proj = dropout_layer(proj, train)
self.y_pred = T.dot(proj, self.W_hy) + self.b_hy
        y_p_m = T.reshape(self.y_pred, (self.y_pred.shape[0] * self.y_pred.shape[1], -1))  # memory caution: flatten before softmax
y_p_s = T.nnet.softmax(y_p_m)
#p_y_given_x = T.reshape(y_p_s, self.y_pred.shape)
return T.reshape(y_p_s, self.y_pred.shape)
def build(self):
### set up parameters
if self.shared_emb: self.params+=[self.W_hi, self.b_hi,self.W_hy, self.b_hy, self.Wemb]
else: self.params+=[self.W_hi, self.b_hi,self.W_hy, self.b_hy, self.Wemb, self.Wemb_dec]
### set up regularizer
self.L1 += T.sum(abs(self.W_hy))
self.L2_sqr += T.sum(self.W_hy**2)
def compile(self,optimizer='Adam',loss='nll_multiclass_3d'):
self.build()
next_y=T.matrix()
next_h=T.matrix()
# output of model
self.y_train = self.get_output(train=True)
self.y_test = self.get_output(train=False)
y_pred=self.get_sample(next_y,next_h)
if type(self.x) == list:
train_ins = self.x + self.x_mask + [self.y,self.y_mask]
test_ins = self.x + self.x_mask + [self.y,self.y_mask]
predict_ins = self.x + self.x_mask+ next_y + next_h
else:
train_ins = [self.x, self.x_mask, self.y, self.y_mask]
test_ins = [self.x, self.x_mask, self.y, self.y_mask]
predict_ins = [self.x, self.x_mask, next_y, next_h]
### cost and updates
self.loss = eval(loss)
train_loss=self.loss(self.y, self.y_mask ,self.y_train)
test_loss=self.loss(self.y, self.y_mask ,self.y_test)
train_loss.name = 'train_loss'
test_loss.name = 'test_loss'
#train_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_train, axis=-1)))
#test_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_test, axis=-1)))
#train_accuracy = T.mean(T.eq(self.y, T.argmax(self.y_train, axis=-1)))
#test_accuracy = T.mean(T.eq(self.y, T.argmax(self.y_test, axis=-1)))
cost = train_loss +self.L2_reg * self.L2_sqr
self.optimizer=eval(optimizer)()
updates=self.optimizer.get_updates(self.params,cost)
print 'Optimizer: '+optimizer
### compile theano functions
idx=T.lscalar()
if self.shared_emb:
self.get_embedding = theano.function(inputs = [idx,],
outputs = self.Wemb[idx],
mode = mode)
else:
self.get_embedding = theano.function(inputs = [idx,],
outputs = self.Wemb_dec[idx],
mode = mode)
self._train = theano.function(inputs = train_ins,
outputs = cost,
updates = updates,
mode = mode)
self._test = theano.function(inputs = test_ins,
outputs = test_loss,
mode = mode)
self._predict = theano.function(inputs = predict_ins,
outputs = y_pred,
mode = mode)
def train(self,train_set,val_set,worddict,verbose,shuffle=True,show_accuracy=False):
train_set_x = np.asarray(train_set[0], dtype='int32')
mask_set_x = np.asarray(train_set[1], dtype='float32')
train_set_y = np.asarray(train_set[2], dtype='int32')
mask_set_y = np.asarray(train_set[3], dtype='float32')
ins = [train_set_x, mask_set_x, train_set_y,mask_set_y]
val_set_x = np.asarray(val_set[0], dtype='int32')
mask_val_set_x = np.asarray(val_set[1], dtype='float32')
val_set_y = np.asarray(val_set[2], dtype='int32')
mask_val_set_y = np.asarray(val_set[3], dtype='float32')
val_ins = [val_set_x, mask_val_set_x, val_set_y,mask_val_set_y]
###############
# TRAIN MODEL #
###############
print 'Training model ...'
nb_train_sample = train_set_x.shape[0]
index_array = np.arange(nb_train_sample)
### call back###
history = cbks.History()
callbacks = [history, cbks.BaseLogger()]
callbacks = cbks.CallbackList(callbacks)
if show_accuracy:
f = self._train_with_acc
out_labels = ['loss', 'acc']
metrics = ['loss', 'acc', 'val_loss', 'val_acc']
else:
f = self._train
out_labels = ['loss']
metrics = ['loss', 'val_loss']
do_validation = True
callbacks._set_model(self)
callbacks._set_params({
'batch_size': self.n_batch,
'nb_epoch': self.n_epochs,
'nb_sample': nb_train_sample,
'verbose': verbose,
'do_validation': do_validation,
'metrics': metrics,
})
callbacks.on_train_begin()
for epoch in range(self.n_epochs):
callbacks.on_epoch_begin(epoch)
if shuffle: np.random.shuffle(index_array)
train_losses=[]
batches = make_batches(nb_train_sample, self.n_batch)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
try:
ins_batch = slice_X(ins, batch_ids)
except TypeError as err:
                    print('TypeError while preparing batch. '
                          'If using HDF5 input data, pass shuffle="batch".\n')
raise
batch_logs = {}
batch_logs['batch'] = batch_index
batch_logs['size'] = len(batch_ids)
callbacks.on_batch_begin(batch_index, batch_logs)
cost = f(*ins_batch)
if np.isnan(cost) or np.isinf(cost):
raise ValueError('NaN detected')
train_losses.append(cost)
#train_batch_sizes.append(get_batch_size(idx, n_train))
if type(cost) != list:
cost = [cost]
for l, o in zip(out_labels, cost):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
epoch_logs = {}
# compute loss on validation set
if np.mod(epoch+1,self.val_Freq)==0:
val_outs = self._test_loop(self._test, val_ins, batch_size=self.n_batch, verbose=0)
self.val_errors.append(val_outs)
if type(val_outs) != list:
val_outs = [val_outs]
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
this_train_loss = np.average(train_losses)
self.errors.append(this_train_loss)
### generating sample..
if np.mod(epoch+1,self.sample_Freq)==0:
print 'Generating a sample...'
for i in range(3):
rand=np.random.randint(1,nb_train_sample)
test=train_set_x[rand][None,:]
mask=mask_set_x[rand][None,:]
truth=train_set_y[rand][None,:]
ins_gen=[test,mask,truth]
self.generate(ins_gen,worddict)
            ### automatically save a snapshot ...
if np.mod(epoch+1,self.snapshot)==0:
                if epoch != self.n_epochs: self.save()
def generate(self, test_set, worddict, with_truth=True,stochastic=True,batch_size=1):
test_set_x = np.asarray(test_set[0], dtype='int32')
mask_set_x = np.asarray(test_set[1], dtype='float32')
ins = [test_set_x, mask_set_x]
if with_truth:
test_set_y = np.asarray(test_set[2], dtype='int32')
ins_t=[test_set_y]
nb_sample = ins[0].shape[0]
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
ins_batch = slice_X(ins, batch_ids)
batch_outs = self._generate_loop(self._predict, ins_batch,stochastic)
if with_truth:
ins_batch_t = slice_X(ins_t, batch_ids)
print 'Input: ',seq_to_text(ins_batch[0],worddict[0])
if with_truth: print 'Truth: ',seq_to_text(ins_batch_t[0],worddict[-1])
print 'Sample: ',seq_to_text(np.asarray(batch_outs[1]),worddict[-1])
return batch_outs
def get_sample(self,y,h):
ctx=self.layers[-1].get_input(False)
ctx_mean = (ctx * self.x_mask[:,:,None]).sum(1) / self.x_mask.sum(1)[:,None]
h = T.switch(h[0] < 0,
T.tanh(T.dot(ctx_mean, self.W_hi) + self.b_hi),
h)
h,logit=self.layers[-1].get_sample(y,h)
logit = dropout_layer(logit, False)
y_gen = T.dot(logit, self.W_hy) + self.b_hy
p_y_given_x_gen=T.nnet.softmax(y_gen)
return h,logit,p_y_given_x_gen
def _generate_loop(self,f,ins,stochastic=True,k=3):
# X_test=np.asarray(X_test[:,None],dtype='int32')
#X_mask=np.asarray(X_mask[:,None],dtype='float32')
X_test=ins[0]
X_mask=ins[1]
if X_test.ndim==1: X_test=X_test[None,:]
if X_mask.ndim==1: X_mask=X_mask[None,:]
sample=[]
sample_proba=[]
sample_score = []
live_k = 1
dead_k = 0
hyp_samples = [[]] * live_k
hyp_scores = np.zeros(live_k).astype('float32')
hyp_states = []
next_w=np.zeros((1,self.n_out)).astype('float32')
h_w=-1*np.ones((1,self.n_decoder)).astype('float32')
for i in xrange(self.maxlen):
gen_ins=[X_test,X_mask,next_w,h_w]
h_w,logit,p_y_given_x_gen=f(*gen_ins)
sample_proba.append(p_y_given_x_gen.flatten())
if stochastic: ### stochastic sampling
result = np.argmax(p_y_given_x_gen, axis = -1)[0]
sample.append(result)
w=self.get_embedding(result)
next_w=np.asarray(w.reshape((1,self.n_out))).astype('float32')
else:
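                # Beam-search branch: score every (hypothesis, next-word)
                # pair, keep the (k - dead_k) best, and retire hypotheses
                # whose last word is token 0 (treated as end-of-sequence).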
p_y_given_x_gen=np.array(p_y_given_x_gen).astype('float32')
#print p_y_given_x_gen
cand_scores = hyp_scores[:,None] - np.log(p_y_given_x_gen.flatten())
cand_flat = cand_scores.flatten()
ranks_flat = cand_flat.argsort()[:(k-dead_k)]
voc_size = p_y_given_x_gen.shape[1]
trans_indices = ranks_flat / voc_size
word_indices = ranks_flat % voc_size
costs = cand_flat[ranks_flat]
new_hyp_samples = []
new_hyp_scores = np.zeros(k-dead_k).astype('float32')
# new_hyp_states = []
for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
new_hyp_samples.append(hyp_samples[ti]+[wi])
new_hyp_scores[idx] = copy.copy(costs[ti])
# new_hyp_states.append(copy.copy(result[ti]))
# check the finished samples
new_live_k = 0
hyp_samples = []
hyp_scores = []
#hyp_states = []
for idx in xrange(len(new_hyp_samples)):
if new_hyp_samples[idx][-1] == 0:
sample.append(new_hyp_samples[idx])
sample_score.append(new_hyp_scores[idx])
dead_k += 1
else:
new_live_k += 1
hyp_samples.append(new_hyp_samples[idx])
hyp_scores.append(new_hyp_scores[idx])
#hyp_states.append(new_hyp_states[idx])
hyp_scores = np.array(hyp_scores)
live_k = new_live_k
if new_live_k < 1:
break
if dead_k >= k:
break
next_w = np.array([w[-1] for w in hyp_samples])
w=self.get_embedding(next_w[0])
next_w=np.asarray(w.reshape((1,self.n_out))).astype('float32')
#next_state = np.array(hyp_states)
if not stochastic:
# dump every remaining one
if live_k > 0:
for idx in xrange(live_k):
sample.append(hyp_samples[idx])
sample_score.append(hyp_scores[idx])
sample=sample[np.argmin(sample_score)]
return sample_proba,sample
def evaluate(self, test_set, batch_size=128, show_accuracy=False, verbose=1):
test_set_x = np.asarray(test_set[0], dtype='int32')
mask_set_x = np.asarray(test_set[1], dtype='float32')
test_set_y = np.asarray(test_set[2], dtype='int32')
mask_set_y = np.asarray(test_set[3], dtype='float32')
ins = [test_set_x, mask_set_x, test_set_y,mask_set_y]
if show_accuracy:
f = self._test_with_acc
else:
f = self._test
outs = self._test_loop(f, ins, batch_size, verbose)
if show_accuracy:
return outs
else:
return outs[0]
def _test_loop(self, f, ins, batch_size=128, verbose=0):
'''
Abstract method to loop over some data in batches.
'''
nb_sample = ins[0].shape[0]
outs = []
if verbose == 1:
progbar = Progbar(target=nb_sample)
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
ins_batch = slice_X(ins, batch_ids)
batch_outs = f(*ins_batch)
if type(batch_outs) == list:
if batch_index == 0:
for batch_out in enumerate(batch_outs):
outs.append(0.)
for i, batch_out in enumerate(batch_outs):
outs[i] += batch_out * len(batch_ids)
else:
if batch_index == 0:
outs.append(0.)
outs[0] += batch_outs * len(batch_ids)
if verbose == 1:
progbar.update(batch_end)
for i, out in enumerate(outs):
outs[i] /= nb_sample
return outs
|
chuckgu/Alphabeta
|
theano/library/Model_ENC_DEC.py
|
Python
|
gpl-3.0
| 22,150
|
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for chords_lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from magenta.common import testing_lib as common_testing_lib
from magenta.music import chord_symbols_lib
from magenta.music import chords_lib
from magenta.music import constants
from magenta.music import melodies_lib
from magenta.music import sequences_lib
from magenta.music import testing_lib
from magenta.protobuf import music_pb2
import tensorflow as tf
NO_CHORD = constants.NO_CHORD
class ChordsLibTest(tf.test.TestCase):
def setUp(self):
self.steps_per_quarter = 1
self.note_sequence = common_testing_lib.parse_test_proto(
music_pb2.NoteSequence,
"""
time_signatures: {
numerator: 4
denominator: 4
}
tempos: {
qpm: 60
}
""")
def testTranspose(self):
# Transpose ChordProgression with basic triads.
events = ['Cm', 'F', 'Bb', 'Eb']
chords = chords_lib.ChordProgression(events)
chords.transpose(transpose_amount=7)
expected = ['Gm', 'C', 'F', 'Bb']
self.assertEqual(expected, list(chords))
# Transpose ChordProgression with more complex chords.
events = ['Esus2', 'B13', 'A7/B', 'F#dim']
chords = chords_lib.ChordProgression(events)
chords.transpose(transpose_amount=-2)
expected = ['Dsus2', 'A13', 'G7/A', 'Edim']
self.assertEqual(expected, list(chords))
# Transpose ChordProgression containing NO_CHORD.
events = ['C', 'Bb', NO_CHORD, 'F', 'C']
chords = chords_lib.ChordProgression(events)
chords.transpose(transpose_amount=4)
expected = ['E', 'D', NO_CHORD, 'A', 'E']
self.assertEqual(expected, list(chords))
def testTransposeUnknownChordSymbol(self):
# Attempt to transpose ChordProgression with unknown chord symbol.
events = ['Cm', 'G7', 'P#13', 'F']
chords = chords_lib.ChordProgression(events)
with self.assertRaises(chord_symbols_lib.ChordSymbolError):
chords.transpose(transpose_amount=-4)
def testFromQuantizedNoteSequence(self):
testing_lib.add_chords_to_sequence(
self.note_sequence,
[('Am', 4), ('D7', 8), ('G13', 12), ('Csus', 14)])
quantized_sequence = sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
chords = chords_lib.ChordProgression()
chords.from_quantized_sequence(
quantized_sequence, start_step=0, end_step=16)
expected = [NO_CHORD, NO_CHORD, NO_CHORD, NO_CHORD,
'Am', 'Am', 'Am', 'Am', 'D7', 'D7', 'D7', 'D7',
'G13', 'G13', 'Csus', 'Csus']
self.assertEqual(expected, list(chords))
def testFromQuantizedNoteSequenceWithinSingleChord(self):
testing_lib.add_chords_to_sequence(
self.note_sequence, [('F', 0), ('Gm', 8)])
quantized_sequence = sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
chords = chords_lib.ChordProgression()
chords.from_quantized_sequence(
quantized_sequence, start_step=4, end_step=6)
expected = ['F'] * 2
self.assertEqual(expected, list(chords))
def testFromQuantizedNoteSequenceWithNoChords(self):
quantized_sequence = sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
chords = chords_lib.ChordProgression()
chords.from_quantized_sequence(
quantized_sequence, start_step=0, end_step=16)
expected = [NO_CHORD] * 16
self.assertEqual(expected, list(chords))
def testFromQuantizedNoteSequenceWithCoincidentChords(self):
testing_lib.add_chords_to_sequence(
self.note_sequence,
[('Am', 4), ('D7', 8), ('G13', 12), ('Csus', 12)])
quantized_sequence = sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
chords = chords_lib.ChordProgression()
with self.assertRaises(chords_lib.CoincidentChordsError):
chords.from_quantized_sequence(
quantized_sequence, start_step=0, end_step=16)
def testExtractChords(self):
testing_lib.add_chords_to_sequence(
self.note_sequence, [('C', 2), ('G7', 6), ('F', 8)])
quantized_sequence = sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
quantized_sequence.total_quantized_steps = 10
chord_progressions, _ = chords_lib.extract_chords(quantized_sequence)
expected = [[NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C', 'G7', 'G7', 'F', 'F']]
self.assertEqual(expected, [list(chords) for chords in chord_progressions])
def testExtractChordsAllTranspositions(self):
testing_lib.add_chords_to_sequence(
self.note_sequence, [('C', 1)])
quantized_sequence = sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
quantized_sequence.total_quantized_steps = 2
chord_progressions, _ = chords_lib.extract_chords(quantized_sequence,
all_transpositions=True)
expected = list(zip([NO_CHORD] * 12, ['Gb', 'G', 'Ab', 'A', 'Bb', 'B',
'C', 'Db', 'D', 'Eb', 'E', 'F']))
self.assertEqual(expected, [tuple(chords) for chords in chord_progressions])
def testExtractChordsForMelodies(self):
testing_lib.add_track_to_sequence(
self.note_sequence, 0,
[(12, 100, 2, 4), (11, 1, 6, 11)])
testing_lib.add_track_to_sequence(
self.note_sequence, 1,
[(12, 127, 2, 4), (14, 50, 6, 8),
(50, 100, 33, 37), (52, 100, 34, 37)])
testing_lib.add_chords_to_sequence(
self.note_sequence,
[('C', 2), ('G7', 6), ('Cmaj7', 33)])
quantized_sequence = sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
melodies, _ = melodies_lib.extract_melodies(
quantized_sequence, min_bars=1, gap_bars=2, min_unique_pitches=2,
ignore_polyphonic_notes=True)
chord_progressions, _ = chords_lib.extract_chords_for_melodies(
quantized_sequence, melodies)
expected = [[NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C',
'G7', 'G7', 'G7', 'G7', 'G7'],
[NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C', 'G7', 'G7'],
['G7', 'Cmaj7', 'Cmaj7', 'Cmaj7', 'Cmaj7']]
self.assertEqual(expected, [list(chords) for chords in chord_progressions])
def testExtractChordsForMelodiesCoincidentChords(self):
testing_lib.add_track_to_sequence(
self.note_sequence, 0,
[(12, 100, 2, 4), (11, 1, 6, 11)])
testing_lib.add_track_to_sequence(
self.note_sequence, 1,
[(12, 127, 2, 4), (14, 50, 6, 8),
(50, 100, 33, 37), (52, 100, 34, 37)])
testing_lib.add_chords_to_sequence(
self.note_sequence,
[('C', 2), ('G7', 6), ('E13', 8), ('Cmaj7', 8)])
quantized_sequence = sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
melodies, _ = melodies_lib.extract_melodies(
quantized_sequence, min_bars=1, gap_bars=2, min_unique_pitches=2,
ignore_polyphonic_notes=True)
chord_progressions, stats = chords_lib.extract_chords_for_melodies(
quantized_sequence, melodies)
expected = [[NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C', 'G7', 'G7'],
['Cmaj7', 'Cmaj7', 'Cmaj7', 'Cmaj7', 'Cmaj7']]
stats_dict = dict((stat.name, stat) for stat in stats)
self.assertIsNone(chord_progressions[0])
self.assertEqual(expected,
[list(chords) for chords in chord_progressions[1:]])
self.assertEqual(stats_dict['coincident_chords'].count, 1)
def testToSequence(self):
chords = chords_lib.ChordProgression(
[NO_CHORD, 'C7', 'C7', 'C7', 'C7', 'Am7b5', 'F6', 'F6', NO_CHORD])
sequence = chords.to_sequence(sequence_start_time=2, qpm=60.0)
self.assertProtoEquals(
'ticks_per_quarter: 220 '
'tempos < qpm: 60.0 > '
'text_annotations < '
' text: "C7" time: 2.25 annotation_type: CHORD_SYMBOL '
'> '
'text_annotations < '
' text: "Am7b5" time: 3.25 annotation_type: CHORD_SYMBOL '
'> '
'text_annotations < '
' text: "F6" time: 3.5 annotation_type: CHORD_SYMBOL '
'> '
'text_annotations < '
' text: "N.C." time: 4.0 annotation_type: CHORD_SYMBOL '
'> ',
sequence)
def testEventListChordsWithMelodies(self):
note_sequence = music_pb2.NoteSequence(ticks_per_quarter=220)
note_sequence.tempos.add(qpm=60.0)
testing_lib.add_chords_to_sequence(
note_sequence, [('N.C.', 0), ('C', 2), ('G7', 6)])
note_sequence.total_time = 8.0
melodies = [
melodies_lib.Melody([60, -2, -2, -1],
start_step=0, steps_per_quarter=1, steps_per_bar=4),
melodies_lib.Melody([62, -2, -2, -1],
start_step=4, steps_per_quarter=1, steps_per_bar=4),
]
quantized_sequence = sequences_lib.quantize_note_sequence(
note_sequence, steps_per_quarter=1)
chords = chords_lib.event_list_chords(quantized_sequence, melodies)
expected_chords = [
[NO_CHORD, NO_CHORD, 'C', 'C'],
['C', 'C', 'G7', 'G7']
]
self.assertEqual(expected_chords, chords)
def testAddChordsToSequence(self):
note_sequence = music_pb2.NoteSequence(ticks_per_quarter=220)
note_sequence.tempos.add(qpm=60.0)
testing_lib.add_chords_to_sequence(
note_sequence, [('N.C.', 0), ('C', 2), ('G7', 6)])
note_sequence.total_time = 8.0
expected_sequence = copy.deepcopy(note_sequence)
del note_sequence.text_annotations[:]
chords = [NO_CHORD, 'C', 'C', 'G7']
chord_times = [0.0, 2.0, 4.0, 6.0]
chords_lib.add_chords_to_sequence(note_sequence, chords, chord_times)
self.assertEqual(expected_sequence, note_sequence)
if __name__ == '__main__':
tf.test.main()
|
jesseengel/magenta
|
magenta/music/chords_lib_test.py
|
Python
|
apache-2.0
| 10,501
|
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.core.validators import RegexValidator
from rest_framework import serializers
from rest_framework.authtoken.models import Token
from rest_framework.validators import UniqueValidator
from betterself.users.models import UserPhoneNumberDetails, TIMEZONE_CHOICES
phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$',
message="Phone number must be entered in the format: '+999999999'. "
'Up to 15 digits allowed.')
User = get_user_model()
class UserDetailsSerializer(serializers.ModelSerializer):
username = serializers.CharField(min_length=4, max_length=32,
validators=[UniqueValidator(queryset=User.objects.all())]
)
password = serializers.CharField(min_length=8, max_length=32, write_only=True)
timezone = serializers.ChoiceField(choices=TIMEZONE_CHOICES, default='US/Eastern')
supplements = serializers.CharField(max_length=350, default=None)
phone_number = serializers.SerializerMethodField(read_only=True)
email = serializers.EmailField(required=False)
token = serializers.SerializerMethodField(read_only=True)
class Meta:
model = User
fields = ('uuid', 'username', 'password', 'timezone', 'supplements', 'phone_number', 'email', 'token')
def create(self, validated_data):
        # Override create in this serializer so we can use create_user,
        # which salts and hashes the password properly
user = User.objects.create_user(**validated_data)
return user
def validate(self, validated_data):
cleaned_supplements = self._clean_supplements(validated_data['supplements'])
validated_data['supplements'] = cleaned_supplements
return validated_data
@staticmethod
def get_phone_number(user):
try:
user_phone_number = user.userphonenumberdetails
except ObjectDoesNotExist:
return
else:
phone_number = user_phone_number.phone_number
phone_number_serialized = phone_number.as_e164
return phone_number_serialized
@staticmethod
def get_token(instance):
""" Gets the API Token if it exists """
try:
return instance.auth_token.key
except ObjectDoesNotExist:
token, _ = Token.objects.get_or_create(user=instance)
return token.key
@staticmethod
def _clean_supplements(supplement_string):
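        """Split a comma-separated supplement string into title-cased names,
        e.g. 'fish%20oil, magnesium' -> ['Fish Oil', 'Magnesium'].
        """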
if not supplement_string:
return
supplements_cleaned = []
supplements = supplement_string.split(',')
for supplement in supplements:
            # URLs coming from the web will have %20, but it really means a space
name = supplement.strip().title().replace('%20', ' ')
supplements_cleaned.append(name)
return supplements_cleaned
class PhoneNumberDetailsSerializer(serializers.ModelSerializer):
phone_number = serializers.CharField(validators=[phone_regex])
is_verified = serializers.BooleanField(read_only=True)
class Meta:
fields = ['phone_number', 'is_verified']
model = UserPhoneNumberDetails
|
jeffshek/betterself
|
apis/betterself/v1/users/serializers.py
|
Python
|
mit
| 3,207
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ozmaxweb.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
ozmax/ozmaxweb
|
manage.py
|
Python
|
gpl-2.0
| 251
|
# -*- coding: utf-8 -*-
# mingus - Music theory Python package, tablature module.
# Copyright (C) 2009, Bart Spaans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Functions to convert mingus.containers to pretty ASCII tablature."""
from __future__ import absolute_import
import mingus.extra.tunings as tunings
from mingus.core.mt_exceptions import RangeError, FingerError
import os
from six.moves import range
default_tuning = tunings.get_tuning("Guitar", "Standard", 6, 1)
def begin_track(tuning, padding=2):
"""Helper function that builds the first few characters of every bar."""
# find longest shorthand tuning base
names = [x.to_shorthand() for x in tuning.tuning]
    basesize = len(max(names, key=len)) + 3
# Build result
res = []
for x in names:
r = " %s" % x
spaces = basesize - len(r)
r += " " * spaces + "||" + "-" * padding
res.append(r)
return res
def add_headers(
width=80,
title="Untitled",
subtitle="",
author="",
email="",
description="",
tunings=None,
):
"""Create a nice header in the form of a list of strings using the
information that has been filled in.
All arguments except 'width' and 'tunings' should be strings. 'width'
should be an integer and 'tunings' a list of tunings representing the
instruments.
"""
if tunings is None:
tunings = []
result = [""]
title = str.upper(title)
result += [str.center(" ".join(title), width)]
if subtitle != "":
result += ["", str.center(str.title(subtitle), width)]
if author != "" or email != "":
result += ["", ""]
if email != "":
result += [str.center("Written by: %s <%s>" % (author, email), width)]
else:
result += [str.center("Written by: %s" % author, width)]
if description != "":
result += ["", ""]
words = description.split()
lines = []
line = []
last = 0
for word in words:
if len(word) + last < width - 10:
line.append(word)
last += len(word) + 1
else:
lines.append(line)
line = [word]
last = len(word) + 1
lines.append(line)
for line in lines:
result += [str.center(" ".join(line), width)]
if tunings != []:
result += ["", "", str.center("Instruments", width)]
for (i, tuning) in enumerate(tunings):
result += [
"",
str.center("%d. %s" % (i + 1, tuning.instrument), width),
str.center(tuning.description, width),
]
result += ["", ""]
return result
def from_Note(note, width=80, tuning=None):
"""Return a string made out of ASCII tablature representing a Note object
or note string.
Throw a RangeError if a suitable fret can't be found.
'tuning' should be a StringTuning object or None for the default tuning.
To force a certain fingering you can use a 'string' and 'fret' attribute
on the Note. If the fingering is valid, it will get used instead of the
default one.
"""
if tuning is None:
tuning = default_tuning
result = begin_track(tuning)
min = 1000
(s, f) = (-1, -1)
# Do an attribute check
if hasattr(note, "string") and hasattr(note, "fret"):
n = tuning.get_Note(note.string, note.fret)
if n is not None and int(n) == int(note):
(s, f) = (note.string, note.fret)
min = 0
if min == 1000:
for (string, fret) in enumerate(tuning.find_frets(note)):
if fret is not None:
if fret < min:
min = fret
(s, f) = (string, fret)
l = len(result[0])
w = max(4, (width - l) - 1)
# Build ASCII
if min != 1000:
fret = str(f)
for i in range(len(result)):
d = len(fret)
if i != s:
result[i] += "-" * w + "|"
else:
d = w - len(fret)
result[i] += "-" * (d // 2) + fret
d = (w - d // 2) - len(fret)
result[i] += "-" * d + "|"
else:
raise RangeError("No fret found that could play note '%s'. " "Note out of range." % note)
result.reverse()
return os.linesep.join(result)
def from_NoteContainer(notes, width=80, tuning=None):
"""Return a string made out of ASCII tablature representing a
NoteContainer object or list of note strings / Note objects.
Throw a FingerError if no playable fingering can be found.
'tuning' should be a StringTuning object or None for the default tuning.
To force a certain fingering you can use a 'string' and 'fret' attribute
on one or more of the Notes. If the fingering is valid, it will get used
instead of the default one.
"""
if tuning is None:
tuning = default_tuning
result = begin_track(tuning)
l = len(result[0])
w = max(4, (width - l) - 1)
fingerings = tuning.find_fingering(notes)
if fingerings != []:
# Do an attribute check
f = []
attr = []
for note in notes:
if hasattr(note, "string") and hasattr(note, "fret"):
n = tuning.get_Note(note.string, note.fret)
if n is not None and int(n) == int(note):
f += (note.string, note.fret)
attr.append(int(note))
# See if there are any possible fingerings with the attributes
# that are set.
fres = []
if f != []:
for x in fingerings:
found = True
for pos in f:
if pos not in x:
found = False
if found:
fres.append(x)
# Use best fingering.
if fres != []:
f = fres[0]
else:
# Use default fingering if attributes don't make sense
f = fingerings[0]
# Build {string: fret} result
res = {}
for (string, fret) in f:
res[string] = str(fret)
maxfret = max(res.values())
# Produce ASCII
for i in range(len(result)):
if i not in res:
result[i] += "-" * w + "|"
else:
d = w - len(res[i])
result[i] += "-" * (d // 2) + res[i]
d = (w - d // 2) - len(res[i])
result[i] += "-" * d + "|"
else:
raise FingerError("No playable fingering found for: %s" % notes)
result.reverse()
return os.linesep.join(result)
def from_Bar(bar, width=40, tuning=None, collapse=True):
"""Convert a mingus.containers.Bar object to ASCII tablature.
Throw a FingerError if no playable fingering can be found.
'tuning' should be a StringTuning object or None for the default tuning.
If 'collapse' is False this will return a list of lines, if it's True
all lines will be concatenated with a newline symbol.
Use 'string' and 'fret' attributes on Notes to force certain fingerings.
"""
if tuning is None:
tuning = default_tuning
# Size of a quarter note
qsize = _get_qsize(tuning, width)
result = begin_track(tuning, max(2, qsize // 2))
# Add bar
for entry in bar.bar:
(beat, duration, notes) = entry
fingering = tuning.find_fingering(notes)
if fingering != [] or notes is None:
# Do an attribute check
f = []
attr = []
if notes is not None:
for note in notes:
if hasattr(note, "string") and hasattr(note, "fret"):
n = tuning.get_Note(note.string, note.fret)
if n is not None and int(n) == int(note):
f.append((note.string, note.fret))
attr.append(int(note))
# See if there are any possible fingerings with the attributes that
# are set.
fres = []
if f != []:
for x in fingering:
found = True
for pos in f:
if pos not in x:
found = False
if found:
fres.append(x)
# Use best fingering.
maxlen = 0
if fres != []:
f = fres[0]
else:
# Use default fingering if attributes don't make sense
if notes is None:
f = []
maxlen = 1
else:
f = fingering[0]
# Make {string: fret} dictionary and find highest fret
d = {}
for (string, fret) in f:
d[string] = str(fret)
if len(str(fret)) > maxlen:
maxlen = len(str(fret))
# Add to result
for i in range(len(result)):
dur = int(((1.0 / duration) * qsize) * 4) - maxlen
if i not in d:
result[i] += "-" * maxlen + "-" * dur
else:
result[i] += ("%" + str(maxlen) + "s") % d[i] + "-" * dur
else:
raise FingerError("No playable fingering found for: %s" % notes)
# Padding at the end
l = len(result[i]) + 1
for i in range(len(result)):
result[i] += (width - l) * "-" + "|"
result.reverse()
# Mark quarter notes
pad = " " * int(((1.0 / bar.meter[1]) * qsize) * 4 - 1)
r = " " * (result[0].find("||") + 2 + max(2, qsize // 2)) + ("*" + pad) * bar.meter[0]
r += " " * (len(result[0]) - len(r))
if not collapse:
return [r] + result
else:
return os.linesep.join([r] + result)
def from_Track(track, maxwidth=80, tuning=None):
"""Convert a mingus.containers.Track object to an ASCII tablature string.
'tuning' should be set to a StringTuning object or to None to use the
Track's tuning (or alternatively the default if the Track hasn't got its
own tuning).
'string' and 'fret' attributes on Notes are taken into account.
"""
result = []
width = _get_width(maxwidth)
if not tuning:
tuning = track.get_tuning()
lastlen = 0
for bar in track:
r = from_Bar(bar, width, tuning, collapse=False)
barstart = r[1].find("||") + 2
if (len(r[0]) + lastlen) - barstart < maxwidth and result != []:
for i in range(1, len(r) + 1):
item = r[len(r) - i]
result[-i] += item[barstart:]
else:
result += ["", ""] + r
lastlen = len(result[-1])
return os.linesep.join(result)
def from_Composition(composition, width=80):
"""Convert a mingus.containers.Composition to an ASCII tablature string.
Automatically add an header based on the title, subtitle, author, e-mail
and description attributes. An extra description of the piece can also
be given.
Tunings can be set by using the Track.instrument.tuning or Track.tuning
attribute.
"""
# Collect tunings
instr_tunings = []
for track in composition:
tun = track.get_tuning()
if tun:
instr_tunings.append(tun)
else:
instr_tunings.append(default_tuning)
result = add_headers(
width,
composition.title,
composition.subtitle,
composition.author,
composition.email,
composition.description,
instr_tunings,
)
# Some variables
w = _get_width(width)
barindex = 0
bars = width / w
lastlen = 0
maxlen = max([len(x) for x in composition.tracks])
while barindex < maxlen:
notfirst = False
for tracks in composition:
tuning = tracks.get_tuning()
ascii = []
for x in range(bars):
if barindex + x < len(tracks):
bar = tracks[barindex + x]
r = from_Bar(bar, w, tuning, collapse=False)
barstart = r[1].find("||") + 2
# Add extra '||' to quarter note marks to connect tracks.
if notfirst:
r[0] = (r[0])[: barstart - 2] + "||" + (r[0])[barstart:]
# Add bar to ascii
if ascii != []:
for i in range(1, len(r) + 1):
item = r[len(r) - i]
ascii[-i] += item[barstart:]
else:
ascii += r
# Add extra '||' to connect tracks
if notfirst and ascii != []:
pad = ascii[-1].find("||")
result += [" " * pad + "||", " " * pad + "||"]
else:
notfirst = True
# Finally, add ascii to result
result += ascii
result += ["", "", ""]
barindex += bars
return os.linesep.join(result)
def from_Suite(suite, maxwidth=80):
"""Convert a mingus.containers.Suite to an ASCII tablature string, complete
with headers.
This function makes use of the Suite's title, subtitle, author, email
and description attributes.
"""
subtitle = (
str(len(suite.compositions)) + " Compositions" if suite.subtitle == "" else suite.subtitle
)
result = os.linesep.join(
add_headers(
maxwidth,
suite.title,
subtitle,
suite.author,
suite.email,
suite.description,
)
)
hr = maxwidth * "="
n = os.linesep
result = n + hr + n + result + n + hr + n + n
for comp in suite:
c = from_Composition(comp, maxwidth)
result += c + n + hr + n + n
return result
def _get_qsize(tuning, width):
"""Return a reasonable quarter note size for 'tuning' and 'width'."""
names = [x.to_shorthand() for x in tuning.tuning]
    basesize = len(max(names, key=len)) + 3
barsize = ((width - basesize) - 2) - 1
    # Solve x*4 + 0.5*x - barsize = 0  =>  4.5*x = barsize  =>  x = barsize / 4.5
return max(0, int(barsize / 4.5))
def _get_width(maxwidth):
"""Return the width of a single bar, when width of the page is given."""
width = maxwidth / 3
if maxwidth <= 60:
width = maxwidth
elif 60 < maxwidth <= 120:
width = maxwidth / 2
return width
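# A minimal usage sketch (illustrative only; 'C-4' is mingus note-string
# syntax, and the module's default guitar tuning is used):
if __name__ == "__main__":
    print(from_Note("C-4"))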
|
bspaans/python-mingus
|
mingus/extra/tablature.py
|
Python
|
gpl-3.0
| 15,116
|
import os
import re
import json
from django.conf import settings
import sh
import mapnik
import time
import logging
from terrapyn.geocms.cache import CacheManager
from terrapyn.geocms import models
_log = logging.getLogger('terrapyn.driver_messages')
LAYER_CACHE_PATH = getattr(settings, 'LAYER_CACHE_PATH')
if not os.path.exists(LAYER_CACHE_PATH):
sh.mkdir('-p', LAYER_CACHE_PATH)
class Renderer(object):
def compile_layer(self, rl, layer_id, srs, css_classes, **parameters):
"""Take a RenderedLayer and turn it into a Mapnik input file clause"""
return {
"id" : parameters['id'] if 'id' in parameters else re.sub('/', '_', layer_id),
"name" : parameters['name'] if 'name' in parameters else re.sub('/', '_', layer_id),
"class" : ' '.join(rl.default_class if 'default' else cls for cls in css_classes).strip(),
"srs" : srs if isinstance(srs, basestring) else srs.ExportToProj4(),
"Datasource" : parameters
}
def compile_mml(self, srs, styles, *layers):
"""Take multiple layers and stylesheets and turn it into a Mapnik input file"""
stylesheets = models.Style.objects.filter(slug__in=[s.split('.')[0] for s in styles])
css_classes = set([s.split('.')[1] if '.' in s else 'default' for s in styles])
_log.debug('compile_mml {0}'.format(layers))
mml = {
'srs': srs,
'Stylesheet': [{"id": re.sub('/', '_', stylesheet.slug), "data" : stylesheet.stylesheet } for stylesheet in stylesheets],
'Layer': [self.compile_layer(rl, layer_id, lsrs, css_classes, **parms) for rl, (layer_id, lsrs, parms) in layers]
}
return mml
def compile_mapfile(self, name, srs, stylesheets, *layers):
"""Compile from Carto to Mapnik"""
_log.debug('compiling mapfile')
with open(name + ".mml", 'w') as mapfile:
mapfile.write(json.dumps(self.compile_mml(srs, stylesheets, *layers), indent=4))
carto = sh.Command(settings.CARTO_HOME + "/bin/carto")
carto(name + '.mml', _out=name + '.xml')
_log.debug('compiled mapfile')
def prepare_wms(self, layers, srs, styles, bgcolor=None, transparent=True, **kwargs):
"""Take a WMS query and turn it into the appropriate MML file, if need be. Or look up the cached MML file"""
_log.debug('prepare wms {0} {1}'.format(layers, styles))
if not os.path.exists(LAYER_CACHE_PATH):
os.makedirs(LAYER_CACHE_PATH) # just in case it's not there yet.
cached_filename = CacheManager.cache_entry_name(
layers, srs, styles,
bgcolor=bgcolor,
transparent=transparent,
query=kwargs['query'] if 'query' in kwargs else None
)
_log.debug('cache entry {0}'.format(cached_filename))
layer_specs = []
for layer in layers:
if "#" in layer:
layer, kwargs['sublayer'] = layer.split("#")
_log.debug('pulling layer {0}'.format(layer))
rendered_layer = models.Layer.objects.get(slug=layer)
driver = rendered_layer.data_resource.driver_instance
layer_spec = driver.get_rendering_parameters(**kwargs)
layer_specs.append((rendered_layer, layer_spec))
if not os.path.exists(cached_filename + ".xml"): # not an else as previous clause may remove file.
try:
with open(cached_filename + ".lock", 'w') as w:
self.compile_mapfile(cached_filename, srs, styles, *layer_specs)
os.unlink(cached_filename + ".lock")
except sh.ErrorReturnCode_1, e:
os.unlink(cached_filename + ".lock")
raise RuntimeError(str(e.stderr))
return cached_filename
def render(self, fmt, width, height, bbox, srs, styles, layers, **kwargs):
"""Render a WMS request or a tile. TODO - create an SQLite cache for this as well, based on hashed filename."""
_log.debug('render called for {0}'.format(layers))
if srs.lower().startswith('epsg'):
if srs.endswith("900913") or srs.endswith("3857"):
srs = "+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null"
else:
srs = "+init=" + srs.lower()
name = self.prepare_wms(layers, srs, styles, **kwargs)
filename = "{name}.{bbox}.{width}x{height}.{fmt}".format(
name=name,
bbox='_'.join(str(b) for b in bbox),
width=width,
height=height,
fmt=fmt
)
_log.debug('waiting on lock')
while os.path.exists(name + ".lock"):
time.sleep(0.05)
_log.debug('rendering {0}x{1} tile'.format(width, height))
m = mapnik.Map(width, height)
mapnik.load_map(m, (name + '.xml').encode('ascii'))
m.zoom_to_box(mapnik.Box2d(*bbox))
mapnik.render_to_file(m, filename, fmt)
with open(filename) as tiledata:
tile = buffer(tiledata.read())
os.unlink(filename)
return filename, tile
|
JeffHeard/terrapyn
|
geocms/rendering.py
|
Python
|
apache-2.0
| 5,180
|
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
]
|
diegoguimaraes/django
|
tests/proxy_models/urls.py
|
Python
|
bsd-3-clause
| 142
|
# -*- coding: utf-8 -*-
from flask import render_template, send_from_directory
from . import main_blueprint
@main_blueprint.route('/')
def index():
return render_template('index.html')
|
NQysit/pybwap
|
pybwap/main/views.py
|
Python
|
mit
| 195
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from catapult_build import html_checks
class MockAffectedFile(object):
def __init__(self, path, lines):
self.path = path
self.lines = lines
def NewContents(self):
return (l for l in self.lines)
def LocalPath(self):
return self.path
class MockInputApi(object):
def __init__(self, affected_files):
self.affected_files = affected_files
def AffectedFiles(self, file_filter=None, **_):
if file_filter:
return filter(file_filter, self.affected_files)
return self.affected_files
class MockOutputApi(object):
def PresubmitError(self, error_text):
return error_text
class HtmlChecksTest(unittest.TestCase):
def testRunChecksShowsErrorForWrongDoctype(self):
f = MockAffectedFile('foo/x.html', ['<!DOCTYPE XHTML1.0>'])
errors = html_checks.RunChecks(MockInputApi([f]), MockOutputApi())
self.assertEqual(1, len(errors))
def testRunChecksReturnsErrorForEmptyFile(self):
f = MockAffectedFile('foo/x.html', [])
errors = html_checks.RunChecks(MockInputApi([f]), MockOutputApi())
self.assertEqual(1, len(errors))
  def testRunChecksNoErrorsForFileWithCorrectDoctype(self):
f = MockAffectedFile('foo/x.html', ['<!DOCTYPE html> '])
errors = html_checks.RunChecks(MockInputApi([f]), MockOutputApi())
self.assertEqual([], errors)
def testRunChecksAcceptsDifferentCapitalization(self):
f = MockAffectedFile('foo/x.html', ['<!doctype HtMl> '])
errors = html_checks.RunChecks(MockInputApi([f]), MockOutputApi())
self.assertEqual([], errors)
def testRunChecksAcceptsCommentsBeforeDoctype(self):
f = MockAffectedFile('foo/x.html', ['<!-- asdf -->\n<!doctype html> '])
errors = html_checks.RunChecks(MockInputApi([f]), MockOutputApi())
self.assertEqual([], errors)
def testRunChecksSkipsFilesInExcludedPaths(self):
f = MockAffectedFile('foo/x.html', ['<!DOCTYPE html XHTML1.0>'])
errors = html_checks.RunChecks(
MockInputApi([f]), MockOutputApi(), excluded_paths=['^foo/.*'])
self.assertEqual([], errors)
def testRunChecksSkipsNonHtmlFiles(self):
f = MockAffectedFile('foo/bar.py', ['#!/usr/bin/python', 'print 10'])
errors = html_checks.RunChecks(MockInputApi([f]), MockOutputApi())
self.assertEqual([], errors)
|
SummerLW/Perf-Insight-Report
|
catapult_build/html_checks_unittest.py
|
Python
|
bsd-3-clause
| 2,443
|
# Gambit scripts
#
# Copyright (C) USC Information Sciences Institute
# Author: Nibir Bora <nbora@usc.edu>
# URL: <http://cbg.isi.edu/>
# For license information, see LICENSE
# Folders -- set DATA_FOLDER to exactly one of the options below
# (it is referenced further down when deriving USER_IDS_FROM_DB)
DATA_FOLDER = 'all-hoods/'
#DATA_FOLDER = 'regions/'
#DATA_FOLDER = 'hbk/'
#DATA_FOLDER = 'hbk_old/'
#DATA_FOLDER = 'hbk_old2/'
#DATA_FOLDER = 'south-la/'
#DATA_FOLDER = 'west-la/'
#DATA_FOLDER = 'south-bay/'
#DATA_FOLDER = 'pomona/'
#DATA_FOLDER = 'bernardino/'
#DATA_FOLDER = 'riverside/'
#DATA_FOLDER = 'blood_crip/'
# Relations
REL_TWEET = 't3_tweet_6'
#REL_TWEET = 't2_tweet'
REL_HOME = 't4_home'
#REL_HOME = 'hbk_home'
#REL_HOME = 'hbk_home2'
REL_NHOOD = 't4_nhood'
#REL_NHOOD = 'nh_blood_crip'
# Multi processing
PROCESSES = 12
# Params
TIMEZONE = 'America/Los_Angeles'
TZ_OFFSET = -8
HH_START = 19 # 24 hour format
HH_END = 4
QUERY_CONSTRAINT = "AND (timestamp < '2012-12-15 00:00:00' OR timestamp > '2013-01-10 00:00:00')"
MIN_NIGHT_TW = 50
MAX_NIGHT_TW = 5000
MIN_FRAC_INSIDE = 0.5
MAX_TW_TO_CLUSTER = 500
DBSCAN_EPS = 0.003
DBSCAN_MIN = 30
USER_IDS_FROM_DB = True
USER_IDS_FROM_DB = False if DATA_FOLDER == 'hbk/' else True
# DB
DB_CONN_STRING = "host='76.170.75.150' dbname='twitter' user='twitter' password='flat2#tw1tter'"
#DB_CONN_STRING = "host='brain.isi.edu' dbname='twitter' user='twitter' password='flat2#tw1tter'"
# Plot settings
PLOT_LABEL_ABS = True
MIN_DIR_DIST = 500
COLORS = {'hbk': '#377EB8',
'hbk_old': '#377EB8',
'hbk_old2': '#377EB8',
'south-la' : '#FA71AF',
'west-la' : '#4DAF4A',
'south-bay' : '#A65628',
'pomona' : '#3B3B3B',
'bernardino' : '#984EA3',
'riverside' : '#FF7F00'}
|
nbir/gambit-scripts
|
scripts/LAnhoodAnalysis/settings.py
|
Python
|
apache-2.0
| 1,709
|
from slack import Slack
|
iiegor/reaper
|
lib/ports/__init__.py
|
Python
|
mit
| 23
|
########################################################
# core.py
# Author: Jamie Zhu <jimzhu@GitHub>
# Created: 2014/2/6
# Last updated: 2014/10/22
########################################################
import numpy as np
import sys
from utilities import *
#########################################################
# Function to perform the prediction algorithm
#
def predict(removedTensor, para):
	# tMeanMatrix: mean value along the time axis
numTimeSlice = removedTensor.shape[2]
tMeanMatrix = np.sum(removedTensor, axis=2) /\
(np.sum(removedTensor > 0, axis=2) + np.spacing(1))
predTensor = np.rollaxis(np.tile(tMeanMatrix, (numTimeSlice, 1, 1)), 0, 3)
# mean value of the slice
for i in xrange(numTimeSlice):
removedMatrix = removedTensor[:, :, i]
sliceMean = np.sum(removedMatrix) / (np.sum(removedMatrix > 0) + np.spacing(1))
predTensor[:, :, i] = np.where(predTensor[:, :, i] > 0, predTensor[:, :, i], sliceMean)
return predTensor
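# A tiny sketch of the imputation behaviour (illustrative only; zeros mark
# "removed" entries, matching the convention above, and the unused 'para'
# argument is passed as None):
if __name__ == '__main__':
    removedTensor = np.random.rand(4, 4, 3)
    removedTensor[removedTensor < 0.3] = 0  # knock out ~30% of entries
    predTensor = predict(removedTensor, None)
    print(predTensor.shape)  # (4, 4, 3): every entry now has a prediction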
#########################################################
|
wsdream/CARP
|
Baseline/src/core.py
|
Python
|
mit
| 1,046
|
from users.models import Employee, EmployeeStatus
from django.contrib import admin
admin.site.register(Employee)
admin.site.register(EmployeeStatus)
|
gdebure/cream
|
users/admin.py
|
Python
|
gpl-3.0
| 150
|
from big_cheese import BigCheese
from collections import defaultdict
from .exceptions import PNCounterException
class PNCounter(BigCheese):
def __init__(self):
self.p = defaultdict(int)
self.n = defaultdict(int)
def value(self):
return sum(self.p.itervalues()) - sum(self.n.itervalues())
def increment(self, delta=1, node=None):
if delta < 0:
raise PNCounterException('Use decrement operation to decrement counter value.')
if not node:
node = BigCheese.node()
self.p[node] += int(delta)
def decrement(self, delta=1, node=None):
if delta < 0:
raise PNCounterException('Use increment operation to increment counter value.')
if not node:
node = BigCheese.node()
self.n[node] += int(delta)
def merge(self, other):
if not isinstance(other, self.__class__):
raise PNCounterException('Attempted to merge with different type.')
all_p_keys = self.p.viewkeys() | other.p
for k in all_p_keys:
if k in other.p:
self.p[k] = max(self.p[k], other.p[k])
all_n_keys = self.n.viewkeys() | other.n
for k in all_n_keys:
if k in other.n:
self.n[k] = max(self.n[k], other.n[k])
def to_dict(self):
return {
'type': self.crdt_type(),
'p': dict(self.p),
'n': dict(self.n)
}
def crdt_type(self):
return 'pn-counter'
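# A minimal usage sketch (illustrative): two replicas mutate independently
# and then converge via merge(). Node ids are passed explicitly so the
# example does not depend on BigCheese.node().
if __name__ == '__main__':
    a, b = PNCounter(), PNCounter()
    a.increment(5, node='node-a')
    a.decrement(2, node='node-a')
    b.increment(3, node='node-b')
    a.merge(b)
    b.merge(a)
    assert a.value() == b.value() == 6  # (5 - 2) + 3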
|
Sushant/cheeses
|
crdt/pn_counter.py
|
Python
|
mit
| 1,526
|
alt_map = {'ins':'0'}
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
def revComplement(seq):
for k,v in alt_map.items():
seq = seq.replace(k,v)
bases = list(seq)
bases = reversed([complement.get(base,base) for base in bases])
bases = ''.join(bases)
for k,v in alt_map.items():
bases = bases.replace(v,k)
return bases
def pairConcatenate(reads1, reads2):
pairedReads = []
clusters1 = {}
clusters2 = {}
for r in reads1:
clusters1[r.cluster] = r
for r in reads2:
clusters2[r.cluster] = r
for c in clusters1:
if c in clusters2:
newSeq = clusters1[c].seq + revComplement(clusters2[c].seq)
newSeqN = clusters1[c].seq + 'N' + revComplement(clusters2[c].seq)
newRead = pairedSeq(clusters1[c].seq_id, clusters2[c].seq_id,
c, clusters1[c].header, clusters2[c].header,
newSeq, newSeqN)
pairedReads.append(newRead)
return pairedReads
def buildClusterDict(reads):
clusterDict = {}
for r in reads:
clusterDict[r.cluster] = r
return clusterDict
# Takes two lists of reads and returns a list of [read1, read2] read objects from the same sequencing cluster
def readPairList(reads1, reads2):
pairs = []
clusters1 = buildClusterDict(reads1)
clusters2 = buildClusterDict(reads2)
for c in clusters1:
if c in clusters2:
pairs.append([clusters1[c], clusters2[c]])
return pairs
class pairedSeq(object):
def __init__(self, seq_name1, seq_name2, cluster, header1, header2, seq,
seqN):
self.seq_name1 = seq_name1
self.seq_name2 = seq_name2
self.cluster = cluster
self.header1 = header1
self.header2 = header2
self.seq = seq
self.seqN = seqN
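# A short usage sketch (illustrative; the namedtuple stands in for whatever
# parser normally produces objects with .seq_id/.cluster/.header/.seq):
if __name__ == '__main__':
    from collections import namedtuple
    Read = namedtuple('Read', ['seq_id', 'cluster', 'header', 'seq'])
    fwd = [Read('r1/1', 'c1', '@r1/1', 'ACGT')]
    rev = [Read('r1/2', 'c1', '@r1/2', 'TTGG')]
    pairs = pairConcatenate(fwd, rev)
    print(pairs[0].seq)  # 'ACGT' + revComplement('TTGG') -> 'ACGTCCAA'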
|
sjspence/spenceOTU
|
epicBarcoder/pairedEnds.py
|
Python
|
mit
| 1,858
|
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
#
from __future__ import absolute_import, unicode_literals, division
from datetime import date
from flask import (
Blueprint, render_template, url_for, request, redirect, abort,
jsonify, flash
)
from flask_login import current_user
from oclubs.utils.dates import today, str_to_date_dict, date_to_str_dict, \
int_to_dateobj
from oclubs.enums import UserType, ActivityTime, Building, \
SBAppStatus, ResStatus
from oclubs.shared import (
get_callsign_decorator, special_access_required, Pagination
)
from oclubs.objs import Club, Reservation, Classroom
from oclubs.forms.classroom_forms import (
ClassroomSidebarForm, ClearSelectionForm, ViewClassroomsForm
)
from oclubs.forms.reservation_forms import (
NewReservationForm,
ChangeSBStatusForm, ChangeDirectorsApprovalForm,
ChangeInstructorsApprovalForm,
ChangeCanReservationForm, ChangeCanSmartboardForm
)
from oclubs.exceptions import NoRow
resblueprint = Blueprint('resblueprint', __name__)
@resblueprint.route('/viewres/<resfilter:res_filter>/', defaults={'page': 1},
methods=['GET', 'POST'])
@resblueprint.route('/viewres/<resfilter:res_filter>/<int:page>',
methods=['GET', 'POST'])
def viewreservations(res_filter, page):
'''Display reservations'''
# generate template parameters
res_num = 20
count, res = Reservation.get_reservations_conditions(
limit=((page-1)*res_num, res_num),
**res_filter.to_kwargs())
pagination = Pagination(page, res_num, count)
# admins get a different page
is_admin = False
if current_user.is_authenticated:
if current_user.type == UserType.CLASSROOM_ADMIN or \
current_user.type == UserType.DIRECTOR or \
current_user.type == UserType.ADMIN:
is_admin = True
# generate list of possible classrooms to select from based on users
# selection of other filter options
available_classrooms = Classroom.get_classroom_conditions(
buildings=res_filter.conds[0] if res_filter.conds[0] else None,
timeslot=res_filter.conds[1] if res_filter.conds[1] else None)
classrooms_list = [(str(r.id), r.room_number)
for r in available_classrooms]
form = ClassroomSidebarForm()
# dynamically set the selection form choices
form.classrooms_list.choices = classrooms_list
clearBtn = ClearSelectionForm()
if request.method == 'POST':
# rebuild the res_filter
temp_filter = list(res_filter.conds)
# after submit selection
if form.submit_classrooms.data:
if form.classrooms_list.data:
selected_classrooms_id = \
str(form.classrooms_list.data).split(',')
# convert a list of room_id from form data
# to a list of room_numbers for res_filter
temp_filter[2] = [dict(classrooms_list)[id]
for id in selected_classrooms_id]
else:
temp_filter[2] = None
# after clear selection
if clearBtn.clear.data:
temp_filter[2] = None
res_filter.conds = tuple(temp_filter)
# refresh the page with the updated res_filter
return redirect(url_for('.viewreservations', res_filter=res_filter))
# preserve form selections from the previous session
defaultSelection = []
if res_filter.conds[2] is not None:
# convert a list of room_numbers from res_filter
# to a list of room_id
for r in available_classrooms:
if str(r.room_number) in res_filter.conds[2]:
defaultSelection.append(r.id)
form.classrooms_list.process_data(defaultSelection)
return render_template('reservation/viewres.html.j2',
is_viewres=True,
res=res,
pagination=pagination,
res_filter=res_filter,
form=form,
clearBtn=clearBtn,
is_admin=is_admin)
@resblueprint.route('/')
def res_home_redirect():
return redirect(url_for('.viewclassrooms', room_filter='all'))
@resblueprint.route('/viewres')
@resblueprint.route('/viewres/')
def viewres_redirect():
return redirect(url_for('.viewreservations', res_filter='all'))
@resblueprint.route('/viewroom')
@resblueprint.route('/viewroom/')
def viewroom_redirect():
return redirect(url_for('.viewclassrooms', room_filter='all'))
@resblueprint.route('/viewroom/<roomfilter:room_filter>/',
methods=['GET', 'POST'])
def viewclassrooms(room_filter):
'''Display classrooms'''
rdict = Classroom.get_free_classroom_conditions(
**room_filter.to_kwargs())
# partially unpack dict
buildings = [building for building in rdict.keys()]
timeslots = [timeslot for timeslot in rdict[buildings[0]].keys()]
buildings.sort(key=lambda b: b.value)
timeslots.sort(key=lambda t: t.value)
    # no date provided: display all classrooms
if room_filter.conds[2] is None:
is_all = True
# display order: building, timeslot, rooms
info = [buildings, timeslots, rdict]
# date provided, display free classrooms
else:
is_all = False
# unpack date
dates = [single_date for single_date in rdict[buildings[0]]
[timeslots[0]].keys()]
dates.sort()
# display order: date, building, timeslot, rooms
info = [dates, buildings, timeslots, rdict]
form = ViewClassroomsForm()
if request.method == "POST":
# rebuild the room_filter
temp_filter = list(room_filter.conds)
# this choice can no longer be selected
if form.viewclassroom_options.data == 'all_classrooms':
temp_filter[2] = None
else:
try:
# try to match one of the keywords
temp_filter[2] = str_to_date_dict()[form.date_options.data]
except KeyError:
# if its a custom entered date, do form checking
if form.check():
if form.date_options.data == 'singledate':
temp_filter[2] = form.date_select_start.data
elif form.date_options.data == 'daterange':
temp_filter[2] = (form.date_select_start.data,
form.date_select_end.data)
else:
# form check failed, render template again with error msg
return render_template('reservation/viewroom.html.j2',
is_viewroom=True,
room_filter=room_filter,
is_all=is_all,
info=info,
today=today(),
form=form)
room_filter.conds = tuple(temp_filter)
return redirect(url_for('.viewclassrooms', room_filter=room_filter))
# preserve form selections from the previous session
if room_filter.conds[2] is None:
form.viewclassroom_options.process_data('all_classrooms')
else:
form.viewclassroom_options.process_data('free_classrooms')
try:
            label = date_to_str_dict()[room_filter.conds[2]]
            form.date_options.process_data(label)
except KeyError:
if isinstance(room_filter.conds[2], date):
form.date_select_start.process_data(room_filter.conds[2])
else:
form.date_options.process_data('daterange')
form.date_select_start.process_data(room_filter.conds[2][0])
form.date_select_end.process_data(room_filter.conds[2][1])
return render_template('reservation/viewroom.html.j2',
is_viewroom=True,
room_filter=room_filter,
is_all=is_all,
info=info,
today=today(),
form=form)
@resblueprint.route('/<reservation>', methods=['GET', 'POST'])
@get_callsign_decorator(Reservation, 'reservation')
def reservationinfo(reservation):
'''Information page for a reservation'''
# determine privileges
is_admin = False
is_owner = False
is_teacher = False
is_director = False
if current_user.is_authenticated:
if current_user.type == UserType.CLASSROOM_ADMIN or \
current_user.type == UserType.DIRECTOR or \
current_user.type == UserType.ADMIN:
is_admin = True
if current_user.type == UserType.DIRECTOR:
is_director = True
if current_user.type == UserType.TEACHER:
is_teacher = True
if current_user == reservation.owner:
is_owner = True
# set default value to current value
SBAppStatus_form = ChangeSBStatusForm(
changeStatus=reservation.SBApp_status.name.lower())
directors_approval_form = ChangeDirectorsApprovalForm(
changeDApproval=str(reservation.directors_approval)
)
instructors_approval_form = ChangeInstructorsApprovalForm(
changeIApproval=str(reservation.instructors_approval)
)
if request.method == 'POST':
if SBAppStatus_form.submit.data:
reservation.SBApp_status = \
SBAppStatus[SBAppStatus_form.changeStatus.data.upper()]
        if directors_approval_form.submit.data:
            reservation.directors_approval = (
                directors_approval_form.changeDApproval.data == 'True')
        if instructors_approval_form.submit.data:
            reservation.instructors_approval = (
                instructors_approval_form.changeIApproval.data == 'True')
return redirect(url_for('.reservationinfo',
reservation=reservation.callsign))
return render_template('reservation/resinfo.html.j2',
is_admin=is_admin,
is_owner=is_owner,
is_teacher=is_teacher,
is_director=is_director,
SBAppStatus_form=SBAppStatus_form,
directors_approval_form=directors_approval_form,
instructors_approval_form=instructors_approval_form)
@resblueprint.route('/new/club/<club>', methods=['GET', 'POST'])
@get_callsign_decorator(Club, 'club')
@special_access_required
def newreservation_club(club):
'''For clubs to create new reservations'''
form = NewReservationForm()
can_reserve = club.reservation_allowed
if not can_reserve:
form.submit.render_kw = {'disabled': 'disabled'}
can_SB = club.smartboard_allowed
if request.method == 'POST':
if form.check():
res = Reservation.new()
res.status = ResStatus.UNPAIRED
res.date = form.date_selection.data
res.date_of_reservation = today()
res.activity_name = ""
res.reserver_name = club.name
res.reserver_club = club
res.owner = current_user
building = Building[form.building.data.upper()]
timeslot = ActivityTime[form.timeslot.data.upper()]
res.timeslot = timeslot
room_number = form.free_classrooms.data
classrooms = Classroom.get_classroom_conditions(
buildings=building,
timeslot=timeslot)
for room in classrooms:
if room_number == room.room_number:
classroom = room
res.classroom = classroom
res.SBNeeded = False
res.SBAppDesc = None
res.SBApp_status = SBAppStatus.NA
if can_SB:
if form.SBNeeded.data == 'yes':
res.SBNeeded = True
res.SBAppDesc = form.SBAppDesc.data
res.SBApp_status = SBAppStatus.PENDING
res.activity = None
res.instructors_approval = False
res.directors_approval = False
res.create()
return redirect(url_for('.reservationinfo',
reservation=res.callsign))
else:
return render_template('reservation/newres_club.html.j2',
form=form,
can_reserve=can_reserve,
can_SB=can_SB)
return render_template('reservation/newres_club.html.j2',
form=form,
can_reserve=can_reserve,
can_SB=can_SB)
@resblueprint.route('/update_free_classrooms', methods=['POST'])
def update_free_classrooms():
'''Dynamically provides a list of free classrooms using jQuery'''
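    # The response is a JSON array of [value, label] pairs, e.g.
    # [["501", "501"], ["502", "502"]] (illustrative room numbers), which the
    # client uses to rebuild its <select> options.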
# get form data
building = Building[request.form.get('building').upper()]
timeslot = ActivityTime[request.form.get('timeslot').upper()]
single_date = int_to_dateobj(
int(request.form.get('date_selection').replace('-', '')))
rdict = Classroom.get_free_classroom_conditions(
buildings=building,
timeslot=timeslot,
dates=single_date)
free_classrooms = rdict[building][timeslot][single_date]
if free_classrooms:
choices = [(r.room_number, r.room_number) for r in free_classrooms]
else:
choices = [('none', 'None available')]
# update the choices on the client side
return jsonify(choices)
@resblueprint.route('/viewres/club/<club>', methods=['GET', 'POST'])
@get_callsign_decorator(Club, 'club')
def viewreservations_club(club):
is_admin = False
is_owner = False
if current_user.is_authenticated:
if current_user.type == UserType.CLASSROOM_ADMIN or \
current_user.type == UserType.DIRECTOR or \
current_user.type == UserType.ADMIN:
is_admin = True
if current_user == club.leader:
is_owner = True
res = Reservation.get_reservations_conditions(
reserver_club=club)
canReserveForm = ChangeCanReservationForm(
changeCanReserve=str(club.reservation_allowed))
canSBForm = ChangeCanSmartboardForm(
changeCanSB=str(club.smartboard_allowed))
if request.method == 'POST':
if canReserveForm.submit.data:
club.reservation_allowed = \
canReserveForm.changeCanReserve.data == 'True'
if canSBForm.submit.data:
club.smartboard_allowed = \
canSBForm.changeCanSB.data == 'True'
return redirect(url_for('.viewreservations_club', club=club.callsign))
return render_template('reservation/viewres_club.html.j2',
res=res,
is_admin=is_admin,
is_owner=is_owner,
canReserveForm=canReserveForm,
canSBForm=canSBForm)
@resblueprint.route('/<reservation>/delete')
@get_callsign_decorator(Reservation, 'reservation')
def deletereservation(reservation):
club = reservation.reserver_club
single_date = reservation.date
timeslot = reservation.timeslot
building = reservation.classroom.building
room_number = reservation.classroom.room_number
owner = current_user
if reservation.status == ResStatus.UNPAIRED:
pass
elif reservation.status == ResStatus.PAIRED:
reservation.activity.reservation = None
try:
ret = Reservation.delete_reservation(single_date,
timeslot,
building,
room_number,
owner)
except NoRow:
pass
return redirect(url_for('.viewreservations_club', club=club.callsign))
|
SHSIDers/oclubs
|
oclubs/blueprints/resblueprint.py
|
Python
|
mit
| 16,327
|
'''
Defines constants for task class attributes.
@author: Peter Parente <parente@cs.unc.edu>
@copyright: Copyright (c) 2008 Peter Parente
@license: BSD License
All rights reserved. This program and the accompanying materials are made
available under the terms of The BSD License which accompanies this
distribution, and is available at
U{http://www.opensource.org/licenses/bsd-license.php}
'''
# permanence constants
NO_START=1
NO_CANCEL=2
NO_COMPLETE=4
NO_END=NO_COMPLETE|NO_CANCEL
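# A minimal sketch of how these bit flags combine and are tested (illustrative):
if __name__ == '__main__':
    permanence = NO_START | NO_CANCEL
    assert permanence & NO_CANCEL         # cancellation disallowed
    assert not (permanence & NO_COMPLETE) # completion still allowed
    assert NO_END == NO_COMPLETE | NO_CANCEL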
|
parente/clique
|
View/Task/Constants.py
|
Python
|
bsd-3-clause
| 486
|
#/usr/bin/env python
# -#- coding: utf-8 -#-
#
# data_config/my_firm.py
#
# Standard copyright message
#
#
#
# Initial version: 2012-04-12
# Author: Amnon Janiv
"""
.. module:: data_config/my_firm
    :synopsis: Equity file layout definition for 'us'
.. moduleauthor:: Amnon Janiv
"""
__revision__ = '$Id: $'
__version__ = '0.0.1'
from equity_master import desc
from equity_master import refdata
from equity_master import config
from data_config import common
#
#Basic configuration section
#
firm = refdata.Firm('my_firm')
#
#Equity definition configuration section
#
eq_def_file_name = 'my_firm_eq.txt'
eq_def_file_desc = desc.FileDescriptor(
firm = firm,
file_name = eq_def_file_name,
fields = common.eq_def_field_desc,
field_sep = config.FIELD_SEP,
header_lines = 1
)
#
#Equity time series configuration section
#
eq_ts_file_name = 'my_firm_ts.txt'
eq_ts_file_desc = desc.FileDescriptor(
firm = firm,
file_name = eq_ts_file_name,
fields = common.eq_ts_field_desc,
field_sep = config.FIELD_SEP,
header_lines = 1)
firm_desc = dict(
eq_file_desc = eq_def_file_desc,
eq_ts_field_desc = eq_ts_file_desc
)
|
ajaniv/equitymaster
|
data_config/my_firm.py
|
Python
|
gpl-2.0
| 1,465
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 30 19:13:14 2016
@author: chavdar
"""
import csv
import requests
with open('url.csv', 'r') as f:
my_urls = []
reader = csv.reader(f)
for row in reader:
my_urls.append(row)
#
my_html = ""
my_html_list = []
#debug
#my_urls = [my_urls[0]]
for i in my_urls:
# get for all pages of comments on current product
for i_page in range(1,int(i[1])+1): #i[1] is the number of comments pages
current_url = i[0]
my_index = current_url.index("pageNumber=") + len("pageNumber=")
current_url = current_url[:my_index]+str(i_page)
req = requests.get(current_url,headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36'})
html = req.text
# we remove the highest review and lowest review on top of each review page
ind = html.index("reviews-container")
ind2 = html.index("cm_cr-footer_dp_link")
html = html[ind:ind2]
#my_html += html
my_html_list.append(html)
#html_file.write(html)
# Write html data to file
html_file = open("comments_html_new.txt",'w',encoding='utf8')
for i_html in my_html_list:
html_file.write(i_html)
html_file.close()
|
chavdim/amazon_comments
|
scrapper/getCommentsHtml.py
|
Python
|
mit
| 1,344
|
#!/usr/bin/env python
"""
This scripts handles conversion of Markdown README into rst for inclusion
in Sphinx documentation.
"""
import os
from pypandoc import convert
DOC_DIR = os.path.dirname(os.path.abspath(__file__))
MD_README_PATH = os.path.join(os.path.dirname(DOC_DIR), 'README.md')
RST_README_PATH = os.path.join(DOC_DIR, 'readme.rst')
def convert_md():
with open(RST_README_PATH, 'w') as readme:
readme.write(convert(MD_README_PATH, 'rst'))
if __name__ == '__main__':
convert_md()
|
swistakm/graceful
|
docs/convert_readme.py
|
Python
|
bsd-3-clause
| 512
|
"""
Auto-tuning a convolutional network for Mobile GPU
====================================================
**Author**: `Lianmin Zheng <https://github.com/merrymercy>`_
Auto-tuning for a specific device is critical for getting the best
performance. This is a tutorial about how to tune a whole convolutional
network.
The operator implementation for Mobile GPU in TVM is written in template form.
The template has many tunable knobs (tile factor, vectorization, unrolling, etc).
We will tune all convolution, depthwise convolution and dense operators
in the neural network. After tuning, we produce a log file which stores
the best knob values for all required operators. When the tvm compiler compiles
these operators, it will query this log file to get the best knob values.
We also released pre-tuned parameters for some arm devices. You can go to
`Mobile GPU Benchmark <https://github.com/dmlc/tvm/wiki/Benchmark#mobile-gpu>`_
to see the results.
"""
######################################################################
# Install dependencies
# --------------------
# To use the autotvm package in tvm, we need to install some extra dependencies.
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user psutil xgboost tornado
#
# To make tvm run faster during tuning, it is recommended to use cython
# as FFI of tvm. In the root directory of tvm, execute
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user cython
# sudo make cython3
#
# Now return to python code. Import packages.
import os
import numpy as np
import nnvm.testing
import nnvm.compiler
import tvm
from tvm import autotvm
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.contrib.util import tempdir
import tvm.contrib.graph_runtime as runtime
#################################################################
# Define network
# --------------
# First we need to define the network in nnvm symbol API.
# We can load some pre-defined network from :code:`nnvm.testing`.
# We can also load models from MXNet, ONNX and TensorFlow (see NNVM
# tutorials :ref:`tutorial-nnvm` for more details).
def get_network(name, batch_size):
"""Get the symbol definition and random weight of a network"""
input_shape = (batch_size, 3, 224, 224)
output_shape = (batch_size, 1000)
if "resnet" in name:
n_layer = int(name.split('-')[1])
net, params = nnvm.testing.resnet.get_workload(num_layers=n_layer, batch_size=batch_size)
elif "vgg" in name:
n_layer = int(name.split('-')[1])
net, params = nnvm.testing.vgg.get_workload(num_layers=n_layer, batch_size=batch_size)
elif name == 'mobilenet':
net, params = nnvm.testing.mobilenet.get_workload(batch_size=batch_size)
elif name == 'squeezenet_v1.1':
net, params = nnvm.testing.squeezenet.get_workload(batch_size=batch_size, version='1.1')
elif name == 'inception_v3':
input_shape = (1, 3, 299, 299)
net, params = nnvm.testing.inception_v3.get_workload(batch_size=batch_size)
elif name == 'custom':
# an example for custom network
from nnvm.testing import utils
net = nnvm.sym.Variable('data')
net = nnvm.sym.conv2d(net, channels=4, kernel_size=(3,3), padding=(1,1))
net = nnvm.sym.flatten(net)
net = nnvm.sym.dense(net, units=1000)
net, params = utils.create_workload(net, batch_size, (3, 224, 224))
elif name == 'mxnet':
# an example for mxnet model
from mxnet.gluon.model_zoo.vision import get_model
block = get_model('resnet18_v1', pretrained=True)
net, params = nnvm.frontend.from_mxnet(block)
net = nnvm.sym.softmax(net)
else:
raise ValueError("Unsupported network: " + name)
return net, params, input_shape, output_shape
#################################################################
# Start RPC Tracker
# -----------------
# TVM uses an RPC session to communicate with ARM boards.
# During tuning, the tuner will send the generated code to the board and
# measure the speed of code on the board.
#
# To scale up the tuning, TVM uses RPC Tracker to manage distributed devices.
# The RPC Tracker is a centralized master node. We can register all devices to
# the tracker. For example, if we have 10 phones, we can register all of them
# to the tracker, and run 10 measurements in parallel, accelerating the tuning process.
#
# To start an RPC tracker, run this command on the host machine. The tracker is
# required during the whole tuning process, so we need to open a new terminal for
# this command:
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190
#
# The expected output is
#
# .. code-block:: bash
#
# INFO:RPCTracker:bind to 0.0.0.0:9190
#################################################################
# Register devices to RPC Tracker
# -----------------------------------
# Now we can register our devices to the tracker. The first step is to
# build tvm runtime for the ARM devices.
#
# * For Linux:
# Follow this section :ref:`build-tvm-runtime-on-device` to build
# tvm runtime on the device. Then register the device to tracker by
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_server --tracker=[HOST_IP]:9190 --key=rk3399
#
# (replace :code:`[HOST_IP]` with the IP address of your host machine)
#
# * For Android:
# Follow this `readme page <https://github.com/dmlc/tvm/tree/master/apps/android_rpc>`_ to
# install tvm rpc apk on the android device. Make sure you can pass the android rpc test.
# Then your device is already registered. During tuning, you have to go to the developer
# options and enable "Keep screen awake during charging", and charge your phone to make it stable.
#
# After registering devices, we can confirm it by querying rpc_tracker
#
# .. code-block:: bash
#
# python -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190
#
# For example, if we have 2 Huawei mate10 pro, 11 Raspberry Pi 3B and 2 rk3399,
# the output can be
#
# .. code-block:: bash
#
# Queue Status
# ----------------------------------
# key total free pending
# ----------------------------------
# mate10pro 2 2 0
# rk3399 2 2 0
# rpi3b 11 11 0
# ----------------------------------
#
# You can register multiple devices to the tracker to accelerate the measurement in tuning.
###########################################
# Set Tuning Options
# ------------------
# Before tuning, we should apply some configurations. Here I use an RK3399 board
# as an example. In your setting, you should modify the target and device_key accordingly.
# Set :code:`use_android` to True if you use an android phone.
#### DEVICE CONFIG ####
target = tvm.target.create('opencl -device=mali')
# Replace "aarch64-linux-gnu" with the correct target of your board.
# This target host is used for cross compilation. You can query it by :code:`gcc -v` on your device.
target_host = 'llvm -target=aarch64-linux-gnu'
# Also replace this with the device key in your tracker
device_key = 'rk3399'
# Set this to True if you use android phone
use_android = False
#### TUNING OPTION ####
network = 'resnet-18'
log_file = "%s.%s.log" % (device_key, network)
dtype = 'float32'
tuning_option = {
'log_filename': log_file,
'tuner': 'xgb',
'n_trial': 1000,
'early_stopping': 450,
'measure_option': autotvm.measure_option(
builder=autotvm.LocalBuilder(
build_func='ndk' if use_android else 'default'),
runner=autotvm.RPCRunner(
device_key, host='localhost', port=9190,
number=10,
timeout=5,
),
),
}
####################################################################
#
# .. note:: How to set tuning options
#
# In general, the default values provided here work well.
# If you have enough time budget, you can set :code:`n_trial`, :code:`early_stopping` larger,
# which makes the tuning run longer.
# If your device runs very slowly or your conv2d operators have many GFLOPs, consider
# setting a larger timeout.
#
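# For a quick smoke test before committing to a long run, much smaller values
# can be used (illustrative values, not the tuned defaults):
#
# .. code-block:: python
#
#   tuning_option['n_trial'] = 20
#   tuning_option['early_stopping'] = None
#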
###################################################################
# Begin Tuning
# ------------
# Now we can extract tuning tasks from the network and begin tuning.
# Here, we provide a simple utility function to tune a list of tasks.
# This function is just an initial implementation which tunes them in sequential order.
# We will introduce a more sophisticated tuning scheduler in the future.
# You can skip the implementation of this function for this tutorial.
def tune_tasks(tasks,
measure_option,
tuner='xgb',
n_trial=1000,
early_stopping=None,
log_filename='tuning.log',
use_transfer_learning=True,
try_winograd=True):
if try_winograd:
for i in range(len(tasks)):
try: # try winograd template
tsk = autotvm.task.create(tasks[i].name, tasks[i].args,
tasks[i].target, tasks[i].target_host, 'winograd')
tasks.append(tsk)
except Exception:
pass
# create tmp log file
tmp_log_file = log_filename + ".tmp"
if os.path.exists(tmp_log_file):
os.remove(tmp_log_file)
for i, tsk in enumerate(reversed(tasks)):
prefix = "[Task %2d/%2d] " % (i+1, len(tasks))
# create tuner
if tuner == 'xgb' or tuner == 'xgb-rank':
tuner_obj = XGBTuner(tsk, loss_type='rank')
elif tuner == 'ga':
tuner_obj = GATuner(tsk, pop_size=50)
elif tuner == 'random':
tuner_obj = RandomTuner(tsk)
elif tuner == 'gridsearch':
tuner_obj = GridSearchTuner(tsk)
else:
raise ValueError("Invalid tuner: " + tuner)
if use_transfer_learning:
if os.path.isfile(tmp_log_file):
tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))
# do tuning
tuner_obj.tune(n_trial=min(n_trial, len(tsk.config_space)),
early_stopping=early_stopping,
measure_option=measure_option,
callbacks=[
autotvm.callback.progress_bar(n_trial, prefix=prefix),
autotvm.callback.log_to_file(tmp_log_file)])
# pick best records to a cache file
autotvm.record.pick_best(tmp_log_file, log_filename)
os.remove(tmp_log_file)
########################################################################
# Finally, we launch tuning jobs and evaluate the end-to-end performance.
def tune_and_evaluate(tuning_opt):
# extract workloads from nnvm graph
print("Extract tasks...")
net, params, input_shape, out_shape = get_network(network, batch_size=1)
tasks = autotvm.task.extract_from_graph(net, target=target, target_host=target_host,
shape={'data': input_shape}, dtype=dtype,
symbols=(nnvm.sym.conv2d, nnvm.sym.dense))
# run tuning tasks
print("Tuning...")
tune_tasks(tasks, **tuning_opt)
# compile kernels with history best records
with autotvm.apply_history_best(log_file):
print("Compile...")
with nnvm.compiler.build_config(opt_level=3):
graph, lib, params = nnvm.compiler.build(
net, target=target, target_host=target_host,
shape={'data': input_shape}, params=params, dtype=dtype)
# export library
tmp = tempdir()
if use_android:
from tvm.contrib import ndk
filename = "net.so"
lib.export_library(tmp.relpath(filename), ndk.create_shared)
else:
filename = "net.tar"
lib.export_library(tmp.relpath(filename))
# upload module to device
print("Upload...")
remote = autotvm.measure.request_remote(device_key, 'localhost', 9190,
timeout=10000)
remote.upload(tmp.relpath(filename))
rlib = remote.load_module(filename)
# upload parameters to device
ctx = remote.context(str(target), 0)
module = runtime.create(graph, rlib, ctx)
data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
module.set_input('data', data_tvm)
module.set_input(**params)
# evaluate
print("Evaluate inference time cost...")
ftimer = module.module.time_evaluator("run", ctx, number=50, repeat=3)
prof_res = np.array(ftimer().results) * 1000 # convert to millisecond
print("Mean inference time (std dev): %.2f ms (%.2f ms)" %
(np.mean(prof_res), np.std(prof_res)))
# We do not run the tuning on our documentation server since it takes too long.
# Uncomment the following line to run it yourself.
# tune_and_evaluate(tuning_option)
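######################################################################
# If you already have a tuning log from an earlier run, you can skip
# the tuning step and go straight to compilation. A minimal sketch
# (the log path is hypothetical; :code:`net`, :code:`params`, and
# :code:`input_shape` come from :code:`get_network` as above):
#
# .. code-block:: python
#
#     with autotvm.apply_history_best('mali.resnet-18.log'):
#         with nnvm.compiler.build_config(opt_level=3):
#             graph, lib, params = nnvm.compiler.build(
#                 net, target=target, target_host=target_host,
#                 shape={'data': input_shape}, params=params, dtype=dtype)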
######################################################################
# Sample Output
# -------------
# The tuning needs to compile many programs and extract features from them,
# so a high-performance CPU is recommended.
# One sample output is listed below. It takes about 3 hours on a 32-thread
# AMD Ryzen Threadripper.
#
# .. code-block:: bash
#
# Extract tasks...
# Tuning...
# [Task 1/17] Current/Best: 25.30/ 39.12 GFLOPS | Progress: (992/1000) | 751.22 s Done.
# [Task 2/17] Current/Best: 40.70/ 45.50 GFLOPS | Progress: (736/1000) | 545.46 s Done.
# [Task 3/17] Current/Best: 38.83/ 42.35 GFLOPS | Progress: (992/1000) | 1549.85 s Done.
# [Task 4/17] Current/Best: 23.31/ 31.02 GFLOPS | Progress: (640/1000) | 1059.31 s Done.
# [Task 5/17] Current/Best: 0.06/ 2.34 GFLOPS | Progress: (544/1000) | 305.45 s Done.
# [Task 6/17] Current/Best: 10.97/ 17.20 GFLOPS | Progress: (992/1000) | 1050.00 s Done.
# [Task 7/17] Current/Best: 8.98/ 10.94 GFLOPS | Progress: (928/1000) | 421.36 s Done.
# [Task 8/17] Current/Best: 4.48/ 14.86 GFLOPS | Progress: (704/1000) | 582.60 s Done.
# [Task 9/17] Current/Best: 10.30/ 25.99 GFLOPS | Progress: (864/1000) | 899.85 s Done.
# [Task 10/17] Current/Best: 11.73/ 12.52 GFLOPS | Progress: (608/1000) | 304.85 s Done.
# [Task 11/17] Current/Best: 15.26/ 18.68 GFLOPS | Progress: (800/1000) | 747.52 s Done.
# [Task 12/17] Current/Best: 17.48/ 26.71 GFLOPS | Progress: (1000/1000) | 1166.40 s Done.
# [Task 13/17] Current/Best: 0.96/ 11.43 GFLOPS | Progress: (960/1000) | 611.65 s Done.
# [Task 14/17] Current/Best: 17.88/ 20.22 GFLOPS | Progress: (672/1000) | 670.29 s Done.
# [Task 15/17] Current/Best: 11.62/ 13.98 GFLOPS | Progress: (736/1000) | 449.25 s Done.
# [Task 16/17] Current/Best: 19.90/ 23.83 GFLOPS | Progress: (608/1000) | 708.64 s Done.
# [Task 17/17] Current/Best: 17.98/ 22.75 GFLOPS | Progress: (736/1000) | 1122.60 s Done.
# Compile...
# Upload...
# Evaluate inference time cost...
# Mean inference time (std dev): 128.05 ms (7.74 ms)
#
######################################################################
#
# .. note:: **Experiencing Difficulties?**
#
# The auto-tuning module is error-prone. If you always see " 0.00/ 0.00 GFLOPS",
# then something is wrong.
#
# First, make sure you set the correct configuration for your device.
# Then, you can print debug information by adding these lines at the beginning
# of the script. They will print every measurement result, where you can find
# useful error messages.
#
# .. code-block:: python
#
# import logging
# logging.getLogger('autotvm').setLevel(logging.DEBUG)
#
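# If measurements keep failing, you can also inspect the temporary log
# directly. A minimal sketch (the helper name and log path are our own,
# not part of the tutorial): :code:`error_no` is zero for successful
# measurements, so counting non-zero entries shows how many trials
# failed.
#
# .. code-block:: python
#
#     def count_failed(log_path='tuning.log.tmp'):
#         total = failed = 0
#         for inp, res in autotvm.record.load_from_file(log_path):
#             total += 1
#             if res.error_no != 0:  # non-zero error_no marks a failure
#                 failed += 1
#         print('%d/%d measurements failed' % (failed, total))
#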
# Finally, always feel free to ask our community for help on https://discuss.tvm.ai
|
mlperf/training_results_v0.6
|
Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/tutorials/autotvm/tune_nnvm_mobile_gpu.py
|
Python
|
apache-2.0
| 15,940
|